| content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def sizeof_fmt(num, suffix='B'):
"""Return human readable version of in-memory size.
Code from Fred Cirera from Stack Overflow:
https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
|
1aeace0d5ad8ca712704a8ee58e1e206e5e61b56
| 3,643,500
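A brief usage sketch for sizeof_fmt above (illustrative values only):

sizeof_fmt(0)              # '0.0B'
sizeof_fmt(1536)           # '1.5KiB'
sizeof_fmt(3 * 1024 ** 3)  # '3.0GiB'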
|
def readFPs(filepath):
"""Reads a list of fingerprints from a file"""
try:
myfile = open(filepath, "r")
except OSError:
raise IOError("file does not exist: %s" % filepath)
else:
fps = []
for line in myfile:
if line[0] != "#": # ignore comments
line = line.rstrip().split()
fps.append(line[0])
return fps
|
96d483360c411a27a3b570875f61344ef4dae573
| 3,643,501
|
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third parameter in
its signature is 'axis', which takes either an ndarray or 'None', so check
if the 'convert' parameter is either an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method="both")
return convert
|
dacaf4aa6fd5ff9fa577a217c0209d75785abbaf
| 3,643,502
|
from typing import List
def load_operators_expr() -> List[str]:
"""Returns clip loads operators for std.Expr as a list of string."""
abcd = list(ascii_lowercase)
return abcd[-3:] + abcd[:-3]
|
49ba476bebbb6b202b7021458e70e6b1fb927810
| 3,643,503
|
def findScanNumberString(s):
"""If s contains 'NNNN', where N stands for any digit, return the string
beginning with 'NNNN' and extending to the end of s. If 'NNNN' is not
found, return ''."""
n = 0
for i in range(len(s)):
if s[i].isdigit():
n += 1
else:
n = 0
if n == 4:
return s[i-3:]
return ''
|
fd5973383bcf8b74573408d95d4f0065dfbda32f
| 3,643,504
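A quick illustration of findScanNumberString above (hypothetical file names):

findScanNumberString("scan_1234_raw.fits")  # -> '1234_raw.fits'
findScanNumberString("run_007.dat")         # -> '' (only three consecutive digits)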
|
import urllib
import urlparse
def parseWsUrl(url):
"""
Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).
isSecure is a flag which is True for wss URLs.
host is the hostname or IP from the URL.
port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
path is the /path/ component properly unescaped.
params is the /query/ component properly unescaped and returned as a dictionary.
:param url: A valid WebSocket URL, i.e. ws://localhost:9000/myresource?param1=23&param2=666
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
|
149db7e862f832baf7591fb173cd53d5259cfbba
| 3,643,505
|
import cv2
def load_image(filename):
"""Loads an image, reads it and returns image size,
dimension and a numpy array of this image.
filename: the name of the image
"""
try:
img = cv2.imread(filename)
print("(H, W, D) = (height, width, depth)")
print("shape: ",img.shape)
h, w, d = img.shape
print('this is the width', w)
print('this is the height', h)
#size = h * w
except Exception as e:
print(e)
print ("Unable to load image")
return img.shape, img
|
2f27d15cd12fcdf4656291a7349883e8d63ff7cf
| 3,643,506
|
def add_manipulable(key, manipulable):
"""
add a ArchipackActiveManip into the stack
if not already present
setup reference to manipulable
return manipulators stack
"""
global manips
if key not in manips.keys():
# print("add_manipulable() key:%s not found create new" % (key))
manips[key] = ArchipackActiveManip(key)
manips[key].manipulable = manipulable
return manips[key].stack
|
3d3709758a96edec261141291950d28d2079ae19
| 3,643,507
|
def get_wave_data_type(sample_type_id):
"""Creates an SDS type definition for WaveData"""
if sample_type_id is None or not isinstance(sample_type_id, str):
raise TypeError('sample_type_id is not an instantiated string')
int_type = SdsType('intType', SdsTypeCode.Int32)
double_type = SdsType('doubleType', SdsTypeCode.Double)
# WaveData uses Order as the key, or primary index
order_property = SdsTypeProperty('Order', True, int_type)
tau_property = SdsTypeProperty('Tau', False, double_type)
radians_property = SdsTypeProperty('Radians', False, double_type)
sin_property = SdsTypeProperty('Sin', False, double_type)
cos_property = SdsTypeProperty('Cos', False, double_type)
tan_property = SdsTypeProperty('Tan', False, double_type)
sinh_property = SdsTypeProperty('Sinh', False, double_type)
cosh_property = SdsTypeProperty('Cosh', False, double_type)
tanh_property = SdsTypeProperty('Tanh', False, double_type)
# Create an SdsType for WaveData Class
wave = SdsType(sample_type_id, SdsTypeCode.Object,
[order_property, tau_property, radians_property, sin_property, cos_property,
tan_property, sinh_property, cosh_property, tanh_property], 'WaveDataSample',
'This is a sample SDS type for storing WaveData type events')
return wave
|
e86d693ac1405b7f440065cbc5eced33adcc666f
| 3,643,508
|
import random
import numpy as np
from PIL import Image
from PIL.Image import BICUBIC
def _spec_augmentation(x,
warp_for_time=False,
num_t_mask=2,
num_f_mask=2,
max_t=50,
max_f=10,
max_w=80):
""" Deep copy x and do spec augmentation then return it
Args:
x: input feature, T * F 2D
num_t_mask: number of time mask to apply
num_f_mask: number of freq mask to apply
max_t: max width of time mask
max_f: max width of freq mask
max_w: max width of time warp
Returns:
augmented feature
"""
y = np.copy(x)
max_frames = y.shape[0]
max_freq = y.shape[1]
# time warp
if warp_for_time and max_frames > max_w * 2:
center = random.randrange(max_w, max_frames - max_w)
warped = random.randrange(center - max_w, center + max_w) + 1
left = Image.fromarray(x[:center]).resize((max_freq, warped), BICUBIC)
right = Image.fromarray(x[center:]).resize((max_freq,
max_frames - warped),
BICUBIC)
y = np.concatenate((left, right), 0)
# time mask
for i in range(num_t_mask):
start = random.randint(0, max_frames - 1)
length = random.randint(1, max_t)
end = min(max_frames, start + length)
y[start:end, :] = 0
# freq mask
for i in range(num_f_mask):
start = random.randint(0, max_freq - 1)
length = random.randint(1, max_f)
end = min(max_freq, start + length)
y[:, start:end] = 0
return y
|
caa4a9010254e13be36e2359d7437cd9f2ced084
| 3,643,509
|
def deg2rad(x, dtype=None):
"""
Converts angles from degrees to radians.
Args:
x (Tensor): Angles in degrees.
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor, the corresponding angle in radians. This is a tensor scalar if `x`
is a tensor scalar.
Raises:
TypeError: if `x` is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> x = np.asarray([1, 2, 3, -4, -5])
>>> output = np.deg2rad(x)
>>> print(output)
[ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647]
"""
_check_input_tensor(x)
def convert(a):
return a * pi / 180.0
return _apply_tensor_op(convert, x, dtype=dtype)
|
9e7ff9f5242e5b2eede27b06eb7eb64ba84bbc69
| 3,643,510
|
import math
def point_in_ellipse(origin, point, a, b, pa_rad, verbose=False):
"""
Identify if the point is inside the ellipse.
:param origin A SkyCoord defining the centre of the ellipse.
:param point A SkyCoord defining the point to be checked.
:param a The semi-major axis in arcsec of the ellipse
:param b The semi-minor axis in arcsec of the ellipse
:param pa_rad The position angle of the ellipse. This is the angle of the major axis measured in radians East of
North (or CCW from the y axis).
"""
# Convert point to be in plane of the ellipse, accounting for distortions at high declinations
p_ra_dist = (point.icrs.ra.degree - origin.icrs.ra.degree)* math.cos(origin.icrs.dec.rad)
p_dec_dist = point.icrs.dec.degree - origin.icrs.dec.degree
# Calculate the angle and radius of the test point relative to the centre of the ellipse
# Note that we reverse the ra direction to reflect the CCW direction
radius = math.sqrt(p_ra_dist**2 + p_dec_dist**2)
diff_angle = (math.pi/2 + pa_rad) if p_dec_dist == 0 else math.atan(p_ra_dist / p_dec_dist) - pa_rad
# Obtain the point position in terms of the ellipse major and minor axes
minor = radius * math.sin(diff_angle)
major = radius * math.cos(diff_angle)
if verbose:
print ('point relative to ellipse centre angle:{} deg radius:{:.4f}" maj:{:.2f}" min:{:.2f}"'.format(math.degrees(diff_angle), radius*3600,
major*3600, minor*3600))
a_deg = a / 3600.0
b_deg = b / 3600.0
# Calc distance from origin relative to a and b
dist = math.sqrt((major / a_deg) ** 2 + (minor / b_deg) ** 2)
if verbose:
print("Point %s is %f from ellipse %f, %f, %f at %s." % (point, dist, a, b, math.degrees(pa_rad), origin))
return round(dist,3) <= 1.0
|
9c4b056c205b8d25e80211adb0eeb1cdfaf4c11c
| 3,643,511
|
def isNumberString(value):
"""
Checks if value is a string that has only digits - possibly with leading '+' or '-'
"""
if not value:
return False
sign = value[0]
if (sign == '+') or (sign == '-'):
if len(value) <= 1:
return False
absValue = value[1:]
return absValue.isdigit()
else:
if len(value) <= 0:
return False
else:
return value.isdigit()
|
06feaab112e184e6a01c2b300d0e4f1a88f2250e
| 3,643,512
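A few illustrative calls to isNumberString above:

isNumberString("123")   # True
isNumberString("-42")   # True
isNumberString("+")     # False (sign with no digits)
isNumberString("3.14")  # False (a decimal point is not a digit)
isNumberString("")      # False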
|
def vaseline(tensor, shape, alpha=1.0, time=0.0, speed=1.0):
"""
"""
return value.blend(tensor, center_mask(tensor, bloom(tensor, shape, 1.0), shape), alpha)
|
75e61b21e9ffc1f13a8958ee92d0940596ae116b
| 3,643,513
|
from typing import Union
from typing import Dict
from typing import Any
from typing import List
def _func_length(target_attr: Union[Dict[str, Any], List[Any]], *_: Any) -> int:
"""Function for returning the length of a dictionary or list."""
return len(target_attr)
|
b66a883c763c93d9a62a7c09324ab8671d325d05
| 3,643,514
|
from io import TextIOWrapper
from typing import Optional
def import_places_from_swissnames3d(
projection: str = "LV95", file: Optional[TextIOWrapper] = None
) -> str:
"""
import places from SwissNAMES3D
:param projection: "LV03" or "LV95"
see http://mapref.org/CoordinateReferenceFrameChangeLV03.LV95.html#Zweig1098
:param file: path to local unzipped file. if provided, the `projection`
parameter will be ignored.
"""
try:
file = file or get_swissnames3d_remote_file(projection=projection)
except HTTPError as error:
return f"Error downloading {PLACE_DATA_URL}: {error}. "
except ConnectionError:
return f"Error connecting to {PLACE_DATA_URL}. "
with file:
count = get_csv_line_count(file, header=True)
data = parse_places_from_csv(file, projection=projection)
source_info = f"SwissNAMES3D {projection}"
return save_places_from_generator(data, count, source_info)
|
cc90f3da95bf84ff3dd854de310a6690a28fd750
| 3,643,515
|
import logging
def create_file_handler(log_file, handler_level, formatter=logging.Formatter(LOG_FORMAT_STRING)):
"""
Creates file handler which logs even debug messages.
"""
if handler_level == 'debug':
level = logging.DEBUG
elif handler_level == 'info':
level = logging.INFO
elif handler_level == 'warning':
level = logging.WARNING
elif handler_level == 'error':
level = logging.ERROR
elif handler_level == 'critical':
level = logging.CRITICAL
else:
raise Exception('logger level has to be defined')
fh = MakeFileHandler(log_file)
fh.setLevel(level)
fh.setFormatter(formatter)
return fh
|
0c294fe8d6c7e831a4a567dc101f8cef4fe980d2
| 3,643,516
|
def _generate_data(size):
""" For testing reasons only """
# return FeatureSpec('dummy', name=None, data='x' * size)
return PlotSpec(data='x' * size, mapping=None, scales=[], layers=[])
|
62cbbe947b4d20726f24503c38b9ba2c5d8bdc82
| 3,643,517
|
def configuration_filename(feature_dir, proposed_splits, split, generalized):
"""Calculates configuration specific filenames.
Args:
feature_dir (`str`): directory of features wrt
to dataset directory.
proposed_splits (`bool`): whether using proposed splits.
split (`str`): train split.
generalized (`bool`): whether GZSL setting.
Returns:
`str` containing arguments in appropriate form.
"""
return '{}{}_{}{}.pt'.format(
feature_dir,
('_proposed_splits' if proposed_splits else ''),
split,
'_generalized' if generalized else '',
)
|
a3fc2c23746be7ed17f91820dd30a8156f91940c
| 3,643,518
|
import array
def gammaBGRbuf(
buf: array,
gamma: float) -> array:
"""Apply a gamma adjustment to a
BGR buffer
Args:
buf: unsigned byte array
holding BGR data
gamma: float gamma adjust
Returns:
unsigned byte array
holding gamma adjusted
BGR data
"""
applygammaBGRbuf(buf, gamma)
return buf
|
2d32f2ae0f1aae12f2ed8597f99b5cd5547ea108
| 3,643,519
|
def sentence_avg_word_length(df, new_col_name, col_with_lyrics):
"""
Compute the average word length of each entry in a dataframe lyrics column and store the result in a new column named new_col_name.
Parameters
----------
df : dataframe
new_col_name : name of new column
col_with_lyrics : column with lyrics
Returns
-------
dataframe with new column
"""
df[new_col_name] = df[col_with_lyrics].apply(_sentence_avg_word_length)
return df
|
50dd7cb7145f5c6b39d3e8199f294b788ca361c0
| 3,643,520
|
import numpy as np
def to_sigmas(t,p,w_1,w_2,w_3):
"""Given t = sin(theta), p = sin(phi), and the stds this computes the covariance matrix and its inverse"""
p2 = p*p
t2 = t*t
tc2 = 1-t2
pc2 = 1-p2
tc= np.sqrt(tc2)
pc= np.sqrt(pc2)
s1,s2,s3 = 1./(w_1*w_1),1./(w_2*w_2),1./(w_3*w_3)
a = pc2*tc2*s1 + t2*s2 + p2*tc2*s3
b = pc2*t2*s1 + tc2*s2 + p2*t2*s3
c = p2*s1 + pc2*s3
d = tc*t*(pc2*s1 - s2 + p2*s3)
e = p*pc*tc*(s3 - s1)
f = p*pc*t*(s3 - s1)
sigma_inv = np.array([[a, d, e], [d, b, f], [e, f, c]])
sigma = np.array([[(b*c - f ** 2)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2), (-(c*d) + e*f)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2), (-(b*e) + d*f)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2)],
[(-(c*d) + e*f)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2), (a*c - e ** 2)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2), (d*e - a*f)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2)],
[(-(b*e) + d*f)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2), (d*e - a*f)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2), (a*b - d ** 2)/(a*b*c - c*d ** 2 - b*e ** 2 + 2*d*e*f - a*f ** 2)]])
return sigma,sigma_inv
|
e6144c8d3313e25cd701f703703309820c60032e
| 3,643,521
|
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
"""Download the Pauli et al. (2017) atlas with in total
12 subcortical nodes.
Parameters
----------
version: str, optional (default='prob')
Which version of the atlas should be download. This can be 'prob'
for the probabilistic atlas or 'det' for the deterministic atlas.
data_dir : str, optional (default=None)
Path of the data directory. Used to force data storage in a specified
location.
verbose : int
verbosity level (0 means no message).
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
https://osf.io/r2hvk/
`Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution
probabilistic in vivo atlas of human subcortical brain nuclei.
Scientific Data, 5, 180063-13. http://doi.org/10.1038/sdata.2018.63``
"""
if version == 'prob':
url_maps = 'https://osf.io/w8zq2/download'
filename = 'pauli_2017_prob.nii.gz'
elif version == 'det':
url_maps = 'https://osf.io/5mqfx/download'
filename = 'pauli_2017_det.nii.gz'
else:
raise NotImplementedError('{} is no valid version for '.format(version) + \
'the Pauli atlas')
url_labels = 'https://osf.io/6qrcb/download'
dataset_name = 'pauli_2017'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files = [(filename,
url_maps,
{'move':filename}),
('labels.txt',
url_labels,
{'move':'labels.txt'})]
atlas_file, labels = _fetch_files(data_dir, files)
labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file,
labels=labels,
description=fdescr)
|
c7dbf85de92c143a221d91c3dd6f452a4d79ee2f
| 3,643,522
|
import traceback
def GeometricError(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(GeometricError.__name__ + ' is deprecated, use ' + geometricError.__name__ + ' instead')
traceback.print_stack(limit=2)
return geometricError(ref_point_1, ref_point_2)
|
aefd3a21ffa7123401af7ac2b106bc4efde624b5
| 3,643,523
|
def svn_fs_open2(*args):
"""svn_fs_open2(char const * path, apr_hash_t fs_config, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t"""
return _fs.svn_fs_open2(*args)
|
433e8fe01d5b6c3c7b66f8caa3c50e8386e99e92
| 3,643,524
|
def config(workspace):
"""Return a config object."""
return Config(workspace.root_uri, {})
|
6d02a61f4653742b90838a773458944a581f8ed4
| 3,643,525
|
from typing import List
from typing import Optional
def longest_sequence_index(sequences: List[List[XmonQubit]]) -> Optional[int]:
"""Gives the position of a longest sequence.
Args:
sequences: List of node sequences.
Returns:
Index of the longest sequence from the sequences list. If more than one
longest sequence exists, the first one is returned. None is returned for an
empty list.
"""
if sequences:
return max(range(len(sequences)), key=lambda i: len(sequences[i]))
return None
|
32aafa324daea819e48bc14516a8532c110c0362
| 3,643,526
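A minimal sketch of longest_sequence_index above; only the sequence lengths matter, so plain lists stand in for XmonQubit sequences:

longest_sequence_index([[0], [1, 2], [3, 4]])  # -> 1 (first of the two longest)
longest_sequence_index([])                     # -> None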
|
def subset_raster(rast, band=1, bbox=None, logger=None):
"""
:param rast: The rasterio raster object
:param band: The band number you want to contour. Default: 1
:param bbox: The bounding box in which to generate contours.
:param logger: The logger object to use for this tool
:return: A dict with the keys 'crs', 'array', 'affine', 'min', and 'max'. 'crs' is the raster's coordinate reference system,
array is the numpy array, affine is the transformation for the bbox, min/max are the min/max values within the bbox.
"""
# Affine transformations between raster and world coordinates.
# See https://github.com/sgillies/affine
# See https://github.com/mapbox/rasterio/blob/master/docs/windowed-rw.rst
a = rast.affine # Convert from pixel coordinates to world coordinates
reverse_affine = ~a # Convert from world coordinates to pixel coordinates
# Copy the metadata
kwargs = rast.meta.copy()
# Read the band
if bbox is not None:
bbox = list(bbox)
if len(bbox) != 4:
logger.error('BBOX is not of length 4. Should be (xmin, ymin, xmax, ymax)')
raise ValueError('BBOX is not of length 4. Should be (xmin, ymin, xmax, ymax)')
# Restrict to the extent of the original raster if our requested
# bbox is larger than the raster extent
min_x = bbox[0]
min_y = bbox[1]
max_x = bbox[2]
max_y = bbox[3]
if min_x < rast.bounds[0]:
min_x = rast.bounds[0]
if min_y < rast.bounds[1]:
min_y = rast.bounds[1]
if max_x > rast.bounds[2]:
max_x = rast.bounds[2]
if max_y > rast.bounds[3]:
max_y = rast.bounds[3]
bbox = (min_x, min_y, max_x, max_y)
# Convert the bounding box (world coordinates) to pixel coordinates
# window = ((row_start, row_stop), (col_start, col_stop))
window_bl = world_to_pixel_coords(rast.affine, [(bbox[0], bbox[1]),])
window_tr = world_to_pixel_coords(rast.affine, [(bbox[2], bbox[3]),])
window_rows = [int(window_bl[0, 1]), int(window_tr[0, 1])]
window_cols = [int(window_bl[0, 0]), int(window_tr[0, 0])]
window = (
(min(window_rows), max(window_rows)),
(min(window_cols), max(window_cols)))
# print('')
# print(window[0])
# print(window[1])
kwargs.update({
'height': abs(window[0][1] - window[0][0]),
'width': abs(window[1][1] - window[1][0]),
'affine': rast.window_transform(window)
})
else:
window = None
# Read the data but only the window we set
rast_band = rast.read(band, window=window, masked=True)
rast_a = kwargs['affine']
return {
'crs': rast.crs,
'array': rast_band,
'affine': rast_a,
'min': rast_band.min(),
'max': rast_band.max()
}
|
62bb0bc292fa2a9d09dc746ce329394cf9dd2fcb
| 3,643,527
|
import pandas as pd
def extract_date_features(df):
"""Expand datetime values into individual features."""
for col in df.select_dtypes(include=['datetime64[ns]']):
print(f"Now extracting features from column: '{col}'.")
df[col + '_month'] = pd.DatetimeIndex(df[col]).month
df[col + '_day'] = pd.DatetimeIndex(df[col]).day
df[col + '_weekday'] = pd.DatetimeIndex(df[col]).weekday
df.drop(columns=[col], inplace=True)
print("Done!")
return df
|
8726cf0d160de11dfbad701d6a0c7fb3113691f6
| 3,643,528
|
from copy import copy
def record_setitem(data, attr, value):
"""Implement `record_setitem`."""
data2 = copy(data)
py_setattr(data2, attr, value)
return data2
|
52af700d8d282a411e37de83a7ddfab7f3b9de82
| 3,643,529
|
from typing import Optional
def get_git_branch() -> Optional[str]:
"""Get the git branch."""
return _run("git", "branch", "--show-current")
|
dee21ab7e6d9800160e161ae32fad3f9c6c6a8fb
| 3,643,530
|
import nibabel
import numpy as np
def open_image(path, verbose=True, squeeze=False):
"""
Open a NIfTI-1 image at the given path. The image might have an arbitrary number of dimensions; however, its first
three axes are assumed to hold its spatial dimensions.
Parameters
----------
path : str
The path of the file to be loaded.
verbose : bool, optional
If `True` (default), print some meta data of the loaded file to standard output.
squeeze : bool, optional
If `True`, remove trailing dimensions of the image volume if they contain a single entry only (default is
`False`). Note that in this case it has not been tested whether the coordinate transformations from the NIfTI-1
header still apply.
Returns
-------
Volume
The resulting 3D image volume, with the ``src_object`` attribute set to the respective
``nibabel.nifti1.Nifti1Image`` instance and the desired anatomical world coordinate system ``system`` set to
"RAS". Relies on the NIfTI header's `get_best_affine()` method to dermine which transformation matrix to use
(qform or sform).
Raises
------
IOError
If something goes wrong.
"""
# According to the NIfTI-1 specification [1]_, the world coordinate system of NIfTI-1 files is always RAS.
src_system = "RAS"
try:
src_object = nibabel.nifti1.load(path)
except Exception as e:
raise IOError(e)
voxel_data = np.asanyarray(src_object.dataobj)
if isinstance(voxel_data, np.memmap):
voxel_data.mode = "c" # Make sure that no changes happen to data on disk: copy on write
hdr = src_object.header
ndim = hdr["dim"][0]
if ndim < 3:
raise IOError("Currently only 3D images can be handled. The given image has {} dimension(s).".format(ndim))
if verbose:
print("Loading image:", path)
print("Meta data:")
print(hdr)
print("Image dimensions:", voxel_data.ndim)
# Squeeze superfluous dimensions (according to the NIfTI-1 specification [1]_, the spatial dimensions are always
# in front)
if squeeze:
voxel_data = __squeeze_dim(voxel_data, verbose)
mat = hdr.get_best_affine()
volume = Volume(src_voxel_data=voxel_data, src_transformation=mat, src_system=src_system,
src_spatial_dimensions=(0, 1, 2), system="RAS", src_object=src_object)
return volume
|
217522c5ea45b9c1cbff8053dc9668cf5473c709
| 3,643,531
|
def add_one_for_ordered_traversal(graph,
node_idx,
current_path=None):
"""
This recursive function returns an ordered traversal of a molecular graph.
This traversal obeys the following rules:
1. Locations may only be visited once
2. All locations must be visited
3. Locations are visited in the order in which the shortest path is
followed
- If potential paths are identical in length, then the one that
provides lightest total weight is followed
- If the total weight of each path is identical (which would be
the case for a molecule that contains any cycle) then the
path that provides the lightest first atom is chosen
- If the lightest first atom is identical, then.............
Recursive algorithm works as follows:
1. Go from node to node until reaching a node that has no neighbors.
2. Once this node is reached, it returns itself back up the stack.
3. If a node only has a single path, this is also immediately returned
up the stack.
4. Once a node is reached that has two possible paths, a choice is made
between the two competing paths. The path that is the shortest is
automatically chosen... But this is actually not what I want.
What I want is that the path leading down is fully traversed and
then the path that provides the lightest direction is gone down first
If both paths are then equal in weight (such as should be the case
for a cycle) then the path that provides the most direct route
to the heaviest group will be preferred.
If the paths are completely identical, then it should not matter
which one is chosen first from the perspective of a graph.
"""
if current_path is None:
current_path = []
### Make copy of input current_path
current_path = [x for x in current_path]
path = [node_idx]
current_path += [node_idx]
neighbors = graph.adj[node_idx]
### Build entire traversal list
neigh_path_list = []
for entry in neighbors:
# print(node_idx, entry)
if entry in current_path:
continue
neigh_path = add_one_for_ordered_traversal(graph, entry, current_path)
if len(neigh_path) > 0:
neigh_path_list.append(neigh_path)
# print(node_idx, entry, neigh_path)
### Only a single option
if len(neigh_path_list) == 1:
if len(neigh_path_list[0]) == 1:
path += neigh_path_list[0]
return path
elif len(neigh_path_list) == 0:
return [node_idx]
### If there's more than single option, then an algorithm that seeks
### to stitch together the neighbor paths in a reasonable and unique way
### should be used
neigh_list_sorted = _sort_neighbor_path_list(graph, neigh_path_list)
# print("SORTED: ", neigh_list_sorted)
path += neigh_list_sorted
return path
|
1c923d07c6ca57d47c900fd2cc05470c4a0eef86
| 3,643,532
|
import subprocess
def get_kubeseal_version() -> str:
"""Retrieve the kubeseal binary version."""
LOGGER.debug("Retrieving kubeseal binary version.")
binary = current_app.config.get("KUBESEAL_BINARY")
kubeseal_subprocess = subprocess.Popen(
[binary, "--version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, error = kubeseal_subprocess.communicate()
if error:
error_message = f"Error in run_kubeseal: {error}"
LOGGER.error(error_message)
raise RuntimeError(error_message)
version = "".join(output.decode("utf-8").split("\n"))
return str(version).split(":")[1].replace('"', "").lstrip()
|
3301c18d3bbdf0c0f5cc4a0a98dddf4d7a25ad26
| 3,643,533
|
import json
def get_subject_guide_for_section_params(
year, quarter, curriculum_abbr, course_number, section_id=None):
"""
Returns a SubjectGuide model for the passed section params:
year: year for the section term (4-digits)
quarter: quarter (AUT, WIN, SPR, or SUM)
curriculum_abbr: curriculum abbreviation
course_number: course number
section_id: course section identifier (optional)
"""
quarter = quarter.upper()[:3]
url = "{}/{}/{}/{}/{}/{}/{}".format(
subject_guide_url_prefix, 'course', year, quarter,
quote(curriculum_abbr.upper()), course_number, section_id.upper())
headers = {'Accept': 'application/json'}
response = SubjectGuideDao.getURL(url, headers)
response_data = str(response.data)
if response.status != 200:
raise DataFailureException(url, response.status, response_data)
return _subject_guide_from_json(json.loads(response.data))
|
fe22c43685eb36e3a0849c198e6e5621e763b7a3
| 3,643,534
|
def dense_reach_bonus(task_rew, b_pos, arm_pos, max_reach_bonus=1.5, reach_thresh=.02,
reach_multiplier=all_rew_reach_multiplier):
""" Convenience function for adding a conditional dense reach bonus to an aux task.
If the task_rew is > 1, this indicates that the actual task is complete, and instead of giving a reach
bonus, the max amount of reward given for a reach should be given (regardless of whether reach is satisfied).
If it is < 1, a dense reach reward is given, and the actual task reward is given ONLY if the reach
condition is satisfied. """
if task_rew > 1:
total_rew = task_rew + reach_multiplier * max_reach_bonus
else:
reach_rew = close(reach_thresh, b_pos, arm_pos, close_rew=max_reach_bonus)
new_task_rew = task_rew * int(reach_rew > 1)
total_rew = reach_multiplier * reach_rew + new_task_rew
return total_rew
|
ac1b53836a2a1fd9a4cf7c725222f0e053d65ddb
| 3,643,535
|
import re
def getAllNumbers(text):
"""
This function is a copy of systemtools.basics.getAllNumbers
"""
if text is None:
return None
allNumbers = []
if len(text) > 0:
# Remove space between digits :
spaceNumberExists = True
while spaceNumberExists:
text = re.sub('(([^.,0-9]|^)[0-9]+) ([0-9])', '\\1\\3', text, flags=re.UNICODE)
if re.search('([^.,0-9]|^)[0-9]+ [0-9]', text) is None:
spaceNumberExists = False
numberRegex = '[-+]?[0-9]+[.,][0-9]+|[0-9]+'
allMatchIter = re.finditer(numberRegex, text)
if allMatchIter is not None:
for current in allMatchIter:
currentFloat = current.group()
currentFloat = re.sub("\s", "", currentFloat)
currentFloat = re.sub(",", ".", currentFloat)
currentFloat = float(currentFloat)
if currentFloat.is_integer():
allNumbers.append(int(currentFloat))
else:
allNumbers.append(currentFloat)
return allNumbers
|
42d45d6bb7a5ae1b25d2da6eadb318c3388923d6
| 3,643,536
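Illustrative calls to getAllNumbers above, showing how spaces inside numbers and decimal commas are handled:

getAllNumbers("12 345 items at 3,5 EUR")  # -> [12345, 3.5]
getAllNumbers("no digits here")           # -> []
getAllNumbers(None)                       # -> None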
|
def optimal_string_alignment_distance(s1, s2):
"""
This is a variation of the Damerau-Levenshtein distance that returns the strings' edit distance
taking into account deletion, insertion, substitution, and transposition, under the condition
that no substring is edited more than once.
Args:
s1 (str): Sequence 1.
s2 (str): Sequence 2.
Returns:
float: Optimal String Alignment Distance.
Examples:
>>> rltk.optimal_string_alignment_distance('abcd', 'acbd')
1
>>> rltk.optimal_string_alignment_distance('ca', 'abc')
3
"""
utils.check_for_none(s1, s2)
utils.check_for_type(str, s1, s2)
# s1 = utils.unicode_normalize(s1)
# s2 = utils.unicode_normalize(s2)
n1, n2 = len(s1), len(s2)
dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]
for i in range(0, n1 + 1):
dp[i][0] = i
for j in range(0, n2 + 1):
dp[0][j] = j
for i in range(1, n1 + 1):
for j in range(1, n2 + 1):
cost = 0 if s1[i - 1] == s2[j - 1] else 1
dp[i][j] = min(dp[i][j - 1] + 1,
dp[i - 1][j] + 1,
dp[i - 1][j - 1] + cost)
if (i > 1 and j > 1 and s1[i - 1] == s2[j - 2] and s1[i - 2] == s2[j - 1]):
dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + cost)
return dp[n1][n2]
|
9c05cfd3217619e76dd1e6063aa1aa689dc1a0ef
| 3,643,537
|
def test_sanitize_callable_params():
"""Callback function are not serializiable.
Therefore, we get them a chance to return something and if the returned type is not accepted, return None.
"""
opt = "--max_epochs 1".split(" ")
parser = ArgumentParser()
parser = Trainer.add_argparse_args(parent_parser=parser)
params = parser.parse_args(opt)
def return_something():
return "something"
params.something = return_something
def wrapper_something():
return return_something
params.wrapper_something_wo_name = lambda: lambda: "1"
params.wrapper_something = wrapper_something
params = _convert_params(params)
params = _flatten_dict(params)
params = _sanitize_callable_params(params)
assert params["gpus"] == "None"
assert params["something"] == "something"
assert params["wrapper_something"] == "wrapper_something"
assert params["wrapper_something_wo_name"] == "<lambda>"
|
d2a553a3c347d5ef0a2be10b21af6920a50697fb
| 3,643,538
|
import six
from google.cloud import storage
def get_url(bucket_name, filename):
"""
Gets the uri to the object.
"""
client = storage.Client()
bucket = client.bucket(bucket_name)
blob = bucket.blob(filename)
url = blob.public_url
if isinstance(url, six.binary_type):
url = url.decode('utf-8')
return url
|
15e2d5ae5cfdfeb9794c9cfef1feecbc0f1e4183
| 3,643,539
|
import numpy as np
def distance():
"""
Return a random value of FRB distance,
chosen from a range of observed FRB distances.
- Args: None.
- Returns: FRB distance in meters
"""
dist_m = np.random.uniform(6.4332967e24,1.6849561e26)
return dist_m
|
c38cfa7878020bafd9fa1cafef962ed2b91bc804
| 3,643,540
|
def p10k(n, empty="-"):
"""
Write number as parts per ten thousand.
"""
if n is None or np.isnan(n):
return empty
elif n == 0:
return "0.0‱"
elif np.isinf(n):
return _("inf") if n > 0 else _("-inf")
return format_number(10000 * n) + "‱"
|
6d0ff6e5b48c62ad10207c0f8a72595201042ef4
| 3,643,541
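A sketch of p10k above; format_number is a helper defined elsewhere in the module and is assumed here to render plain decimals:

p10k(None)     # '-'
p10k(0)        # '0.0‱'
p10k(0.00125)  # '12.5‱' (assuming format_number(12.5) -> '12.5')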
|
from typing import Any
def output_file(filename: str, *codecs: Codec, **kwargs: Any) -> Output:
"""
A shortcut to create proper output file.
:param filename: output file name.
:param codecs: codec list for this output.
:param kwargs: output parameters.
:return: configured ffmpeg output.
"""
return Output(output_file=filename, codecs=list(codecs), **kwargs)
|
c467331d5a2773a014f52326872b7999bf17547c
| 3,643,542
|
import pkg_resources
def selftest_validate_resilient_circuits_installed(attr_dict, **_):
"""
selftest.py validation helper method.
Validates that 'resilient-circuits' is installed in the env
and confirms that the version is >= constants.RESILIENT_LIBRARIES_VERSION
:param attr_dict: (required) dictionary of attributes defined in ``selftest_attributes``
:type attr_dict: dict
:param path_selftest_py_file: (optional) path to selftest.py
:type path_selftest_py_file: str
:param package_name: (optional) name of package being validated
:type package_name: str
:param path_package: (optional) path to package
:type path_package: str
:return: returns a tuple with the status of the validation and an associated SDKValidateIssue
:rtype: (bool, SDKValidateIssue)
"""
LOG.debug("validating that 'resilient-circuits' is installed in the env...\n")
res_circuits_version = sdk_helpers.get_package_version(constants.CIRCUITS_PACKAGE_NAME)
if res_circuits_version and res_circuits_version >= pkg_resources.parse_version(constants.RESILIENT_LIBRARIES_VERSION):
# installed and correct version
return True, SDKValidateIssue(
name=attr_dict.get("name"),
description=attr_dict.get("pass_msg"),
severity=SDKValidateIssue.SEVERITY_LEVEL_DEBUG,
solution=""
)
elif res_circuits_version and res_circuits_version < pkg_resources.parse_version(constants.RESILIENT_LIBRARIES_VERSION):
# resilient-circuits installed but version not supported
return False, SDKValidateIssue(
name=attr_dict.get("name"),
description=attr_dict.get("fail_msg").format(res_circuits_version),
severity=attr_dict.get("severity"),
solution=attr_dict.get("fail_solution")
)
elif not res_circuits_version:
# if 'resilient-circuits' not installed
return False, SDKValidateIssue(
name=attr_dict.get("name"),
description=attr_dict.get("missing_msg"),
severity=attr_dict.get("severity"),
solution=attr_dict.get("missing_solution")
)
else:
# unknown other error
raise SDKException("Unknown error while checking for {0}".format(constants.CIRCUITS_PACKAGE_NAME))
|
d500d2bea15e5ff54fe7830cd7bfef75c5041cd8
| 3,643,543
|
import warnings
def convert_topology(topology, model_name, doc_string, target_opset,
channel_first_inputs=None,
options=None, remove_identity=True,
verbose=0):
"""
This function is used to convert our Topology object defined in
_parser.py into a ONNX model (type: ModelProto).
:param topology: The Topology object we are going to convert
:param model_name: GraphProto's name. Let "model" denote the
returned model. The string "model_name" would be
assigned to "model.graph.name."
:param doc_string: A string attached to the produced model
:param target_opset: number or dictionary,
for example, 7 for ONNX 1.2, and 8 for ONNX 1.3,
a dictionary is used to indicate different opset for
different domains
:param options: see :ref:`l-conv-options`
:param remove_identity: removes identity nodes
:param verbose: displays information while converting
:return: an ONNX ModelProto
"""
if target_opset is None:
target_opset = get_latest_tested_opset_version()
if isinstance(target_opset, dict):
onnx_target_opset = target_opset.get(
'', get_latest_tested_opset_version())
else:
onnx_target_opset = target_opset
if onnx_target_opset > get_opset_number_from_onnx():
found = get_opset_number_from_onnx()
raise RuntimeError(
"Parameter target_opset {} > {} is higher than the "
"version of the installed onnx package. See "
"https://github.com/onnx/onnx/blob/master/docs/"
"Versioning.md#released-versions"
".".format(onnx_target_opset, found))
if onnx_target_opset > get_latest_tested_opset_version():
warnings.warn(
"Parameter target_opset {} > {} is higher than the "
"the latest tested version"
".".format(
onnx_target_opset,
get_latest_tested_opset_version()))
container = ModelComponentContainer(
target_opset, options=options,
registered_models=topology.registered_models,
white_op=topology.raw_model._white_op,
black_op=topology.raw_model._black_op,
verbose=verbose)
# Traverse the graph from roots to leaves
# This loop could eventually be parallelized.
topology.convert_operators(container=container, verbose=verbose)
container.ensure_topological_order()
if len(container.inputs) == 0:
raise RuntimeError("No detected inputs after conversion.")
if len(container.outputs) == 0:
raise RuntimeError("No detected outputs after conversion.")
if verbose >= 2:
print("---NODES---")
for node in container.nodes:
print(" %s - %s: %r -> %r" % (
node.op_type, node.name, node.input, node.output))
# Create a graph from its main components
if container.target_opset_onnx < 9:
# When calling ModelComponentContainer's add_initializer(...),
# nothing is added into the input list. However, for ONNX target
# opset < 9, initializers should also be a part of model's
# (GraphProto) inputs. Thus, we create ValueInfoProto objects
# from initializers (type: TensorProto) directly and then add
# them into model's input list.
extra_inputs = [] # ValueInfoProto list of the initializers
for tensor in container.initializers:
# Sometimes (especially when creating optional input values
# such as RNN's initial hidden state), an initializer is also
# one of the original model's input, so it has been added into
# the container's input list. If this is the case, we need to
# skip one iteration to avoid duplicated inputs.
if tensor.name in [value_info.name for value_info in
container.inputs]:
continue
# Initializers are always tensors so we can just call
# make_tensor_value_info(...).
value_info = make_tensor_value_info(
tensor.name, tensor.data_type, tensor.dims)
extra_inputs.append(value_info)
# Before ONNX opset 9, initializers were needed to be passed in
# with inputs.
graph = make_graph(container.nodes, model_name,
container.inputs + extra_inputs,
container.outputs, container.initializers)
else:
# In ONNX opset 9 and above, initializers are included as
# operator inputs and therefore do not need to be passed as
# extra_inputs.
graph = make_graph(
container.nodes, model_name, container.inputs,
container.outputs, container.initializers)
# Add extra information related to the graph
graph.value_info.extend(container.value_info)
# Create model
onnx_model = make_model(graph)
# Update domain version
opv = min(onnx_target_opset,
_get_main_opset_version(onnx_model) or onnx_target_opset)
if not _update_domain_version(container, onnx_model, verbose=verbose):
# Main opset was not added. Doing it here.
op_set = onnx_model.opset_import.add()
op_set.domain = ''
op_set.version = opv
if verbose > 0:
print('[convert_topology] +opset: name=%r, version=%s' % (
'', opv))
# Add extra information
irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION)
onnx_model.ir_version = irv
onnx_model.producer_name = utils.get_producer()
onnx_model.producer_version = utils.get_producer_version()
onnx_model.domain = utils.get_domain()
onnx_model.model_version = utils.get_model_version()
onnx_model.doc_string = doc_string
# Removes many identity nodes,
# the converter may introduce identity nodes
# after a zipmap operator and onnx <= 1.7 does not
# support that. It does not use onnxconverter-common
# as the optimizer only support opset >= 9.
if remove_identity:
onnx_model = onnx_remove_node_identity(onnx_model)
return onnx_model
|
139efc34473518b0403cd0bdbfc85b0b2715d576
| 3,643,544
|
def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that should be accumulated."""
# We currently accumulate output tensors of most ops in the function and rely
# on the pruning pass to get rid of the unused accumulators at runtime.
# However, this can bloat the GraphDef and make debugging harder so we perform
# some optimizations.
#
# Optimization we currently perform:
# 1. We do not accumulate tensors which already have an accumulator
# in the loop body.
# 2. We do not accumulate outputs of Identity nodes. When building the
# FuncGraph, we add an Identity node for each output (see
# `AutomaticControlDependencies.mark_as_return`). Accumulating outputs
# of all these nodes bloats the GraphDef quite a bit so we remove those.
# Since the gradient of an Identity node does not rely on its forward op's
# input this is safe to do.
#
# Other possible optimizations:
# 1. Only accumulate tensors that will be required by the backward pass.
# This will require running the gradient pass and hence would increase the
# graph building time for the forward pass.
# 2. Do not accumulate Const nodes created inside the loop body.
# 3. Do not accumulate loop vars that are returned as-is just like captured
# tensors.
intermediates = []
reverse_captures = dict((v.ref(), k) for k, v in func_graph.captures)
for op in func_graph.get_operations():
if op.type == "Identity":
continue
# Accumulating mutexes can cause deadlock.
if op.type == "MutexLock":
continue
for o in op.outputs:
if (o is not func_graph.inputs[0] and # Loop counter.
o.dtype != dtypes.resource and # Do not accumulate resource tensors.
_get_accumulator(o) is None and # Has existing accumulator.
o.ref() not in reverse_captures
): # Captured value, hence loop invariant.
intermediates.append(o)
return intermediates
|
c0ee4b51524e1b4bf912d8293cc5490c4ae2c3b9
| 3,643,545
|
def multivariate_logrank_test(event_durations, groups, event_observed=None,
alpha=0.95, t_0=-1, suppress_print=False, **kwargs):
"""
This test is a generalization of the logrank_test: it can deal with n>2 populations (and should
be equal when n=2):
H_0: all event series are from the same generating processes
H_A: there exists at least one group that differs from the others.
Parameters:
event_durations: a (n,) numpy array the (partial) lifetimes of all individuals
groups: a (n,) numpy array of unique group labels for each individual.
event_observed: a (n,) numpy array of event observations: 1 if observed death, 0 if censored. Defaults
to all observed.
alpha: the level of significance desired.
t_0: the final time to compare the series' up to. Defaults to all.
suppress_print: if True, do not print the summary. Default False.
kwargs: add keywords and meta-data to the experiment summary.
Returns:
summary: a print-friendly summary of the statistical test
p_value: the p-value
test_result: True if reject the null, (pedantically) None if we can't reject the null.
"""
if event_observed is None:
event_observed = np.ones((event_durations.shape[0], 1))
n = max(event_durations.shape)
assert n == max(event_durations.shape) == max(event_observed.shape), "inputs must be of the same length."
groups, event_durations, event_observed = map(lambda x: pd.Series(np.reshape(x, (n,))), [groups, event_durations, event_observed])
unique_groups, rm, obs, _ = group_survival_table_from_events(groups, event_durations, event_observed, np.zeros_like(event_durations), t_0)
n_groups = unique_groups.shape[0]
# compute the factors needed
N_j = obs.sum(0).values
n_ij = (rm.sum(0).values - rm.cumsum(0).shift(1).fillna(0))
d_i = obs.sum(1)
n_i = rm.values.sum() - rm.sum(1).cumsum().shift(1).fillna(0)
ev = n_ij.mul(d_i / n_i, axis='index').sum(0)
# vector of observed minus expected
Z_j = N_j - ev
assert abs(Z_j.sum()) < 10e-8, "Sum is not zero." # this should move to a test eventually.
# compute covariance matrix
V_ = n_ij.mul(np.sqrt(d_i) / n_i, axis='index').fillna(1)
V = -np.dot(V_.T, V_)
ix = np.arange(n_groups)
V[ix, ix] = V[ix, ix] + ev
# take the first n-1 groups
U = Z_j.ix[:-1].dot(np.linalg.pinv(V[:-1, :-1]).dot(Z_j.ix[:-1])) # Z.T*inv(V)*Z
# compute the p-values and tests
test_result, p_value = chisq_test(U, n_groups - 1, alpha)
summary = pretty_print_summary(test_result, p_value, U, t_0=t_0, test='logrank',
alpha=alpha, null_distribution='chi squared',
df=n_groups - 1, **kwargs)
if not suppress_print:
print(summary)
return summary, p_value, test_result
|
2d433c4651828cc962a94802eae72e0ab68e7f0b
| 3,643,546
|
import numpy as np
def ae(y, p):
"""Absolute error.
Absolute error can be defined as follows:
.. math::
\sum_i^n abs(y_i - p_i)
where :math:`n` is the number of provided records.
Parameters
----------
y : :class:`ndarray`
One dimensional array with ground truth values.
p : :class:`ndarray`
One dimensional array with predicted values.
Returns
-------
float
Absolute error as described above.
"""
return np.abs(y-p).sum()
|
6f08799429c561af37a941e0678ba0c147ba3a9c
| 3,643,547
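A worked example for ae above:

import numpy as np
y = np.array([1.0, 2.0, 3.0])
p = np.array([1.0, 1.0, 5.0])
ae(y, p)  # 3.0, i.e. |0| + |1| + |2|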
|
import numpy as np
def create_masks_from_plane(normal, dist, shape):
"""
Create a binary mask of given size based on a plane defined by its
normal and a point on the plane (in voxel coordinates).
Parameters
----------
dist: Distance of the plane to the origin (in voxel coordinates).
normal: Normal of the plane (in voxel coordinates).
shape: Shape of the mask that will be created.
Returns
-------
Binary mask of specified shape split in two by the given plane.
"""
grid_x, grid_y, grid_z = np.meshgrid(range(shape[0]),
range(shape[1]),
range(shape[2]),
indexing='ij')
position = np.column_stack((grid_x.ravel(order='F'),
grid_y.ravel(order='F'),
grid_z.ravel(order='F')))
# distance_from_plane = np.dot((position - np.transpose(point)), normal)
distance_from_plane = np.dot(position, normal) + dist
distance_vol = np.array(distance_from_plane).reshape((shape[0],
shape[1],
shape[2]),
order='F')
binary_mask = np.empty(distance_vol.shape, dtype=np.uint8)
binary_mask[:, :, :] = distance_vol[:, :, :] >= 0
return binary_mask
|
c6f3995a12aa98f960364332195ac5caeb1d6fe4
| 3,643,548
|
from typing import List
from sacremoses import MosesDetokenizer
def untokenize(tokens: List[str], lang: str = "fr") -> str:
"""
Joins a list of tokens back into a single string.
["J'", 'ai'] >>> "J' ai"
Parameters
----------
lang : string
language code
Returns
-------
string
text
"""
d = MosesDetokenizer(lang=lang)
text: str = d.detokenize(tokens, unescape=False)
return text
|
551ecf233b0869c4912b47ff1dee765647b07acc
| 3,643,549
|
import os
def RSS_LABEL_TO_DIR(label_, is_html_):
"""Return the directory path to store URLs and HTML downloaded from RSS
@param label_: the RSS label being crawled
@param is_html_: True to return the HTML directory and False to return the URLs directory
"""
bottom_dir_ = '/'.join(label_.split('-'))
ret_ = None
if is_html_:
ret_ = os.path.join(CONST.RSS_HTML_DIR, bottom_dir_)
else:
ret_ = os.path.join(CONST.RSS_URLS_DIR, bottom_dir_)
if not os.path.exists(ret_): os.makedirs(ret_)
return ret_
|
099c91177c6cfcca50782009b2c99542410eff06
| 3,643,550
|
def unwrap_cachable(func):
"""
Converts any HashableNodes in the argument list of a function into their standard node
counterparts.
"""
def inner(*args, **kwargs):
args, kwargs = _transform_by_type(lambda hashable: hashable.node, HashableNode,
*args, **kwargs)
return func(*args, **kwargs)
return inner
|
40b8f4b62045808815c67f0a22b4d8b97c9fbb1e
| 3,643,551
|
def tuples_to_full_paths(tuples):
"""
For a set of tuples of possible end-to-end path [format is:
(up_seg, core_seg, down_seg)], return a list of fullpaths.
"""
res = []
for up_segment, core_segment, down_segment in tuples:
if not up_segment and not core_segment and not down_segment:
continue
if not _check_connected(up_segment, core_segment,
down_segment):
continue
up_iof, up_hofs, up_mtu, up_exp = _copy_segment(
up_segment, False, (core_segment or down_segment))
core_iof, core_hofs, core_mtu, core_exp = _copy_segment(
core_segment, up_segment, down_segment)
down_iof, down_hofs, down_mtu, down_exp = _copy_segment(
down_segment, (up_segment or core_segment), False, cons_dir=True)
args = []
for iof, hofs in [(up_iof, up_hofs), (core_iof, core_hofs),
(down_iof, down_hofs)]:
if iof:
args.extend([iof, hofs])
path = SCIONPath.from_values(*args)
if up_segment:
up_core = list(reversed(list(up_segment.iter_asms())))
else:
up_core = []
if core_segment:
up_core += list(reversed(list(core_segment.iter_asms())))
if_list = _build_interface_list(up_core)
if down_segment:
down_core = list(down_segment.iter_asms())
else:
down_core = []
if_list += _build_interface_list(down_core, cons_dir=True)
mtu = _min_mtu(up_mtu, core_mtu, down_mtu)
exp = min(up_exp, core_exp, down_exp)
path_meta = FwdPathMeta.from_values(path, if_list, mtu, exp)
res.append(path_meta)
return res
|
f5b15e0e2483d194f6cf6c3eb8ec318aadd7b960
| 3,643,552
|
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError('Invalid file object: {!r}'.format(fileobj)
) from None
if fd < 0:
raise ValueError('Invalid file descriptor: {}'.format(fd))
return fd
|
8b1bea4083c0ecf481c712c8b06c76257cea43db
| 3,643,553
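Illustrative calls to _fileobj_to_fd above (the file name is hypothetical):

_fileobj_to_fd(7)             # 7: integers pass through unchanged
with open("example.txt", "w") as f:
    _fileobj_to_fd(f)         # same value as f.fileno()
_fileobj_to_fd("not a file")  # raises ValueError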
|
def request_changes_pull_request(pull_request=None, body_or_reason=None):
"""
:param pull_request:
:param body_or_reason:
:return:
"""
if not pull_request:
raise ValueError("you must provide pull request")
if not body_or_reason:
raise ValueError("you must provide request changes comment(s)")
return pull_request.create_review(event=PULL_REQUEST_EVENT_REQUEST_CHANGES, body=body_or_reason)
|
6487b8b47a8a33882010083e97ebbd57b464311b
| 3,643,554
|
from typing import Callable
from typing import Union
from typing import Type
from typing import Tuple
def handle(
func: Callable,
exception_type: Union[Type[Exception], Tuple[Type[Exception]]],
*args,
**kwargs
):
"""
Call function with errors handled in cfpm's way.
Before using this function, make sure all of func's errors are known and
can exit safely after an error is raised without cleaning up.
Args:
func: The function to be called.
exception_type: The type(s) of the exceptions that can be handled
safely.
"""
try:
return func(*args, **kwargs)
except exception_type as e:
error(e)
|
d290fa4353a6e608b21464c33adc6f72675d9e6c
| 3,643,555
|
def hrnetv2_w32(**kwargs):
"""
HRNetV2-W32 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs)
|
859642b2631457fd3fd8389370d2618666269ebe
| 3,643,556
|
from .utils.globalcache import c
def medicare_program_engagement():
"""
Produces a wide dataset at the NPI level that shows when a provider entered
and exited the three different medicare databases: Part B, Part D, and
Physician Compare
"""
partd = part_d_files(summary=True,
usecols=['npi', 'total_claim_count'])
partd_engage = (partd.assign(PartD_Max_Year=lambda df: df.Year,
PartD_Min_Year=lambda df: df.Year)
.groupby('npi', as_index=False)
.agg({'PartD_Min_Year': min, 'PartD_Max_Year': max})
)
partb = part_b_files(summary=True,
columns=['National Provider Identifier',
'Number of Medicare Beneficiaries'])
partb_engage = (partb.assign(PartB_Max_Year=lambda df: df.Year,
PartB_Min_Year=lambda df: df.Year)
.groupby('National Provider Identifier',
as_index=False)
.agg({'PartB_Min_Year': min, 'PartB_Max_Year': max})
.rename(columns={'National Provider Identifier':
'npi'}))
pc = c.physician_compare_select_vars([],
drop_duplicates=False,
date_var=True)
pc_engage = (pc.assign(Year=pc.date.dt.year)
.drop(columns='date')
.drop_duplicates())
pc_engage = (pc_engage.assign(PC_Max_Year=lambda df: df.Year,
PC_Min_Year=lambda df: df.Year)
.groupby('NPI', as_index=False)
.agg({'PC_Min_Year': min, 'PC_Max_Year': max})
.rename(columns={'NPI': 'npi'}))
df = (pc_engage
.merge(partd_engage, how='outer')
.merge(partb_engage, how='outer')
.convert_dtypes({x: 'Int64' for x in pc_engage.columns}))
df.loc[((df.PC_Max_Year == 2020)
| (df.PartD_Max_Year == 2017)
| (df.PartB_Max_Year == 2017))
& ~((df.PartD_Max_Year.notnull()
& df.PartB_Max_Year.notnull()
& (df.PC_Max_Year < 2020))), 'maybe_active'] = True
df = df.assign(maybe_active=df.maybe_active.fillna(False))
df.loc[df.PC_Max_Year == 2020, 'active_2020'] = True
df = df.assign(active_2020=df.active_2020.fillna(False))
return df
|
3a4bd0545473f229c8452680fc38c6ded2cb14bf
| 3,643,557
|
def _is_bumf(value):
"""
Return true if this value is filler, en route to skipping over empty lines
:param value: value to check
:type value: object
:return: whether the value is filler
:rtype: bool
"""
if type(value) in (unicode, str):
return value.strip() == ''
return value is None
|
1812e82036ed4bdbdee4e2e032886ac2c788a5ff
| 3,643,558
|
def pos_tag(words, engine="unigram", corpus="orchid"):
    """
    Part of Speech tagging function.
    :param list words: a list of tokenized words
    :param str engine:
        * unigram - unigram tagger (default)
        * perceptron - perceptron tagger
        * artagger - RDR POS tagger
    :param str corpus:
        * orchid - annotated Thai academic articles
        * pud - Parallel Universal Dependencies (PUD) treebanks
    :return: returns a list of labels regarding which part of speech it is
    """
    if not words:
        return []
    if engine == "perceptron":
        from .perceptron import tag as tag_
    elif engine == "artagger":
        from artagger import Tagger

        def tag_(words, corpus=None):
            if not words:
                return []
            words_ = Tagger().tag(" ".join(words))
            return [(word.word, word.tag) for word in words_]
    else:  # default, use "unigram" ("old") engine
        from .unigram import tag as tag_
    return tag_(words, corpus=corpus)
|
8c8328950fba9082220d9c6be3b9fc8f9e6c3332
| 3,643,559
|
import warnings
def derive(control):
"""
gui.derive will be removed after mGui 2.2; for now it's going to issue a deprecation warning and call `wrap()`
"""
warnings.warn("gui.derive() should be replaced by gui.wrap()", PendingDeprecationWarning)
return wrap(control)
|
a2f463c9a66425e5066c504803b5754c2260cbc9
| 3,643,560
|
import base64
def hex_to_base64(hex_):
""" Converts hex string to base64 """
return base64.b64encode(bytes.fromhex(hex_))
|
26f42b25c9e804bc1b786aadab033db104882f4b
| 3,643,561
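Quick checks for hex_to_base64 above:

hex_to_base64("4d616e")    # b'TWFu'     ('Man' re-encoded as base64)
hex_to_base64("deadbeef")  # b'3q2+7w=='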
|
def dt2iso(orig_dt):
"""datetime to is8601 format."""
return timeutils.isotime(orig_dt)
|
9887db04c4b3703a4f0c43c874c8d907cc744ea5
| 3,643,562
|
def catalog(access_token, user_id, query=None): # noqa: E501
"""Query the list of all the RDF graphs' names (URIs) and the response will be JSON format.
# noqa: E501
:param access_token: Authorization access token string
:type access_token: dict | bytes
:param user_id: the ID of the organization of the client application
:type user_id: str
:param query: SPARQL query expression used to query the graphs (max 1536 characters). Note the common lowest limit for the entire URL is 2048. The query SPARQL string must be url-encoded. The example below is not url-encoded to show the un-encoded SPARQL content.
:type query: str
:rtype: GraphListType
"""
if connexion.request.is_json:
access_token = AccessToken.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
f3819e76be5a1559f60140542d151de1f1b50b0e
| 3,643,563
|
import json
def _make_chrome_policy_json():
"""Generates the json string of chrome policy based on values in the db.
This policy string has the following form:
{
"validProxyServers": {"Value": map_of_proxy_server_ips_to_public_key},
"enforceProxyServerValidity": {"Value": boolean}
}
Returns:
A json string of current chrome policy.
"""
proxy_servers = models.ProxyServer.query.all()
proxy_server_dict = {}
for server in proxy_servers:
proxy_server_dict[server.ip_address] = (
server.get_public_key_as_authorization_file_string())
proxy_server_value_dict = {"Value" : proxy_server_dict}
config = ufo.get_user_config()
config_value_dict = {"Value" : config.proxy_server_validity}
policy_dictionary = {
"validProxyServers": proxy_server_value_dict,
"enforceProxyServerValidity": config_value_dict,
}
return json.dumps(policy_dictionary)
|
629450bc9bb0c2c0ce61b25568a4689b20c89766
| 3,643,564
|
def get_rgb_color(party_id):
"""Get RGB color of party."""
if party_id not in PARTY_TO_COLOR_OR_PARTY:
return UNKNOWN_PARTY_COLOR
color_or_party = PARTY_TO_COLOR_OR_PARTY[party_id]
if isinstance(color_or_party, tuple):
return color_or_party
return get_rgb_color(color_or_party)
|
18585d46551e1a1646e28d4371d68537e94975ac
| 3,643,565
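A sketch of how get_rgb_color above resolves party aliases; PARTY_TO_COLOR_OR_PARTY and UNKNOWN_PARTY_COLOR are module constants not shown here, so the mapping below is purely hypothetical:

UNKNOWN_PARTY_COLOR = (128, 128, 128)    # hypothetical fallback colour
PARTY_TO_COLOR_OR_PARTY = {
    "red_party": (220, 30, 30),          # party mapped directly to a colour
    "red_party_youth": "red_party",      # alias resolved recursively
}
get_rgb_color("red_party_youth")  # -> (220, 30, 30)
get_rgb_color("unknown_party")    # -> (128, 128, 128)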
|
from datetime import datetime
def view_application(application_id):
"""Views an application with ID.
Args:
application_id (int): ID of the application.
Returns:
str: redirect to the appropriate url.
"""
# Get user application.
application = ApplicationModel.query.filter_by(id=application_id).first()
isPersonalApplication = False
# Redirect if application does not exist.
if not application:
flash("Application with ID {} is not present in the database.".format(str(application_id)), 'danger')
current_app.logger.info("{} tried to view application with ID {} which does not exist in the database".format(current_user.name, str(application_id)))
return redirect(url_for('hr.index'))
# check if application is a personal application.
if current_user.application and current_user.application.id == application_id:
isPersonalApplication = True
# Check if application corp is the user's corp.
    if not isPersonalApplication and application.corporation.id != current_user.get_corp().id:
flash('That application is not to your corp.', 'danger')
current_app.logger.info("{} tried to view application which is not to their corporation.".format(current_user.name))
return redirect(url_for('hr.index'))
# Check if user is viewing a personal application or someone else's application.
if not isPersonalApplication and not current_user.has_permission('read_applications'):
flash("You do not have the required permission to view other people's applications.", "danger")
current_app.logger.info("{} tried to illegally access someone else's application but didn't have the required read_applications permission.".format(current_user.name))
return redirect(url_for('hr.index'))
# Make application forms.
removeApplicationForm = RemoveApplicationForm()
editApplicationForm = EditNoteForm(notes=application.character.notes)
# Removal of applications.
if request.method == 'POST':
# Check if notes were updated.
if 'btn' not in request.form:
if 'notes' in request.form and editApplicationForm.validate_on_submit():
oldNote = application.character.notes
application.character.notes = editApplicationForm.notes.data
Database.session.commit()
flash("Successfully updated note.", "success")
current_app.logger.info("{} updated {}'s note from '{}' to '{}'.".format(current_user.name, application.character.name, oldNote, editApplicationForm.notes.data))
return redirect(url_for('hr.view_application', application_id=application.id))
# Check other button presses.
if request.form['btn'] == "RemoveApplication":
# Check if application is valid.
if not removeApplicationForm.validate_on_submit():
flash('Please make sure you provide a reason when removing an application.', 'danger')
return redirect(url_for('hr.view_application', application_id=application.id))
characterName = application.character.name
corpName = application.corporation.name
rejectionReason = removeApplicationForm.rejection_reason.data
# Add note with rejection reason.
# If there are already notes, add an enter.
if application.character.notes:
application.character.notes += "\n"
application.character.notes += "Application removed ({}) by {}: {}".format(datetime.utcnow().strftime('%Y/%m/%d'), current_user.name, rejectionReason)
Database.session.delete(application)
Database.session.commit()
flash("Successfully removed application of {} to {}.".format(characterName, corpName), 'success')
current_app.logger.info("{} removed application of {} to {} with reason '{}'.".format(current_user.name, characterName, corpName, rejectionReason))
elif request.form['btn'] == "RemovePersonalApplication":
characterName = application.character.name
corpName = application.corporation.name
Database.session.delete(application)
Database.session.commit()
flash("Successfully removed application of {} to {}.".format(characterName, corpName), 'success')
current_app.logger.info("{} removed application of {} to {}.".format(current_user.name, characterName, corpName))
elif request.form['btn'] == "UpdateApplication":
application.ready_accepted = not application.ready_accepted
newStatus = "Ready to be accepted" if application.ready_accepted else "Being processed"
Database.session.commit()
flash("Successfully set {} application status to {}.".format(application.character.name, newStatus), 'success')
current_app.logger.info("{} edited status of {} application to {}".format(current_user.name, application.character.name, newStatus))
return redirect(url_for('hr.view_application', application_id=application.id))
return redirect(url_for('hr.index'))
return render_template('hr/view_application.html', application=application, personal_application=isPersonalApplication,
remove_form=removeApplicationForm, edit_form=editApplicationForm, discord_url=current_app.config['DISCORD_RECRUITMENT_INVITE'],
client_id=EveAPI['full_auth_preston'].client_id, client_secret=EveAPI['full_auth_preston'].client_secret, scopes=EveAPI['full_auth_preston'].scope)
|
dda04250b45a1a166c254b48039155e85ca62ea3
| 3,643,566
|
import logging
from joblib import Parallel, delayed  # Parallel/delayed are used below; joblib is the assumed source
def build_save_containers(platforms, bucket) -> int:
"""
Entry point to build and upload all built dockerimages in parallel
:param platforms: List of platforms
:param bucket: S3 bucket name
:return: 1 if error occurred, 0 otherwise
"""
if len(platforms) == 0:
return 0
platform_results = Parallel(n_jobs=len(platforms), backend="multiprocessing")(
delayed(_build_save_container)(platform, bucket)
for platform in platforms)
is_error = False
for platform_result in platform_results:
if platform_result is not None:
logging.error('Failed to generate {}'.format(platform_result))
is_error = True
return 1 if is_error else 0
|
9744577efabbd800c16e9c7f57c9c7b31654cec1
| 3,643,567
|
def get_object_record(obj_key):
"""
Query the object's record.
Args:
obj_key: (string) The key of the object.
Returns:
The object's data record.
"""
record = None
model_names = OBJECT_KEY_HANDLER.get_models(obj_key)
for model_name in model_names:
try:
# Get record.
model_obj = apps.get_model(settings.WORLD_DATA_APP, model_name)
record = model_obj.objects.get(key=obj_key)
break
        except Exception as e:
            ostring = "Can not get record %s: %s." % (obj_key, e)
            print(ostring)
            traceback.print_exc()
continue
return record
|
c32bd3f12babc4f7c30567d6f2529dd037e3e563
| 3,643,568
|
def diff_cars(c1, c2):
"""
diffs two cars
returns a DiffSet containing DiffItems that tell what's missing in c1
as compared to c2
:param c1: old Booking object
:param c2: new Booking object
:return: DiffSet (c1-c2)
"""
strategy = Differ.get_strategy(CAR_DIFF_STRATEGY)
return strategy.diff(c1, c2)
|
fda0e12bea0fd70fbed1a0e2c445941dc44f8cb7
| 3,643,569
|
import os
def dist_env():
"""
Return a dict of all variable that distributed training may use.
NOTE: you may rewrite this function to suit your cluster environments.
"""
trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
num_trainers = 1
training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
assert(training_role == "PSERVER" or training_role == "TRAINER")
# - PADDLE_TRAINER_ENDPOINTS means nccl2 mode.
# - PADDLE_PSERVER_ENDPOINTS means pserver mode.
# - PADDLE_CURRENT_ENDPOINT means current process endpoint.
trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
pserver_endpoints = os.getenv("PADDLE_PSERVER_ENDPOINTS")
current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
if trainer_endpoints:
trainer_endpoints = trainer_endpoints.split(",")
num_trainers = len(trainer_endpoints)
elif pserver_endpoints:
num_trainers = int(os.getenv("PADDLE_TRAINERS_NUM"))
return {
"trainer_id": trainer_id,
"num_trainers": num_trainers,
"current_endpoint": current_endpoint,
"training_role": training_role,
"pserver_endpoints": pserver_endpoints,
"trainer_endpoints": trainer_endpoints
}
|
1a7af4fbbea2e0d7e4e90bad770a5c68426689e9
| 3,643,570
|
import json
def main(request, response):
"""Helper handler for Beacon tests.
It handles two forms of requests:
STORE:
A URL with a query string of the form 'cmd=store&sid=<token>&tidx=<test_index>&tid=<test_name>'.
Stores the receipt of a sendBeacon() request along with its validation result, returning HTTP 200 OK.
Parameters:
tidx - the integer index of the test.
tid - a friendly identifier or name for the test, used when returning results.
STAT:
A URL with a query string of the form 'cmd=stat&sid=<token>&tidx_min=<min_test_index>&tidx_max=<max_test_index>'.
Retrieves the results of test with indices [min_test_index, max_test_index] and returns them as
a JSON array and HTTP 200 OK status code. Due to the eventual read-once nature of the stash, results for a given test
are only guaranteed to be returned once, though they may be returned multiple times.
Parameters:
tidx_min - the lower-bounding integer test index.
tidx_max - the upper-bounding integer test index.
Example response body:
[{"id": "Test1", error: null}, {"id": "Test2", error: "some validation details"}]
Common parameters:
cmd - the command, 'store' or 'stat'.
sid - session id used to provide isolation to a test run comprising multiple sendBeacon()
tests.
"""
session_id = request.GET.first("sid");
command = request.GET.first("cmd").lower();
# Workaround to circumvent the limitation that cache keys
# can only be UUID's.
def wrap_key(key, path):
return (str(path), str(key))
request.server.stash._wrap_key = wrap_key
# Append CORS headers if needed.
if "origin" in request.GET:
response.headers.set("Access-Control-Allow-Origin", request.GET.first("origin"))
if "credentials" in request.GET:
response.headers.set("Access-Control-Allow-Credentials", request.GET.first("credentials"))
# Handle the 'store' and 'stat' commands.
if command == "store":
# The test id is just used to make the results more human-readable.
test_id = request.GET.first("tid")
# The test index is used to build a predictable stash key, together
# with the unique session id, in order to retrieve a range of results
# later knowing the index range.
test_idx = request.GET.first("tidx")
test_data_key = build_stash_key(session_id, test_idx)
test_data = { "id": test_id, "error": None }
payload = ""
if "Content-Type" in request.headers and \
"form-data" in request.headers["Content-Type"]:
if "payload" in request.POST:
# The payload was sent as a FormData.
payload = request.POST.first("payload")
else:
# A FormData was sent with an empty payload.
pass
else:
# The payload was sent as either a string, Blob, or BufferSource.
payload = request.body
        payload_parts = list(filter(None, payload.split(":")))
if len(payload_parts) > 0:
payload_size = int(payload_parts[0])
# Confirm the payload size sent matches with the number of characters sent.
if payload_size != len(payload_parts[1]):
test_data["error"] = "expected %d characters but got %d" % (payload_size, len(payload_parts[1]))
else:
# Confirm the payload contains the correct characters.
for i in range(0, payload_size):
if payload_parts[1][i] != "*":
test_data["error"] = "expected '*' at index %d but got '%s''" % (i, payload_parts[1][i])
break
# Store the result in the stash so that it can be retrieved
# later with a 'stat' command.
request.server.stash.put(test_data_key, test_data)
elif command == "stat":
test_idx_min = int(request.GET.first("tidx_min"))
test_idx_max = int(request.GET.first("tidx_max"))
# For each result that has come in, append it to the response.
results = []
for test_idx in range(test_idx_min, test_idx_max+1): # +1 because end is exclusive
test_data_key = build_stash_key(session_id, test_idx)
test_data = request.server.stash.take(test_data_key)
if test_data:
results.append(test_data)
response.headers.set("Content-Type", "text/plain")
response.content = json.dumps(results)
else:
response.status = 400
|
5d970bb10d689bb55f70cd841bd01501d88428c7
| 3,643,571
|
def calc_chi2(model, dof=None):
"""
Calculate chi-square statistic.
Parameters
----------
model : Model
Model.
dof : int, optional
Degrees of freedom statistic. The default is None.
Returns
-------
tuple
chi2 statistic and p-value.
"""
if dof is None:
dof = calc_dof(model)
if model.last_result.name_obj == 'FIML':
stat = model.last_result.fun / model.n_samples
else:
stat = model.n_samples * model.last_result.fun
return stat, 1 - chi2.cdf(stat, dof)
|
46ed27fca1f36fdc8a044136da1ea4a032be1554
| 3,643,572
|
def QuadraticCommandAddControl(builder, control):
"""This method is deprecated. Please switch to AddControl."""
return AddControl(builder, control)
|
9b775f34400a0deeea93fd58a211915462735fed
| 3,643,573
|
def authenticated_api(username, api_root=None, parser=None):
"""Return an oauthenticated tweety API object."""
auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET)
try:
user = User.objects.get(username__iexact=username)
sa = user.social_auth.all()[0]
auth.set_access_token(sa.tokens['oauth_token'],
sa.tokens['oauth_token_secret'])
return tweepy.API(auth,
api_root=api_root or settings.TWITTER_API_ROOT,
parser=parser or JSONParser())
except:
return None
|
82237d40b89ad860720ae3830fa37de76439a2be
| 3,643,574
|
def get_model_header(fpath):
"""
:param fpath:
:return:
"""
with gz.open(fpath, 'rt') as modelfile:
header = modelfile.readline().strip().strip('#').split()
return header
|
bd3600d831d212821c160b994ea73c24ee04ce6d
| 3,643,575
|
def _parse_vertex(vertex_row):
"""Parses a line in a PLY file which encodes a vertex coordinates.
Args:
vertex_row: string with vertex coordinates and color.
Returns:
2-tuple containing a length-3 array of vertex coordinates (as
floats) and a length-3 array of RGB color values (as ints between 0
and 255, inclusive).
"""
vertex = vertex_row.strip().split()
# The row must contain coordinates with RGB/RGBA color in addition to that.
if len(vertex) >= 6:
# Supports only RGB colors now, alpha channel will be ignored.
# TODO(b/129298103): add support of RGBA in .ply files.
return ([float(coord) for coord in vertex[:3]],
[int(channel) for channel in vertex[3:6]])
raise ValueError('PLY file must contain vertices with colors.')
|
cc8d7d59762464cf03f9caa8d057c690055c939c
| 3,643,576
|
def clean_tag(tag):
"""clean up tag."""
if tag is None:
return None
t = tag
if isinstance(t, list):
t = t[0]
if isinstance(t, tuple):
t = t[0]
if t.startswith('#'):
t = t[1:]
t = t.strip()
t = t.upper()
t = t.replace('O', '0')
t = t.replace('B', '8')
return t
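
# Minimal usage sketch: a leading '#' is dropped and the common O->0 / B->8
# confusions are normalised, even when the tag arrives wrapped in a list.
assert clean_tag("#9obv") == "908V"
assert clean_tag(["#9obv"]) == "908V"
assert clean_tag(None) is None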
|
1d2709323c4d80f290701d5cdc3a993b4bac25d4
| 3,643,577
|
def massM2(param):
""" Mass term in the neutrino mass basis.
@type param : PhysicsConstants
@param param : set of physical parameters to be used.
@rtype : numpy array
@return : mass matrix in mass basis.
"""
M2 = np.zeros([param.numneu,param.numneu],complex)
for k in np.arange(1,param.numneu,1):
M2[k,k] = param.dmsq[k+1]
return M2
|
38997454d308b4730e4eac5a764977fc72a6b373
| 3,643,578
|
import json
def get_input_data(train_file_path='train.json', train=True):
"""Retrieves training (X) and label (y) matrices. Note that this can take a few seconds to run.
Args:
train_file_path is the path of the file containing training data.
Returns:
A tuple containing the X training matrix in the first position, and the y label matrix in the second position.
X is of shape (N, 75, 75, 3), where N is the number of training images, 75 x 75 is the dimension of the images,
and 3 represents the number of channels for each image.
"""
with open(train_file_path, 'r') as train_file:
json_data = train_file.read()
train_data = json.loads(json_data)
band_1 = [instance['band_1'] for instance in train_data]
band_2 = [instance['band_2'] for instance in train_data]
ids = [instance['id'] for instance in train_data]
band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in band_1])
band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in band_2])
# Combine all three channels into an array of 1604 tensors (number of training images) with dimension 75 x 75 x 3
X_train = np.concatenate([band_1[:, :, :, np.newaxis], band_2[:, :, :, np.newaxis]], axis=-1)
if train:
# True labels of data, either iceberg or not iceberg
y_train = np.array([instance['is_iceberg'] for instance in train_data])
return X_train, y_train, ids
else:
return X_train, ids
|
5b42339917f0ec97ae584a03ba415881221e639c
| 3,643,579
|
def dice_coef(y_true, y_pred):
"""
:param y_true: the labeled mask corresponding to a mammogram scan
:param y_pred: the predicted mask of the scan
:return: A metric that accounts for precision and recall
on the scale from 0 - 1. The closer to 1, the
better.
Dice = 2 * (|X & Y|)/ |X|+ |Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|))
Citation (MIT License): https://github.com/jocicmarko/
ultrasound-nerve-segmentation/blob/
master/train.py
"""
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + SMOOTH) / (K.sum(y_true_f) + K.sum(y_pred_f) + SMOOTH)
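
# Minimal usage sketch; K and SMOOTH are module-level names not shown in this
# snippet, assumed to be the Keras backend and a small smoothing constant.
from keras import backend as K  # assumed
SMOOTH = 1.0                    # assumed
y = K.ones((1, 4, 4, 1))
print(K.eval(dice_coef(y, y)))  # 1.0 for identical masks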
|
e0f24abe29771f384e640e9e2f2420add040492f
| 3,643,580
|
def linmsg(x, end_pts_msg=None, max_msg=None, fill_value=1.e20):
"""
Linearly interpolates to fill in missing values.
x = Ngl.linmsg(x,end_pts_msg=None,max_msg=None,fill_value=1.e20)
x -- A numpy or masked array of any dimensionality that contains missing values.
end_pts_msg -- how missing beginning and end points will be
returned. If this value is greater than or equal to 0,
then the beginning and end points will be returned as
missing (default option). If this value is less
than 0, then they will be set to the nearest
non-missing value.
max_msg -- the maximum number of consecutive missing values to be
interpolated. If not set, then this function will try
to interpolate as many values as it can.
fill_value -- The missing value for x. Defaults to 1.e20 if not set.
"""
#
# Set defaults for input parameters not specified by user.
#
if end_pts_msg is None:
end_pts_msg = 0
#
# Setting max_msg to 0 will cause the C wrapper to set this to
# npts before going into the Fortran routine.
#
if max_msg is None:
max_msg = 0
#
# If input array is a numpy masked array, return a numpy masked array.
# Otherwise missing values are dealt with using the fill_value.
#
fv = _get_fill_value(x)
  if fv is None:
return fplib.linmsg(_promote_scalar(x),end_pts_msg,max_msg,fill_value)
else:
aret = fplib.linmsg(x.filled(fv), end_pts_msg, max_msg, fv)
return ma.masked_array(aret, fill_value=fv)
|
342abdc7536d8a1866c156cdc238e06338a20398
| 3,643,581
|
def get_or_create_actor_by_name(name):
"""
Return the actor corresponding to name if it does not exist,
otherwise create actor with name.
:param name: String
"""
return ta.ActorSystem().createActor(MyClass, globalName=name)
|
cc1ad620bc29139d6230e5a134ff72c3639a2bb1
| 3,643,582
|
def client():
"""Client to call tests against"""
options = {
'bind': '%s:%s' % ('0.0.0.0', '8080'),
'workers': str(number_of_workers()),
}
return testing.TestClient(falcon.API(), options)
|
3c075eb528e88a51a8f2c13e1197da6b2831197a
| 3,643,583
|
import math
def hard_negative_mining(loss, labels, neg_pos_ratio=3):
"""
    Limit the ratio of negative to positive examples during training.
    By default, the number of negatives kept is three times the number of positives.
    Args:
        loss (N, num_priors): the loss for each example.
        labels (N, num_priors): the labels.
        neg_pos_ratio: ratio of negatives to positives (num_negatives / num_positives)
    """
    pos_mask = labels > 0
    num_pos = pos_mask.long().sum(dim=1, keepdim=True)
    num_neg = num_pos * neg_pos_ratio
    loss[pos_mask] = -math.inf  # push positives to the bottom of the sort
    # Two sorts: the second yields each element's rank in the descending-loss order.
    _, indexes = loss.sort(dim=1, descending=True)  # descending sort; returns (values, indices)
    _, orders = indexes.sort(dim=1)
    # Keep the hardest num_neg negatives; positives were set to -inf so they rank last.
    neg_mask = orders < num_neg
    return pos_mask | neg_mask
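
# Minimal usage sketch (assumes PyTorch): the single positive prior plus the
# two hardest negatives are kept when neg_pos_ratio is 2.
import torch
loss = torch.tensor([[0.9, 0.1, 0.5, 0.3]])
labels = torch.tensor([[1, 0, 0, 0]])
mask = hard_negative_mining(loss.clone(), labels, neg_pos_ratio=2)  # clone(): loss is modified in place
# mask -> tensor([[ True, False,  True,  True]])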
|
3b2e38ab2b0bbd9732fceafdfd023ea220b3c5eb
| 3,643,584
|
def groups():
"""
Return groups
"""
return _clist(getAddressBook().groups())
|
16db4befa0863b15055fd7b557ecfefa8da55e20
| 3,643,585
|
def round_temp(value):
"""Round temperature for publishing."""
return round(value, dev_fan.round_temp)
|
39f7d5e55d0ba444b675b8ae612f5f38350af050
| 3,643,586
|
def get_key_from_property(prop, key, css_dict=None, include_commented=False):
"""Returns the entry from the dictionary using the given key"""
if css_dict is None:
css_dict = get_css_dict()[0]
cur = css_dict.get(prop) or css_dict.get(prop[1:-1])
if cur is None:
return None
value = cur.get(key)
if value is not None:
return value
for v in cur['values']:
if (v.startswith('<') or (include_commented and v.startswith('<_'))) and v.endswith('>'):
ret = get_key_from_property(v, key, css_dict, include_commented)
if ret is not None:
return ret
|
169a4369a8fc5cc9cfde18b302a308bafa1d4def
| 3,643,587
|
def bbox_area(gt_boxes):
"""
gt_boxes: (K, 4) ndarray of float
area: (k)
"""
K = gt_boxes.size(0)
gt_boxes_area = ((gt_boxes[:,2] - gt_boxes[:,0] + 1) *
(gt_boxes[:,3] - gt_boxes[:,1] + 1)).view(K)
return gt_boxes_area
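
# Minimal usage sketch (assumes PyTorch boxes in [x1, y1, x2, y2] format with
# inclusive pixel coordinates, hence the "+ 1"):
import torch
boxes = torch.tensor([[0., 0., 9., 9.], [10., 10., 19., 14.]])
print(bbox_area(boxes))  # tensor([100.,  50.])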
|
57ad16b8b339e4515dcd7e7126b9c6b35b6c3d8b
| 3,643,588
|
def DecodedMessage(tG,x):
"""
Let G be a coding matrix. tG its transposed matrix. x a n-vector received after decoding.
DecodedMessage Solves the equation on k-bits message v: x = v.G => G'v'= x' by applying GaussElimination on G'.
-------------------------------------
Parameters:
tG: Transposed Coding Matrix. Must have more rows than columns to solve the linear system. Must be full rank.
x: n-array. Must be in the Code (in Ker(H)).
"""
n,k = tG.shape
if n < k:
raise ValueError('Coding matrix G must have more columns than rows to solve the linear system on v\': G\'v\' = x\'')
rtG, rx = GaussElimination(tG,x)
rank = sum([a.any() for a in rtG])
    if rank != k:
        raise ValueError('Coding matrix G must have full rank = k to solve G\'v\' = x\'')
    message = np.zeros(k).astype(int)
    message[k - 1] = rx[k - 1]
    for i in reversed(range(k - 1)):
        message[i] = abs(rx[i] - BinaryProduct(rtG[i, list(range(i + 1, k))], message[list(range(i + 1, k))]))
return message
|
47968c4feed23a32abbbf34da1bed4521689f3d2
| 3,643,589
|
def get_ttp_card_info(ttp_number):
"""
Get information from the specified transport card number.
The number is the concatenation of the last 3 numbers of the first row and all the numbers of the second row.
See this image: https://tarjetatransportepublico.crtm.es/CRTM-ABONOS/archivos/img/TTP.jpg
:param str ttp_number: The number that identifies a transport card. It must be a string of the last 3 numbers
of the first row and all the numbers of the second row.
:return dict: A dictionary with information of the transport card. It has information regarding the titles
in that card, expiring dates, purchase dates, title types (young, normal, old, ...), among others.
"""
if ttp_number is not None:
client = Client(Urls.CITRAM_CARD_SERVICE.value)
result = client.service.ConsultaSaldo1(sNumeroTTP=ttp_number)
final_result = {'status': result['iCallLogField'],
'card_info': xmltodict.parse(result['sResulXMLField'])}
return final_result
else:
raise NotEnoughParametersException('You must specify a transport card number.')
|
fc8fb31ae5daf17173567d53a9a122c3d8e11ca5
| 3,643,590
|
import re
def tag_matches(tag, impl_version='trunk', client_version='trunk'):
"""Test if specified versions match the tag.
Args:
tag: skew test expectation tag, e.g. 'impl_lte_5' or 'client_lte_2'.
impl_version: WebLayer implementation version number or 'trunk'.
client_version: WebLayer implementation version number or 'trunk'.
Returns:
True if the specified versions match the tag.
Raises:
AssertionError if the tag is invalid.
"""
# 'All' is special cased to match anything.
if tag == 'all':
return True
# Extract the three components from the tag.
match = re.match(r'(client|impl)_([gl]te)_([0-9]+)', tag)
assert match is not None, (
'tag must be of the form "{client,impl}_{gte,lte}_$version", found %r' %
tag)
target_str, op_str, tag_version_str = match.groups()
# If a version is specified see if the tag refers to the same target or
# return False otherwise.
if impl_version != 'trunk' and target_str != 'impl':
return False
if client_version != 'trunk' and target_str != 'client':
return False
version = impl_version if impl_version != 'trunk' else client_version
assert type(version) == int, 'Specified version must be an integer.'
tag_version = int(tag_version_str)
op = OP_MAP[op_str]
return op(version, tag_version)
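
# Minimal usage sketch; OP_MAP is a module-level mapping not shown in this
# snippet, assumed to map the comparison suffixes to operator functions:
import operator
OP_MAP = {'lte': operator.le, 'gte': operator.ge}  # assumed definition
assert tag_matches('all')
assert tag_matches('impl_lte_5', impl_version=4)       # 4 <= 5
assert not tag_matches('impl_lte_5', impl_version=6)   # 6 > 5
assert not tag_matches('client_gte_3', client_version=2)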
|
dab3494063cd382615648d12d5dae03a47963af6
| 3,643,591
|
def split_params(param_string):
"""
Splits a parameter string into its key-value pairs
>>> d = split_params('alpha-0.5_gamma-0.9')
>>> d['alpha']
0.5
>>> d['gamma']
0.9
>>> d = split_params('depth-15_features-a-b-c')
>>> d['depth']
15
>>> d['features']
['a', 'b', 'c']
>>> d = split_params('alpha-0.1_l-a-b_trace_rate-None')
>>> d['alpha']
0.1
>>> d['l']
['a', 'b']
>>> d['trace_rate']
>>> print(d['trace_rate'])
None
>>> split_params('a-b-c')
{'a': ['b', 'c']}
>>> split_params('a_b_c')
{}
"""
#TODO: check for negatives i.e. alpha--1
parts = param_string.split('_')
params = {}
for i in range(len(parts)):
param = split_items(parts[i])
if len(param) < 2:
try:
parts[i+1] = parts[i] + "_" + parts[i+1]
            except IndexError:
                pass
continue
elif len(param) == 2:
params[param[0]] = param[1]
elif len(param) == 3 and len(param[1]) == 0:
params[param[0]] = -param[2]
else:
params[param[0]] = param[1:]
return params
|
d6b1c8b381abe94c1022e1a474b353423e844f55
| 3,643,592
|
def load_suites_from_directory(dir, recursive=True):
# type: (str, bool) -> List[Suite]
"""
Load a list of suites from a directory.
If the recursive argument is set to True, sub suites will be searched in a directory named
from the suite module: if the suite module is "foo.py" then the sub suites directory must be "foo".
Raise SuiteLoadingError if one or more suite could not be loaded.
"""
if not osp.exists(dir):
raise SuiteLoadingError("Directory '%s' does not exist" % dir)
suites = {}
for filename in get_py_files_from_dir(dir):
suite = load_suite_from_file(filename)
if not suite.hidden:
suites[filename] = suite
if recursive:
for dirname in _get_sub_dirs_from_dir(dir):
suite = suites.get(dirname + ".py")
if not suite:
suite_name = osp.basename(dirname)
suite = Suite(None, suite_name, build_description_from_name(suite_name))
suites[suite.name] = suite
for sub_suite in load_suites_from_directory(dirname, recursive=True):
suite.add_suite(sub_suite)
return sorted(sorted(filter(lambda s: not s.is_empty(), suites.values()), key=lambda s: s.name), key=lambda s: s.rank)
|
5bb0c83ee39537b0bb38a663b110f5ef6225833e
| 3,643,593
|
def deep_parameters_back(param, back_node, function_params, count, file_path, lineno=0, vul_function=None,
isback=False):
"""
    Outer wrapper for the deep recursive analysis; mainly sets up the initial conditions and decides when to start a new recursion.
:param isback:
:param lineno:
:param vul_function:
:param param:
:param back_node:
:param function_params:
:param count:
:param file_path:
:return:
"""
count += 1
padding = {}
is_co, cp, expr_lineno = parameters_back(param, back_node, function_params, lineno, vul_function=vul_function,
file_path=file_path, isback=isback)
if count > 20:
logger.warning("[Deep AST] depth too big, auto exit...")
return is_co, cp, expr_lineno
return is_co, cp, expr_lineno
|
5cc5669a3c071d14b5d4898f60315da27e397a8b
| 3,643,594
|
from typing import Optional
import re
def get_latest_runtime(dotnet_dir: Optional[str] = None, version_major: Optional[int] = 5,
version_minor: Optional[int] = 0, version_build: Optional[int] = 0) -> Optional[str]:
"""
Search and select the latest installed .NET Core runtime directory.
"""
dotnet_dir = dotnet_dir or get_dotnet_dir()
if not dotnet_dir:
return None
if "DOTNETRUNTIMEVERSION" in dotnet_const.ENVIRON:
tmp = join(dotnet_dir, "shared", "Microsoft.NETCore.App", dotnet_const.ENVIRON["DOTNETRUNTIMEVERSION"])
if isdir(tmp):
return tmp
runtime = None
for r in get_dotnet_runtimes():
if r.name == "Microsoft.NETCore.App":
vmatch = re.match(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<build>\d+)", r.version)
if vmatch:
tmp_major = int(vmatch.group("major"))
tmp_minor = int(vmatch.group("minor"))
tmp_build = int(vmatch.group("build"))
if tmp_major > version_major:
version_major = tmp_major
version_minor = tmp_minor
version_build = tmp_build
runtime = r
continue
if version_major == tmp_major:
if tmp_minor > version_minor:
version_minor = tmp_minor
version_build = tmp_build
runtime = r
continue
if version_minor == tmp_minor:
if tmp_build > version_build:
version_build = tmp_build
runtime = r
continue
if runtime is None:
runtime = r
continue
if runtime is None:
return None
tmp = join(dotnet_dir, "shared", "Microsoft.NETCore.App", runtime.version)
if isdir(tmp):
return tmp
tmp = join(runtime.path, runtime.version)
if isdir(tmp):
return tmp
return None
|
46db4e55163e6110d48264ed5ad4394662ade336
| 3,643,595
|
def choose_action(state, mdp_data):
"""
Choose the next action (0 or 1) that is optimal according to your current
mdp_data. When there is no optimal action, return a random action.
Args:
state: The current state in the MDP
mdp_data: The parameters for your MDP. See initialize_mdp_data.
Returns:
int, 0 or 1. The index of the optimal action according to your current MDP.
"""
# BONUS LEARNING OPPORTUNITY: When you have finished the problem set, try
# un-commenting the following two lines. This will implement a strategy
# called epsilon greedy, which drastically improves performance. Why do you
# think this works so well?
#
# if np.random.uniform() < 0.1: # 10% of the time, choose a random action
# return 0 if np.random.uniform() < 0.5 else 1
action = None
# *** START CODE HERE ***
right = mdp_data['transition_probs'][state, 0, :].dot(mdp_data['value'])
left = mdp_data['transition_probs'][state, 1, :].dot(mdp_data['value'])
if right > left:
action = 0
elif right < left:
action = 1
else:
action = np.random.choice([0, 1])
# *** END CODE HERE ***
return action
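
# Minimal usage sketch with a hypothetical 2-state, 2-action MDP; the mdp_data
# layout (transition_probs of shape [num_states, num_actions, num_states] and a
# per-state value vector) is assumed from how the function indexes it.
import numpy as np
mdp_data = {
    'transition_probs': np.array([[[0.9, 0.1], [0.2, 0.8]],
                                  [[0.5, 0.5], [0.5, 0.5]]]),
    'value': np.array([0.0, 1.0]),
}
print(choose_action(0, mdp_data))  # 1, since action 1 reaches the high-value state more often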
|
2cb1f50a62ec006367fb61d8e57eb95005670e31
| 3,643,596
|
def get_formatted_reproduction_help(testcase):
"""Return url to reproduce the bug."""
help_format = get_value_from_job_definition_or_environment(
testcase.job_type, 'HELP_FORMAT')
if not help_format:
return None
# Since this value may be in a job definition, it's non-trivial for it to
# include newlines. Instead, it will contain backslash-escaped characters
# that must be converted here (e.g. \n).
help_format = help_format.decode('unicode-escape')
arguments = get_arguments(testcase)
fuzzer_display = get_fuzzer_display(testcase)
fuzzer_name = fuzzer_display.name or 'NA'
fuzz_target = fuzzer_display.target or 'NA'
engine = fuzzer_display.engine or 'NA'
last_tested_crash_revision = str(
testcase.get_metadata('last_tested_crash_revision') or
testcase.crash_revision)
project_name = get_project_name(testcase.job_type)
testcase_id = str(testcase.key.id())
sanitizer = environment.get_memory_tool_name(testcase.job_type)
sanitizer_options = _get_memory_tool_options(testcase)
sanitizer_options_string = ' '.join(sanitizer_options)
bazel_test_args = _get_bazel_test_args(arguments, sanitizer_options)
result = help_format.replace('%TESTCASE%', testcase_id)
result = result.replace('%PROJECT%', project_name)
result = result.replace('%REVISION%', last_tested_crash_revision)
result = result.replace('%FUZZER_NAME%', fuzzer_name)
result = result.replace('%FUZZ_TARGET%', fuzz_target)
result = result.replace('%ENGINE%', engine)
result = result.replace('%SANITIZER%', sanitizer)
result = result.replace('%SANITIZER_OPTIONS%', sanitizer_options_string)
result = result.replace('%ARGS%', arguments)
result = result.replace('%BAZEL_TEST_ARGS%', bazel_test_args)
return result
|
c1cef28ae82e81e8c1814285dbe32e5d7ebe1ef6
| 3,643,597
|
import inspect
from functools import lru_cache  # used by the caching decorator below
def specialize_on(names, maxsize=None):
"""
A decorator that wraps a function, partially evaluating it with the parameters
defined by ``names`` (can be a string or an iterable of strings) being fixed.
The partially evaluated versions are cached based on the values of these parameters
using ``functools.lru_cache`` with the provided ``maxsize``
(consequently, these values should be hashable).
"""
if isinstance(names, str):
names = [names]
names_set = set(names)
def _specializer(func):
signature = inspect.signature(func)
if not names_set.issubset(signature.parameters):
            missing_names = names_set.difference(signature.parameters)
raise ValueError(
"The provided function does not have parameters: "
+ ", ".join(missing_names))
@lru_cache(maxsize=maxsize)
def get_pevaled_func(args):
return partial_apply(func, **{name:val for name, val in args})
def _wrapper(*args, **kwds):
bargs = signature.bind(*args, **kwds)
call_arguments = bargs.arguments.copy()
for name in list(bargs.arguments):
if name not in names_set:
del bargs.arguments[name] # automatically changes .args and .kwargs
else:
del call_arguments[name]
cache_args = tuple((name, val) for name, val in bargs.arguments.items())
pevaled_func = get_pevaled_func(cache_args)
bargs.arguments = call_arguments # automatically changes .args and .kwargs
return pevaled_func(*bargs.args, **bargs.kwargs)
return _wrapper
return _specializer
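
# Minimal usage sketch; partial_apply is not defined in this snippet and is
# assumed to behave like functools.partial with keyword arguments, so a
# stand-in is used here purely for illustration:
from functools import partial as partial_apply  # stand-in for the real helper

@specialize_on('exponent', maxsize=8)
def power(base, exponent):
    return base ** exponent

print(power(2, 3))  # 8; the exponent=3 specialisation is built once and cached
print(power(5, 3))  # 125; reuses the cached specialisation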
|
218cb169661507124acf1dae8076fa47eb313f1a
| 3,643,598
|
def parse_docstring(docstring: str, signature) -> str:
"""
Parse a docstring!
Note:
to try notes.
Args:
docstring: this is the docstring to parse.
Raises:
OSError: no it doesn't lol.
Returns:
markdown: the docstring converted to a nice markdown text.
"""
params = {}
exceptions = {}
returns = ""
lines = docstring.split("\n")
new_lines = []
i = 0
while i < len(lines):
if lines[i].lower() in ("args:", "arguments:", "params:", "parameters:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and params[name]:
params[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
params[name] = description.lstrip(" ")
j += 1
new_lines.append("**Parameters**\n")
new_lines.append("| Name | Type | Description |")
new_lines.append("| ---- | ---- | ----------- |")
for param_name, param_description in params.items():
param_name, param_default, param_type = get_param_info(signature, param_name)
# if param_default:
# param_default = f"`{param_default}`"
new_lines.append(f"| `{param_name}` | `{param_type}` | {param_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("raise:", "raises:", "except:", "exceptions:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and exceptions[name]:
exceptions[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
exceptions[name] = description.lstrip(" ")
j += 1
new_lines.append("**Exceptions**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
for exception_name, exception_description in exceptions.items():
new_lines.append(f"| `{exception_name}` | {exception_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("return:", "returns:"):
j = i + 1
while j < len(lines) and lines[j].startswith(" "):
description = lines[j].lstrip(" ")
returns += " " + description
j += 1
new_lines.append("**Returns**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
new_lines.append(f"| `{get_return_type(signature)}` | {returns} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ADMONITIONS.keys():
j = i + 1
admonition = []
while j < len(lines) and lines[j].startswith(" ") or lines[j] == "":
admonition.append(lines[j])
j += 1
new_lines.append(f"!!! {ADMONITIONS[lines[i].lower()]}")
new_lines.append("\n".join(admonition))
new_lines.append("")
i = j - 1
else:
new_lines.append(lines[i])
i += 1
return "\n".join(new_lines)
|
f831cda6046853312f6b0afe28683d3fc81dc874
| 3,643,599
|