content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def is_anno_end_marker(tag):
    """
    Return True if the tag's text matches the annotation end-marker pattern.

    NOTE(review): the original docstring said "beginning of a new post",
    which contradicts the function name — confirm which is intended.

    :param tag: a BeautifulSoup-style tag exposing ``get_text()``
    :return: whether ``anno_end_marker_regex`` matches the tag's text
    :rtype: bool
    """
    # match() returns a Match object (truthy) or None (falsy); bool() is
    # clearer than the explicit if/else over the match object.
    return bool(anno_end_marker_regex.match(tag.get_text()))
async def fetch_and_parse(session, url):
    """
    Parse a fatality page from a URL.
    :param aiohttp.ClientSession session: aiohttp session
    :param str url: detail page URL
    :return: a dictionary representing a fatality.
    :rtype: dict
    """
    # Download the detail page, parse it into a dict, then record the
    # source link alongside the parsed fields.
    detail_page = await fetch_detail_page(session, url)
    fatality = parse_page(detail_page)
    fatality[Fields.LINK] = url
    return fatality
def wmt_diag_base():
    """Set of hyperparameters."""
    hparams = iwslt_diag()
    # Transformer sizing.
    hparams.batch_size = 4096
    hparams.num_hidden_layers = 6
    hparams.hidden_size = 512
    hparams.filter_size = 2048
    hparams.num_heads = 8
    # VAE-related flags.
    hparams.latent_size = 512
    hparams.n_posterior_layers = 4
    hparams.n_decoder_layers = 6
    # Identical dropout rate for every dropout site.
    for dropout_attr in ("dropout", "layer_prepostprocess_dropout",
                         "attention_dropout", "relu_dropout"):
        setattr(hparams, dropout_attr, 0.1)
    return hparams
def K_axialbending(EA, EI_x, EI_y, x_C=0, y_C=0, theta_p=0):
    """
    Axial bending problem. See KK for notations.

    :param EA: axial stiffness
    :param EI_x: bending stiffness about the first principal axis
    :param EI_y: bending stiffness about the second principal axis
    :param x_C: elastic-center x offset (default 0)
    :param y_C: elastic-center y offset (default 0)
    :param theta_p: principal-axis rotation angle [rad] (default 0)
    :return: 3x3 symmetric stiffness matrix
    :rtype: numpy.ndarray
    """
    # Hoist the trig calls: cos(theta_p)/sin(theta_p) were each evaluated
    # up to four times in the original.
    c = cos(theta_p)
    s = sin(theta_p)
    H_xx = EI_x * c**2 + EI_y * s**2
    H_yy = EI_x * s**2 + EI_y * c**2
    H_xy = (EI_y - EI_x) * s * c
    return np.array([
        [EA,      EA*y_C,             -EA*x_C           ],
        [EA*y_C,  H_xx + EA*y_C**2,   -H_xy - EA*x_C*y_C],
        [-EA*x_C, -H_xy - EA*x_C*y_C, H_yy + EA*x_C**2  ]
    ])
def get_closest_intersection_pt_dist(path1, path2):
    """Returns the manhattan distance from the start location to the closest
    intersection point.
    Args:
        path1: the first path (list of consecutive (x,y) tuples)
        path2: the second path
    Returns:
        int of lowest manhattan distance for an intersection
    """
    # Expand both paths into point sets, intersect them, then take the
    # minimum manhattan distance over the crossings.
    crossings = get_intersections(get_pts(path1), get_pts(path2))
    return get_closest_dist(crossings)
def is_voiced_offset(c_offset):
    """Return whether the given consonant offset is voiced."""
    # Simple membership test against the module-level voiced-consonant table.
    return c_offset in VOICED_LIST
def dc_generator(noise_dim=NOISE_DIM):
    """Generate images from a random noise vector.
    Inputs:
    - z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
    Returns:
    TensorFlow Tensor of generated images, with shape [batch_size, 784].
    """
    # Assignment scaffold: the generator layers are meant to be added to this
    # Sequential model between the START/END markers below. As written, the
    # function returns the empty (layer-less) model.
    model = tf.keras.models.Sequential()
    # TODO: implement architecture
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    pass
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return model
def add_boundary_label(lbl, dtype=np.uint16):
    """
    Find boundary labels for a labelled image.
    Parameters
    ----------
    lbl : array(int)
        lbl is an integer label image (not binarized).
    dtype : numpy dtype, optional
        Integer dtype of the returned image (default: np.uint16).
    Returns
    -------
    res : array(int)
        res is an integer label image with boundary encoded as 2.
    """
    # Foreground pixels become 1, background 0; the outer boundaries of each
    # labelled object then overwrite those values with 2.
    result = (lbl > 0).astype(dtype)
    outer_boundaries = find_boundaries(lbl, mode='outer')
    result[outer_boundaries] = 2
    return result
def kmax(array, k):
    """ return k largest values of a float32 array """
    # Output buffers, filled in place through the SWIG pointers below:
    # I holds the indices (ids), D holds the values.
    I = np.zeros(k, dtype='int64')
    D = np.zeros(k, dtype='float32')
    # Single (nh=1) min-heap of capacity k over the flattened input;
    # a min-heap of size k retains the k largest elements pushed into it.
    ha = float_minheap_array_t()
    ha.ids = swig_ptr(I)
    ha.val = swig_ptr(D)
    ha.nh = 1
    ha.k = k
    ha.heapify()
    # Push every element of the input array into the heap.
    ha.addn(array.size, swig_ptr(array))
    # Sort the heap contents so D/I come back in decreasing-value order.
    ha.reorder()
    return D, I
def adcp_ins2earth(u, v, w, heading, pitch, roll, vertical):
    """
    Description:
        This function converts the Instrument Coordinate transformed velocity
        profiles to the Earth coordinate system. The calculation is defined in
        the Data Product Specification for Velocity Profile and Echo Intensity
        - DCN 1341-00750.
    Implemented by:
        2013-04-10: Christopher Wingard. Initial code.
        2014-04-04: Russell Desiderio. Optimized code performance by replacing the for
                    loops previously used to calculate vectorized matrix multiplication
                    products with calls to np.einsum (numpy Einstein summation function).
        2015-06-24: Russell Desiderio. Changed implementation of 'vertical' in the roll
                    calculation so that if these values are equal to the CI fill value
                    (-999999999), when these fill values are replaced with nans, the nans
                    will propagate through to the data product output.
        2015-06-24: Russell Desiderio. Incorporated int fillvalue -> Nan.
    Usage:
        uu, vv, ww = adcp_ins2earth(u, v, w, heading, pitch, roll, vertical)
            where
        uu = "east" velocity profiles in earth coordinates [mm s-1]
        vv = "north" velocity profiles in earth coordinates [mm s-1]
        ww = "vertical" velocity profiles in earth coordinates [mm s-1]
        u = east velocity profiles in instrument coordinates [mm s-1]
        v = north velocity profiles in instrument coordinates [mm s-1]
        w = vertical velocity profiles in instrument coordinates [mm s-1]
        heading = instrument's uncorrected magnetic heading [centidegrees]
        pitch = instrument pitch [centidegrees]
        roll = instrument roll [centidegrees]
        vertical = instrument's vertical orientation (0 = downward looking and
            1 = upward looking)
    References:
        OOI (2012). Data Product Specification for Velocity Profile and Echo
            Intensity. Document Control Number 1341-00750.
            https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
            >> Controlled >> 1000 System Level >>
            1341-00050_Data_Product_SPEC_VELPROF_OOI.pdf)
    """
    ### the input beam data for adcp_ins2earth are always called using the output
    ### of adcp_beam2ins, so the following lines are not needed.
    # insure we are dealing with array inputs
    #u = np.atleast_2d(u)
    #v = np.atleast_2d(v)
    #w = np.atleast_2d(w)
    # check for CI fill values before changing units.
    # this function 'conditions' (np.atleast_1d) its inputs.
    # TRDI does not apply its ADCP fill/bad value sentinels to compass data.
    heading, pitch, roll, vertical = replace_fill_with_nan(None, heading, pitch, roll, vertical)
    # change units from centidegrees to degrees
    heading = heading / 100.0
    pitch = pitch / 100.0
    roll = roll / 100.0
    # better way to calculate roll from the vertical orientation toggle;
    # this will propagate R as nans if the vertical variable is missing from the data.
    R = roll + vertical * 180.0
    # roll
    Rrad = np.radians(R)
    cos_R = np.cos(Rrad)
    sin_R = np.sin(Rrad)
    # heading
    Hrad = np.radians(heading)
    cos_H = np.cos(Hrad)
    sin_H = np.sin(Hrad)
    # pitch
    # note: the effective pitch combines the measured pitch with the raw roll
    # (not the orientation-corrected R), per the TRDI coordinate transform.
    t1rad = np.radians(pitch)
    t2rad = np.radians(roll)
    Prad = np.arctan(np.tan(t1rad) * np.cos(t2rad))
    cos_P = np.cos(Prad)
    sin_P = np.sin(Prad)
    # determine array size
    n_packets = u.shape[0]
    n_uvw = u.shape[1]
    # initialize vectors to be used as matrix elements
    ones = np.ones(n_packets)
    zeros = ones * 0.0
    # the rollaxis calls reorient the matrices so that their lead index is
    # the data packet index
    M1 = np.array([[cos_H, sin_H, zeros],
                   [-sin_H, cos_H, zeros],
                   [zeros, zeros, ones]])
    M1 = np.rollaxis(M1, 2)
    M2 = np.array([[ones, zeros, zeros],
                   [zeros, cos_P, -sin_P],
                   [zeros, sin_P, cos_P]])
    M2 = np.rollaxis(M2, 2)
    M3 = np.array([[cos_R, zeros, sin_R],
                   [zeros, ones, zeros],
                   [-sin_R, zeros, cos_R]])
    M3 = np.rollaxis(M3, 2)
    # construct input array of coordinates (velocities) to be transformed.
    # the basis set is 3D (E,N,U) so that the middle dimension is sized at 3.
    uvw = np.zeros((n_packets, 3, n_uvw))
    # pack the coordinates (velocities) to be transformed into the appropriate
    # slices.
    uvw[:, 0, :] = u
    uvw[:, 1, :] = v
    uvw[:, 2, :] = w
    # the Einstein summation is here configured to do the matrix
    # multiplication MM(i,l) = M1(i,j) * M2(j,k) * M3(k,l) on each slice h.
    MM = np.einsum('hij,hjk,hkl->hil', M1, M2, M3)
    # the Einstein summation is here configured to do the matrix
    # multiplication uvw_earth(i,m) = MM(i,l) * uvw(l,m) on each slice h.
    uvw_earth = np.einsum('hil,hlm->him', MM, uvw)
    # NOTE:
    # these last two executable statements run about a factor of 2
    # faster in the 10000 data packet performance tests versus combining
    # these operations into the one statement:
    # uvw_earth = np.einsum('hij,hjk,hkl,hlm->him', M1, M2, M3, uvw)
    # break out the coordinate slices and return them
    uu = uvw_earth[:, 0, :]
    vv = uvw_earth[:, 1, :]
    ww = uvw_earth[:, 2, :]
    return (uu, vv, ww)
import math
def strength(data, l):
    """
    Returns the strength of earthquake as tuple (P(z),S(xy))

    data: three sample sequences (x, y, z axes)
    l: number of trailing samples to analyse

    References kept from the original implementation:
    - FFT filtering idea: https://momonoki2017.blogspot.com/2018/03/pythonfft-1-fft.html
    - JMA seismic intensity: https://www.data.jma.go.jp/svd/eqev/data/kyoshin/kaisetsu/calc_sindo.htm
    - rpi-seismometer: https://github.com/p2pquake/rpi-seismometer
    """
    # Per-axis baseline: mean of the last l samples.
    baselines = []
    for axis_index in range(3):
        window = data[axis_index][-l:]
        baselines.append(sum(window) / len(window))
    z_mags = []   # P wave?
    xy_mags = []  # S wave?
    # Walk the last l samples column-wise (one (x, y, z) triple at a time).
    for sample in np.array(data).T[-l:]:
        horizontal_sq = ((sample[0] - baselines[0]) ** 2
                         + (sample[1] - baselines[1]) ** 2)
        xy_mags.append(math.sqrt(horizontal_sq))
        z_mags.append(math.sqrt((sample[2] - baselines[2]) ** 2))
    mean_z = sum(z_mags) / len(z_mags) * 100
    mean_xy = sum(xy_mags) / len(xy_mags) * 100
    return mean_z, mean_xy
import sys
import os
def get_packages_by_commits(
        repository: str,
        commits: list,
        package_limit=1,
        ecosystem='maven') -> list:
    """Get package name from git repository and commit hash.
    A git handler is created and modified files are searched
    by the given commit. Package is inferred based on those
    modified files.
    There can be multiple packages, by default only one child
    package is returned.
    *NOTE:* Only usable for git repositories.
    :param repository: str, path to local repository or url
        If url is provided, to repository will be cloned into
        a temporary folder (at /tmp)
    :param commits: list, commit hashes to search the modified files by
    :param package_limit: int or None, limit number of packages
        The limit is applied per modified file.
        If all packages found in the path should be listed,
        provide None or 0
    :param ecosystem: ecosystem the repository belongs to
        {maven, npm, python}, by default 'maven' is assumed
    """
    if repository.startswith('http'):
        print('\nCloning repository...\n', file=sys.stderr)
        handler = GitHandler.clone(url=repository)
    else:
        handler = GitHandler(path=repository)
    # Normalize the limit: 0/None mean "no limit" (slice with None).
    # The original expression found_packages[:[None, package_limit][package_limit]]
    # raised IndexError for any limit > 1 and TypeError for None.
    limit = package_limit if package_limit else None
    with handler as git:
        mod_files = git.get_modified_files(commits=commits)
        # Longest (deepest) paths first, so child packages come before parents.
        mod_files = sorted(mod_files, key=len, reverse=True)
        eco_namespace = _get_namespace_by_eco(ecosystem)
        packages = set()
        for mod_file_path in mod_files:
            root_dir = os.path.dirname(str(mod_file_path))
            found_packages = eco_namespace.find_packages(root_dir, topdown=False)
            for p in found_packages[:limit]:
                packages.add(p)
    # the first found package should be the child package belonging to the file
    # which has been modified
    return list(packages)
def humanize(tag, value):
    """Make the metadata value human-readable
    :param tag: The key of the metadata value
    :param value: The actual raw value
    :return: Returns ``None`` or a human-readable version ``str``
    :rtype: ``str`` or ``None``
    """
    # Try each registered humanizer in turn; the first non-None result wins.
    # If none applies, fall through to the implicit None return.
    for candidate_formatter in find_humanizers(tag):
        rendered = candidate_formatter(value)
        if rendered is not None:
            return rendered
import itertools
def cartesian(sequences, dtype=None):
    """
    Generate a cartesian product of input arrays.
    Parameters
    ----------
    sequences : list of array-like
        1-D arrays to form the cartesian product of.
    dtype : data-type, optional
        Desired output data-type. Defaults to the type of the first
        element of the first sequence.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    if dtype is None:
        # Infer the dtype from the very first element.
        dtype = np.dtype(type(sequences[0][0]))
    product_rows = itertools.product(*sequences)
    return np.array(list(product_rows), dtype=dtype)
def nasa_date_to_iso(datestr):
    """Convert the day-number based NASA format to ISO.
    Parameters
    ----------
    datestr : str
        Date string in the form Y-j
    Returns
    -------
    Datestring in ISO standard yyyy-mm-ddTHH:MM:SS.MMMMMM
    """
    # Parse with the module-level NASA day-of-year format, then re-emit ISO.
    parsed = dt.datetime.strptime(datestr, nasa_date_format)
    return parsed.isoformat()
import googledatastore
import atexit
def with_cloud_emulators(*emulator_names):
    """Decorator for starting cloud emulators from a unittest.TestCase.

    :param emulator_names: names of emulators (e.g. 'datastore') to start
        once per process and reset before every test.
    """
    def decorator(cls):
        """Decorator."""
        class Wrapped(cls):
            """Wrapped class."""
            @classmethod
            def setUpClass(cls):
                """Class setup."""
                for emulator_name in emulator_names:
                    if emulator_name not in _emulators:
                        # Start lazily, once per process; ensure teardown at
                        # interpreter exit.
                        _emulators[emulator_name] = start_cloud_emulator(emulator_name)
                        atexit.register(_emulators[emulator_name].cleanup)
                    if emulator_name == 'datastore':
                        ndb.get_context().set_memcache_policy(False)
                        ndb.get_context().set_cache_policy(False)
                # Work around bug with App Engine datastore_stub_util.py relying on
                # missing protobuf enum.
                googledatastore.PropertyFilter.HAS_PARENT = 12
                super(Wrapped, cls).setUpClass()
            def setUp(self):
                # .values() works on both Python 2 and 3; the original
                # .itervalues() is Python-2-only and raises AttributeError
                # on Python 3.
                for emulator in _emulators.values():
                    emulator.reset()
                super(Wrapped, self).setUp()
        Wrapped.__module__ = cls.__module__
        Wrapped.__name__ = cls.__name__
        return Wrapped
    return decorator
def auto_label_color(labels):
    """
    ???+ note "Create a label->hex color mapping dict."
    """
    # Distinct labels, excluding the abstain sentinel, in ascending order.
    distinct_labels = sorted(set(labels) - {module_config.ABSTAIN_DECODED})
    assert len(distinct_labels) <= 20, "Too many labels to support (max at 20)"
    # Category10 suffices for up to 10 labels; otherwise use Category20.
    if len(distinct_labels) <= 10:
        palette = Category10[10]
    else:
        palette = Category20[20]
    color_dict = {module_config.ABSTAIN_DECODED: "#dcdcdc"}  # gainsboro hex code
    color_dict.update(zip(distinct_labels, palette))
    return color_dict
import re
def dedentString(text):
    """Dedent the docstring, so that docutils can correctly render it."""
    # Smallest run of leading spaces found after any newline; 0 when none.
    indent_runs = [len(match) for match in space_re.findall(text)]
    dedent = min(indent_runs or [0])
    # Strip exactly that many spaces after each newline.
    dedent_pattern = re.compile('\n {%i}' % dedent, re.M)
    return dedent_pattern.sub('\n', text)
def preprocess(df):
    """Preprocess the DataFrame, replacing identifiable information.

    Usernames, URLs and e-mail addresses in ``df.text`` are replaced by
    placeholder tokens, and Wikipedia-Talk NEWLINE/TAB tokens are expanded.

    :param df: DataFrame with a ``text`` column of strings
    :return: the same DataFrame, with ``text`` rewritten
    """
    # regex=True is passed explicitly: since pandas 2.0 Series.str.replace
    # defaults to literal matching, which would leave these patterns inert.
    # Usernames: <USER_TOKEN>
    username_pattern = r"(?<=\B|^)@\w{1,18}"
    df.text = df.text.str.replace(username_pattern, "<USERNAME>", regex=True)
    # URLs: <URL_TOKEN>
    url_pattern = (
        r"https?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]"
        r"|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    )
    df.text = df.text.str.replace(url_pattern, "<URL>", regex=True)
    # Email: <EMAIL_TOKEN>
    email_pattern = r"[-.+\w]+@[-\w]+\.[-.\w]+"
    df.text = df.text.str.replace(email_pattern, "<EMAIL>", regex=True)
    # Replace tokens in Wikipedia Talk dataset
    df.text = df.text.str.replace("NEWLINE;?_TOKEN", "\n", regex=True)
    # TAB_TOKEN is a plain literal; no regex needed.
    df.text = df.text.str.replace("TAB_TOKEN", "\t", regex=False)
    return df
def mobilenetv3_large_w7d20(**kwargs):
    """
    MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # model_name must match the function name so pretrained-weight lookup
    # resolves correctly; the original passed the copy-pasted "small" name.
    return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs)
def random_forest_classifier(model, inputs, method="predict_proba"):
    """
    Creates a SKAST expression corresponding to a given random forest classifier
    """
    # Average the per-tree probability estimates: scale each tree's output
    # by 1/n_estimators, then sum over all trees.
    n_trees = len(model.estimators_)
    scaled_trees = [
        decision_tree(estimator.tree_, inputs, method="predict_proba",
                      value_transform=lambda v: v / n_trees)
        for estimator in model.estimators_
    ]
    return classifier(sum_(scaled_trees), method)
from cowbird.utils import get_settings as real_get_settings
import functools
def mock_get_settings(test):
    """
    Decorator to mock :func:`cowbird.utils.get_settings` to allow retrieval of settings from :class:`DummyRequest`.
    .. warning::
        Only apply on test methods (not on class TestCase) to ensure that :mod:`pytest` can collect them correctly.
    """
    def patched_get_settings(container):
        # DummyRequest carries its settings on the registry; everything else
        # is delegated to the real implementation.
        if isinstance(container, DummyRequest):
            return container.registry.settings
        return real_get_settings(container)

    @functools.wraps(test)
    def wrapped(*args, **kwargs):
        with mock.patch("cowbird.utils.get_settings", side_effect=patched_get_settings):
            return test(*args, **kwargs)
    return wrapped
def _convert_to_interbatch_order(order: pd.Series,
batch: pd.Series) -> pd.Series:
"""
Convert the order values from a per-batch order to a interbatch order.
Parameters
----------
order: pandas.Series
order and batch must share the same index, size and be of dtype int.
batch: pandas.Series
Returns
-------
interbatch_order: pandas.Series
Raises
------
ValueError: if the order values are already unique.
Examples
--------
>>>order = pd.Series([1, 2, 3, 1, 2, 3])
>>>batch = pd.Series([1, 1, 1, 2, 2, 2])
>>>_convert_to_interbatch_order(order, batch)
pd.Series([1, 2, 3, 4, 5, 6])
"""
if order.unique().size == order.size:
return order
# find a value to add to each batch to make unique and sorted order values
max_order = order.groupby(batch).max()
add_to_order = np.roll(max_order, 1)
add_to_order[0] = 0
add_to_order = add_to_order.cumsum()
add_to_order = pd.Series(data=add_to_order, index=max_order.index)
add_to_order = batch.map(add_to_order)
interbatch_order = order + add_to_order
return interbatch_order | 235e99d8a93ebeecde7bfe274b82fe32980288dd | 27,122 |
def CV_INIT_3X3_DELTAS(*args):
    """CV_INIT_3X3_DELTAS(double deltas, int step, int nch)"""
    # Thin SWIG wrapper: forwards all arguments to the native OpenCV binding.
    return _cv.CV_INIT_3X3_DELTAS(*args)
def is_instance_cold_migrated_alarm(alarms, instance, guest_hb=False):
    """
    Check if an instance cold-migrated alarm has been raised
    """
    # Look for a critical-severity cold-migrated alarm tied to this instance.
    expected = {
        'alarm_id': fm_constants.FM_ALARM_ID_VM_COLD_MIGRATED,
        'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL,
    }
    return _instance_alarm_raised(alarms, expected, instance)
def precision_macro(y_target, y_predicted):
    """
    y_target: m x n 2D array. {0, 1}
        real labels
    y_predicted: m x n 2D array {0, 1}
        prediction labels
    m (y-axis): # of instances
    n (x-axis): # of classes
    """
    # Macro averaging: unweighted mean of the per-class precision scores.
    return precision_score(y_target, y_predicted, average='macro')
def layout(mat, widths=None, heights=None):
    """Build an R ``layout(...)`` call string.

    :param mat: 2-D nested list describing the plot layout matrix
    :param widths: optional list of relative column widths
    :param heights: optional list of relative row heights
    :return: R script fragment (ends with a newline)
    """
    nrow = len(mat)
    ncol = len(mat[0])
    # Flatten row-major (matches byrow = TRUE). A comprehension replaces the
    # original side-effecting list(map(lambda m: arr.extend(m), mat)) idiom.
    flat = [value for row in mat for value in row]
    rscript = 'layout(matrix(c(%s), %d, %d, byrow = TRUE),' % (str(flat)[1:-1], nrow, ncol)
    if widths:
        rscript += 'widths=c(%s),' % (str(widths)[1:-1])
    if heights:
        rscript += 'heights=c(%s),' % (str(heights)[1:-1])
    # Swap the trailing comma for the closing parenthesis.
    return rscript[:-1] + ')\n'
def get_file_language(filename, text=None):
    """Get file language from filename"""
    # The (dot-less) file extension is the language name, when present.
    extension = osp.splitext(filename)[1]
    if extension.startswith('.'):
        extension = extension[1:]  # drop the leading dot
    language = extension
    if extension:
        return language
    # Extension-less file: inspect leading shebang-style lines for "python".
    if text is None:
        text, _enc = encoding.read(filename)
    for line in text.splitlines():
        if not line.strip():
            continue
        if not line.startswith('#!'):
            break
        if 'python' in line[2:]:
            language = 'python'
    return language
import re
def _get_output_name(fpattern, file_ind, ind):
    """ Returns an output name for volumetric image
    This function returns a file output name for the image volume
    based on the names of the file names of the individual z-slices.
    All variables are kept the same as in the original filename,
    but the z values are transformed into a range surrounded by <>.
    For example, if the following files are processed:
    image_c000_z000.ome.tif
    image_c000_z001.ome.tif
    image_c000_z002.ome.tif
    then the output file will be:
    image_c000_z<000-002>.ome.tif
    Inputs:
        fpattern - A filename pattern indicating variables in filenames
        file_ind - A parsed dictionary of file names
        ind - A dictionary containing the indices for the file name (i.e. {'r':1,'t':1})
    Outputs:
        fname - an output file name
    """
    # If no regex was supplied, return default image name.
    # (truthiness check replaces the "fpattern==None or fpattern==''" anti-idiom)
    if not fpattern:
        return 'image.ome.tif'
    for key in ind:
        assert key in VARIABLES, "Input dictionary key not a valid variable: {}".format(key)
    # Parse the {variable} placeholders out of the pattern.
    expr = []
    variables = []
    for g in re.finditer(r"\{[pxyzctr]+\}", fpattern):
        expr.append(g.group(0))
        variables.append(expr[-1][1])
    # Substitute each placeholder to build the output file name.
    fname = fpattern
    for e, v in zip(expr, variables):
        # Zero-pad width = placeholder length minus the two braces.
        width = len(e) - 2
        if v not in STATICS:
            # Non-static variables collapse into a <min-max> range taken over
            # the parsed file-name indices.
            minval = min(int(z) for z in file_ind.keys())
            maxval = max(int(z) for z in file_ind.keys())
            fname = fname.replace(e, '<' + str(minval).zfill(width) +
                                  '-' + str(maxval).zfill(width) + '>')
        elif v not in ind.keys():
            # Static variable with no supplied index: default to zero.
            fname = fname.replace(e, str(0).zfill(width))
        else:
            fname = fname.replace(e, str(ind[v]).zfill(width))
    return fname
import re
def MatchNameComponent(key, name_list, case_sensitive=True):
    """Try to match a name against a list.

    Matches *key* like ``test1`` against entries such as
    ``['test1.example.com', 'test2.example.com']``: both ``test1`` and
    ``test1.example`` match the first entry, but ``test1.ex`` does not.
    Multiple matches count as no match, unless the key fully matches
    one entry.

    @type key: str
    @param key: the name to be searched
    @type name_list: list
    @param name_list: the list of strings against which to search the key
    @type case_sensitive: boolean
    @param case_sensitive: whether to provide a case-sensitive match
    @rtype: None or str
    @return: None if there is no match I{or} if there are multiple matches,
        otherwise the element from the list which matches
    """
    if key in name_list:
        return key
    flags = 0
    if not case_sensitive:
        flags |= re.IGNORECASE
        key = key.upper()
    # key itself, optionally followed by a dot-separated suffix.
    name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), flags)
    prefix_matches = [name for name in name_list if name_re.match(name)]
    if case_sensitive:
        exact_matches = []
    else:
        exact_matches = [name for name in prefix_matches if key == name.upper()]
    if len(exact_matches) == 1:
        return exact_matches[0]
    if len(prefix_matches) == 1:
        return prefix_matches[0]
    return None
import requests
def news_api():
    """Uses news API and returns a dictionary containing news """
    # Build the top-headlines request from the module-level API key store
    # and the configured country code.
    news_base_url = "https://newsapi.org/v2/top-headlines?"
    news_api_key = keys["news"]
    country = location["country"]
    news_url = news_base_url + "country=" + country + "&apiKey=" + news_api_key
    n_api = requests.get(news_url)
    # Decoded JSON payload of the News API response.
    return n_api.json()
def compress(mesh, engine_name="draco"):
    """ Compress mesh data.
    Args:
        mesh (:class:`Mesh`): Input mesh.
        engine_name (``string``): Valid engines are:
            * ``draco``: `Google's Draco engine <https://google.github.io/draco/>`_
              [#]_
    Returns:
        A binary string representing the compressed mesh data.
    A simple usage example:
    >>> mesh = pymesh.generate_icosphere(1.0, [0, 0, 0])
    >>> data = pymesh.compress(mesh)
    >>> with open("out.drc", 'wb') as fout:
    ...     fout.write(data)
    .. [#] Draco uses lossy compression. Both accuarcy and
        vertices/face order will be lost due to compression. Draco only works
        with triangular mesh or point cloud.
    """
    # Instantiate the requested backend, then compress the low-level mesh.
    backend = PyMesh.CompressionEngine.create(engine_name)
    return backend.compress(mesh.raw_mesh)
import os
def _is_syntax_is_missing(language):
"""
download the grammar for a specific language if
the files is missing
@param language language: python, sqlite, ...
@return grammar file
"""
locations = {
"R": "https://github.com/antlr/grammars-v4/tree/master/r/R.g4",
"SQLite": "https://github.com/antlr/grammars-v4/blob/master/sqlite/SQLite.g4",
"Pig": "http://wiki.apache.org/pig/PigLexer",
"CSharp4": "https://github.com/antlr/grammars-v4/tree/master/csharp",
}
folder = os.path.dirname(__file__)
filename = os.path.join(folder, language + ".g4")
if os.path.exists(filename):
return filename
if language in locations:
raise FileNotFoundError(
"The grammar {0} is not available, you should get it from {1}".format(
language,
locations[language]))
else:
raise KeyError(
"unexpected language: {0}, not in {1}".format(
language,
",".join(
locations.keys()))) | 8e6a51ec1b8b9b886778b961dafe0d9b995fb4c0 | 27,132 |
import configparser
def get_hotkey_next(config: configparser.RawConfigParser):
    """
    Get the hotkey for: next desktop wallpaper.

    :param config: parsed configuration containing a [Hotkey] section
    :return: the value of the 'hk_next' option
    """
    return __get_hotkey(config, 'Hotkey', 'hk_next')
def abs_to_rel_f(vector, cell, pbc):
    """
    Converts a position vector in absolute coordinates to relative coordinates
    for a film system.

    Returns False for non-3D vectors, and None (after a warning print) when
    the z axis is periodic.
    """
    # TODO this currently only works if the z-coordinate is the one with no pbc
    # Therefore if a structure with x non pbc is given this should also work.
    # maybe write a 'tranform film to fleur_film routine'?
    if len(vector) != 3:
        return False
    if pbc[2]:
        print('FLEUR can not handle this type of film coordinate')
        return None
    # Convert only the in-plane (x, y) components; z stays absolute.
    absolute = np.array(vector)
    in_plane = np.array(absolute[:2])
    lattice_2d = np.array(np.array(cell)[0:2, 0:2])
    relative_xy = np.matmul(in_plane, np.linalg.inv(lattice_2d))
    return [relative_xy[0], relative_xy[1], absolute[2]]
def get_soup(url):
    """ Returns beautiful soup object of given url.
    get_soup(str) -> object(?)
    """
    # Fetch the raw HTML (urllib2: Python 2 codebase) and parse it with bs4.
    request = urllib2.Request(url)
    html = urllib2.urlopen(request).read()
    return bs4(html)
def internal_server_error(error):
    """ Handles unexpected server error with 500_SERVER_ERROR """
    # Prefer the exception's explicit message attribute, else its string form.
    message = error.message or str(error)
    app.logger.info(message)
    payload = jsonify(status=500, error='Internal Server Error', message=message)
    return make_response(payload, 500)
import math
def diffsnorms(A, S, V, n_iter=20):
    """
    2-norm accuracy of a Schur decomp. of a matrix.
    Computes an estimate snorm of the spectral norm (the operator norm
    induced by the Euclidean vector norm) of A-VSV', using n_iter
    iterations of the power method started with a random vector;
    n_iter must be a positive integer.
    Increasing n_iter improves the accuracy of the estimate snorm of
    the spectral norm of A-VSV'.
    Notes
    -----
    To obtain repeatable results, reset the seed for the pseudorandom
    number generator.
    Parameters
    ----------
    A : array_like
        first matrix in A-VSV' whose spectral norm is being estimated
    S : array_like
        third matrix in A-VSV' whose spectral norm is being estimated
    V : array_like
        second matrix in A-VSV' whose spectral norm is being estimated
    n_iter : int, optional
        number of iterations of the power method to conduct;
        n_iter must be a positive integer, and defaults to 20
    Returns
    -------
    float
        an estimate of the spectral norm of A-VSV' (the estimate fails
        to be accurate with exponentially low probability as n_iter
        increases; see references DS1_, DS2_, and DS3_ below)
    Examples
    --------
    >>> from fbpca import diffsnorms, eigenn
    >>> from numpy import diag
    >>> from numpy.random import uniform
    >>> from scipy.linalg import svd
    >>>
    >>> A = uniform(low=-1.0, high=1.0, size=(2, 100))
    >>> A = A.T.dot(A)
    >>> (U, s, Va) = svd(A, full_matrices=False)
    >>> A = A / s[0]
    >>>
    >>> (d, V) = eigenn(A, 2)
    >>> err = diffsnorms(A, diag(d), V)
    >>> print(err)
    This example produces a rank-2 approximation V diag(d) V' to A
    such that the columns of V are orthonormal and the entries of d
    are nonnegative and are nonincreasing.
    diffsnorms(A, diag(d), V) outputs an estimate of the spectral norm
    of A - V diag(d) V', which should be close to the machine
    precision.
    References
    ----------
    .. [DS1] Jacek Kuczynski and Henryk Wozniakowski, Estimating the
             largest eigenvalues by the power and Lanczos methods with
             a random start, SIAM Journal on Matrix Analysis and
             Applications, 13 (4): 1094-1122, 1992.
    .. [DS2] Edo Liberty, Franco Woolfe, Per-Gunnar Martinsson,
             Vladimir Rokhlin, and Mark Tygert, Randomized algorithms
             for the low-rank approximation of matrices, Proceedings of
             the National Academy of Sciences (USA), 104 (51):
             20167-20172, 2007. (See the appendix.)
    .. [DS3] Franco Woolfe, Edo Liberty, Vladimir Rokhlin, and Mark
             Tygert, A fast randomized algorithm for the approximation
             of matrices, Applied and Computational Harmonic Analysis,
             25 (3): 335-366, 2008. (See Section 3.4.)
    See also
    --------
    eigenn, eigens
    """
    # Shape sanity checks: A must be square and conformable with V and S.
    (m, n) = A.shape
    (m2, k) = V.shape
    (k2, k3) = S.shape
    assert m == n
    assert m == m2
    assert k == k2
    assert k2 == k3
    assert n_iter >= 1
    if np.isrealobj(A) and np.isrealobj(V) and np.isrealobj(S):
        isreal = True
    else:
        isreal = False
    # Promote the types of integer data to float data.
    dtype = (A * 1.0).dtype
    #
    # Generate a random vector x.
    #
    if isreal:
        x = np.random.normal(size=(n, 1)).astype(dtype)
    else:
        x = np.random.normal(size=(n, 1)).astype(dtype) \
            + 1j * np.random.normal(size=(n, 1)).astype(dtype)
    x = x / norm(x)
    #
    # Run n_iter iterations of the power method.
    #
    for it in range(n_iter):
        #
        # Set y = (A-VSV')x.
        #
        y = mult(A, x) - V.dot(S.dot(V.conj().T.dot(x)))
        #
        # Set x = (A'-VS'V')y.
        #
        x = mult(y.conj().T, A).conj().T \
            - V.dot(S.conj().T.dot(V.conj().T.dot(y)))
        #
        # Normalize x, memorizing its Euclidean norm.
        #
        snorm = norm(x)
        if snorm == 0:
            return 0
        x = x / snorm
    # The power iteration above estimates the largest eigenvalue of
    # (A-VSV')'(A-VSV'); its square root is the spectral norm estimate.
    snorm = math.sqrt(snorm)
    return snorm
from typing import Iterator
from typing import Tuple
def data_selection(workload: spec.Workload,
                   input_queue: Iterator[Tuple[spec.Tensor, spec.Tensor]],
                   optimizer_state: spec.OptimizerState,
                   current_param_container: spec.ParameterContainer,
                   hyperparameters: spec.Hyperparamters,
                   global_step: int,
                   rng: spec.RandomState) -> Tuple[spec.Tensor, spec.Tensor]:
    """Select data from the infinitely repeating, pre-shuffled input queue.

    Each element of the queue is a single training example and label.
    `current_params_types` was left out of the signature because we do not
    believe it is necessary for this function.
    Return a tuple of input label batches.
    """
    # Every argument except the queue is intentionally unused; the API
    # mandates the full signature.
    del workload, optimizer_state, current_param_container
    del hyperparameters, global_step, rng
    return next(input_queue)
def graph_cases_factory(selenium):
    """
    :type selenium: selenium.webdriver.remote.webdriver.WebDriver
    :rtype: callable
    :return: Constructor method to create a graph cases factory with a custom
        host.
    """
    def make_factory(host):
        # Bind the current selenium driver; only the host varies per call.
        return GraphCaseFactory(selenium=selenium, host=host)
    return make_factory
def clean_scene_from_file(file_name):
    """
    Args:
        file_name: The name of the input sequence file
    Returns:
        Name of the scene used in the sequence file
    """
    scene = scenename_from_file(file_name)
    print('Scene: ', scene)
    # Path of the cleaned 10M-vertex mesh for this scene.
    return SCENE_PATH + scene + '/10M_clean.ply'
def _non_overlapping_chunks(seq, size):
"""
This function takes an input sequence and produces chunks of chosen size
that strictly do not overlap. This is a much faster implemetnation than
_overlapping_chunks and should be preferred if running on very large seq.
Parameters
----------
seq : tuple or list
Sequence of integers.
size : int
Length of each produced chunk.
Returns
-------
zip
zip object that produces chunks of specified size, one at a time.
"""
return zip(*[iter(seq)] * size) | 15b5d2b4a7d8df9785ccc02b5369a3f162704e9e | 27,141 |
import sys
import torch
def load_data(path="/home/bumsoo/Data/Planetoid", dataset="cora"):
    """
    Load a Planetoid-format citation dataset (cora/citeseer/pubmed style) and
    return normalized torch tensors plus the canonical train/val/test split.

    ind.[:dataset].x => the feature vectors of the training instances (scipy.sparse.csr.csr_matrix)
    ind.[:dataset].y => the one-hot labels of the labeled training instances (numpy.ndarray)
    ind.[:dataset].allx => the feature vectors of both labeled and unlabeled training instances (csr_matrix)
    ind.[:dataset].ally => the labels for instances in ind.dataset_str.allx (numpy.ndarray)
    ind.[:dataset].graph => the dict in the format {index: [index of neighbor nodes]} (collections.defaultdict)
    ind.[:dataset].tx => the feature vectors of the test instances (scipy.sparse.csr.csr_matrix)
    ind.[:dataset].ty => the one-hot labels of the test instances (numpy.ndarray)
    ind.[:dataset].test.index => indices of test instances in graph, for the inductive setting

    Returns:
        (adj, features, labels, idx_train, idx_val, idx_test) as torch tensors.

    NOTE(review): ``pkl``, ``sp`` (scipy.sparse), ``nx`` (networkx) and helper
    functions (parse_index_file, normalize_*, missing_elements) are module-level
    names defined outside this chunk.
    """
    print("\n[STEP 1]: Upload {} dataset.".format(dataset))
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("{}/ind.{}.{}".format(path, dataset, names[i]), 'rb') as f:
            if (sys.version_info > (3,0)):
                objects.append(pkl.load(f, encoding='latin1')) # python3 compatibility
            else:
                objects.append(pkl.load(f)) # python2
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx = parse_index_file("{}/ind.{}.test.index".format(path, dataset))
    test_idx_range = np.sort(test_idx)
    if dataset == 'citeseer':
        #Citeseer dataset contains some isolated nodes in the graph
        # Pad test features/labels with zero rows so test indices form a
        # contiguous range.
        test_idx_range_full = range(min(test_idx), max(test_idx)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Feature & Adjacency Matrix
    features = sp.vstack((allx, tx)).tolil()
    # Reorder test rows into their true positions in the full-graph ordering.
    features[test_idx, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    print("| # of nodes : {}".format(adj.shape[0]))
    print("| # of edges : {}".format(int(adj.sum().sum()/2 + adj.diagonal().sum()/2)))
    # Normalization
    features = normalize_sparse_features(features)
    adj = normalize_sparse_adj(adj + sp.eye(adj.shape[0])) # Input is A_hat
    print("| # of features : {}".format(features.shape[1]))
    print("| # of clases : {}".format(ally.shape[1]))
    features = torch.FloatTensor(np.array(features.todense()))
    sparse_mx = adj.tocoo().astype(np.float32)  # NOTE(review): unused below
    adj = torch.FloatTensor(np.array(adj.todense()))
    labels = np.vstack((ally, ty))
    labels[test_idx, :] = labels[test_idx_range, :]
    if dataset == 'citeseer':
        save_label = np.where(labels)[1]
    labels = torch.LongTensor(np.where(labels)[1])
    # Canonical Planetoid split: first |y| nodes train, next 500 validation.
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    idx_test = test_idx_range.tolist()
    print("| # of train set : {}".format(len(idx_train)))
    print("| # of val set : {}".format(len(idx_val)))
    print("| # of test set : {}".format(len(idx_test)))
    idx_train, idx_val, idx_test = list(map(lambda x: torch.LongTensor(x), [idx_train, idx_val, idx_test]))
    if dataset == 'citeseer':
        # Re-insert rows for the isolated (label-less) test nodes as class 0.
        L = np.sort(idx_test)
        missing = missing_elements(L)
        for element in missing:
            save_label = np.insert(save_label, element, 0)
        labels = torch.LongTensor(save_label)
    return adj, features, labels, idx_train, idx_val, idx_test
import logging
def Compute_Error(X_data, pinn, K, mu, Lf, deltamean, epsilon, ndim) :
    """
    Function to determine error for input data X_data

    For each sample, combines the deviation of the PINN prediction at t=0
    with a numerically integrated residual term (trapezoidal rule plus a
    Richardson-style correction) to form an a posteriori error bound.

    :param array X_data: input data for PINN; column 0 is assumed to hold the
        time and the last ``ndim`` columns the state -- inferred from the
        indexing below, TODO confirm with callers
    :param PINN pinn: PINN under investigation
    :param float K: key parameter for using trapezoidal rule and estimating the number of required subintervals
    :param float mu: smoothening parameter for creating delta from deviation R
    :param float Lf: Lipschitz constant or spectral abscissa of system under investigation
    :param float deltamean: a priori determined average deviation in ODE/PDE
    :param float epsilon: contribution the error of the numerical integration may give to the overall a posteriori error
    :param int ndim: dimensions of input data
    :return: tuple ``(E_pred, N_SP)``: per-sample error terms of shape
        ``(len(X_data), 2)`` and the per-sample number of support points
    """
    # initialize variables for error and number of support points
    E_pred = np.zeros((X_data.shape[0], 2))
    N_SP = np.repeat(0, X_data.shape[0], axis=0)
    # compute target value and error for all times
    for x_index in range(X_data.shape[0]):
        # get current item
        x_item = np.reshape(X_data[x_index], (1, X_data.shape[1]))
        # predict value at time 0 and compare to input values to get r0
        t = x_item[0,0]
        x_item[0,0] = 0
        r0 = np.sqrt(np.sum((pinn.predict(x_item)[0] - x_item[0, -ndim:])**2))
        x_item[0,0] = t
        # compute predicted machine learning error and number of required support points
        E_ML = np.exp(Lf * x_item[0,0])*(r0 + (1-np.exp(-x_item[0,0]*Lf))*deltamean/Lf)
        N_SP[x_index] = np.ceil(np.sqrt(K*x_item[0,0]**3 / (12*E_ML*epsilon))).astype(int)
        # compute prediction of support points
        T_test = np.transpose(np.array([np.linspace(0,x_item[0,0],2*(N_SP[x_index]+1))]))
        X_test = np.repeat(x_item, T_test.shape[0], axis=0)
        X_test[:,0] = T_test[:,0]
        _, F_pred = pinn.predict(X_test)
        # compute integral for error
        targetfun = (np.sqrt(np.reshape(np.sum(F_pred**2, axis=1),(F_pred.shape[0],1)) + np.full((F_pred.shape[0],1), mu, dtype="float64")**2) * np.exp(-Lf*T_test))
        # I_1 uses all nodes; I_2 uses every other node, for the
        # |I_1 - I_2| refinement estimate below.
        I_1 = compute_integral_trpz(targetfun, T_test[1]-T_test[0])
        if x_item[0,0] > 0:
            I_2 = compute_integral_trpz(targetfun[0::2], T_test[2]-T_test[0])
        # determine error
        E_pred[x_index, 0] = np.exp(Lf*x_item[0,0])*(r0)
        if x_item[0,0] == 0:
            E_pred[x_index, 1] = 0
        else:
            E_pred[x_index, 1] = np.exp(Lf*x_item[0,0])*(I_1 + 0.75*np.absolute(I_1-I_2))
        if x_index % 100 == 0:
            logging.info(f'Predicted error for index {x_index}: {E_pred[x_index]}')
    return E_pred, N_SP
import itertools
def sort_fiducials(qr_a, qr_b):
    """Sort 2d fiducial markers in a consistent ordering based on their relative positions.

    In general, when we find fiducials in an image, we don't expect them to be
    returned in a consistent order. Additionally, the image coordinate may be
    rotated from image to image. Here we match fiducials by trying all permutations
    of matches and taking the best fit. We assume that the fiducials are all
    aligned in similar directions; this is a constraint on fiducials placement.

    Each element of ``qr_a``/``qr_b`` is assumed to be a sequence of 2d corner
    points where corner 1 minus corner 0 gives the marker's orientation --
    TODO confirm against the detector's output format.
    """
    qr_a = np.array(qr_a)
    qr_b = np.array(qr_b)
    # Get unit vectors defining our common coordinate system in each image
    ux_a = np.array([0.0, 0.0])
    ux_b = np.array([0.0, 0.0])
    for qr in qr_a:
        ux_a += qr[1] - qr[0]
    for qr in qr_b:
        ux_b += qr[1] - qr[0]
    ux_a /= np.linalg.norm(ux_a)
    ux_b /= np.linalg.norm(ux_b)
    def displacements(qrcodes, ux):
        # NOTE(review): [ux[1], ux[0]] is a reflection of ux, not the
        # 90-degree perpendicular [-ux[1], ux[0]] -- verify this is intended.
        uy = np.array([ux[1], ux[0]])
        #uy_b = np.array([ux_b[1], ux_b[0]])
        # Displacement of every marker relative to marker 0, expressed in the
        # (ux, uy) frame so both images are comparable.
        displacements = []
        for i in range(1, len(qrcodes)):
            d = qrcodes[i][0] - qrcodes[0][0]
            d2 = np.array([np.dot(ux, d), np.dot(uy, d)])
            displacements.append(d2)
        return np.array(displacements)
    # Try every permutation of qr_b; keep the one whose relative layout best
    # matches qr_a in a least-squares sense.
    best_error = float("inf")
    best_permutation = []
    d_a = displacements(qr_a, ux_a)
    for perm in itertools.permutations(qr_b):
        d_perm = displacements(perm, ux_b)
        error = np.sum(np.square(d_perm - d_a))
        if error < best_error:
            best_error = error
            best_permutation = perm
    return qr_a.tolist(), [p.tolist() for p in list(best_permutation)]
from pathlib import Path
import jinja2
def form_render(path: str, **kwargs) -> str:
    """Render the file at ``path`` as a jinja2 template with ``kwargs``."""
    source = Path(path).read_text()
    return jinja2.Template(source).render(**kwargs)
def _names(fg, bg):
    """3/4 bit encoding part

    c.f. https://en.wikipedia.org/wiki/ANSI_escape_code#3.2F4_bit

    Validates the optional foreground/background color names and joins their
    escape codes; unknown names raise, ``None`` maps to an empty code.
    """
    if fg is not None and fg not in _FOREGROUNDS:
        raise ValueError('Invalid color name fg = "{}"'.format(fg))
    if bg is not None and bg not in _BACKGROUNDS:
        raise ValueError('Invalid color name bg = "{}"'.format(bg))
    return _join_codes(_FOREGROUNDS.get(fg, ''), _BACKGROUNDS.get(bg, ''))
def preprocess(code):
    """Preprocess a code by removing comments, version and merging includes."""
    if not code:
        # Empty/None input is passed through untouched.
        return code
    # NOTE: comment removal (remove_comments) is currently disabled.
    return merge_includes(code)
import types
def _from_schemas_get_model(
    stay_within_model: bool, schemas: _oa_types.Schemas, schema: _oa_types.Schema
) -> types.ModelArtifacts:
    """
    Collect the artifacts for a model.

    The schema is assumed to be valid.

    Args:
        stay_within_model: Whether only properties from within a model should
            be included.
        schemas: All defined schemas used to resolve any $ref.
        schema: The schema of the model to get artifacts for.

    Returns:
        The artifacts of the model.
    """
    base = model.get(schemas, schema)
    property_artifacts = _from_schemas_get_properties_artifacts(
        stay_within_model, schemas, schema
    )
    # Copy the model-level artifacts verbatim and attach the materialized
    # property list.
    return types.ModelArtifacts(
        tablename=base.tablename,
        inherits=base.inherits,
        parent=base.parent,
        description=base.description,
        mixins=base.mixins,
        kwargs=base.kwargs,
        composite_index=base.composite_index,
        composite_unique=base.composite_unique,
        backrefs=base.backrefs,
        properties=list(property_artifacts),
    )
def solution2(inp):
    """Solves the second part of the challenge (currently a constant placeholder)."""
    del inp  # input not yet used
    return "done"
import math
def juld_to_grdt(juld: JulianDay) -> GregorianDateTime:
    """Convert a Julian Day to a Gregorian-calendar date and time."""
    # Integer-arithmetic calendar conversion; the constants (36524.25,
    # 365.25, 30.59, ...) follow a standard Julian-day-to-Gregorian scheme --
    # presumably a Fliegel/van Flandern-style algorithm, TODO confirm.
    A = math.floor(juld.julian_day + 68569.5)
    B = juld.julian_day + 0.5
    a = math.floor(A / 36524.25)
    b = A - math.floor(36524.25 * a + 0.75)
    c = math.floor((b + 1) / 365.25025)
    d = b - math.floor(365.25 * c) + 31
    e = math.floor(d / 30.59)
    f = math.floor(e / 11.0)
    # u/v/w: Gregorian year, month, and day (w keeps the day fraction of B).
    u = 100 * (a - 49) + c + f
    v = e - 12 * f + 2
    w = d - math.floor(30.59 * e) + (B % 1)
    # Time of day comes from the JulianDay's separate ``second`` field.
    (hour, minute) = divmod(round(juld.second), 60 * 60)
    # Julian days begin at noon, hence the 12-hour shift.
    hour = (hour + 12) % 24
    (minute, second) = divmod(minute, 60)
    return GregorianDateTime(u, v, math.floor(w), hour, minute, second, None)
def users_all(request):
    """
    Returns name + surname and email of all users except the requester.

    Note: This type of function can only be justified when considering the
    current circumstances: an *INTERNAL* file sharing app (used by staff).
    Hence, all names and emails may be fetched by other authenticated users.
    For example: select the users you want to share a folder with.
    """
    others = User.objects.exclude(id=request.user.id).values(
        "id", "first_name", "last_name", "email")
    return JsonResponse({"users": list(others)})
def _resolve_categorical_entities(request, responder):
    """
    This function retrieves all categorical entities as listed below and filters
    the knowledge base using these entities as filters. The final search object
    containing the shortlisted employee data is returned back to the calling function.

    NOTE(review): ``responder`` is unused here; the signature presumably
    matches the framework's handler convention. ``app`` and ``SIZE`` are
    module-level names defined elsewhere in this file.
    """
    # Finding all categorical entities
    categorical_entities = [e for e in request.entities if e['type'] in
                            ('state', 'sex', 'maritaldesc', 'citizendesc', 'racedesc',
                             'performance_score', 'employment_status', 'employee_source',
                             'position', 'department', 'reason_for_termination')]
    # Building custom search
    qa = app.question_answerer.build_search(index='employee_data')
    # Querying the knowledge base for all categorical filters
    if categorical_entities:
        try:
            for categorical_entity in categorical_entities:
                key = categorical_entity['type']
                if key == 'reason_for_termination':
                    # The knowledge-base field name is abbreviated.
                    key = 'rft'
                val = categorical_entity['value'][0]['cname']
                kw = {key: val}
                qa = qa.filter(**kw)
        except KeyError:
            # Entity without a resolved canonical name: skip remaining filters.
            pass
    # return size of the whole dataset to prevent the execute function from restricting
    # the responses to 10 (Which is the default)
    return qa, SIZE
def filter_c13(df):
    """ Filter predicted formulas with 13C.
    Returns filtered df and n excluded """
    n_before = df.shape[0]
    # Keep only rows whose predicted formula contains no 13C atoms.
    df = df[df['C13'] == 0].reset_index(drop=True)
    n_excluded = n_before - df.shape[0]
    return df, n_excluded
import random
import gzip
def getContent(url):
    """
    Fetch a page that rejects default clients with HTTP 403.

    Sends a randomized User-Agent plus the Host header the server expects,
    then gunzips the response body and decodes it as GBK.
    """
    request = Request(url)
    # Rotate the User-Agent to get past the server's bot filtering.
    request.add_header("User-Agent", random.choice(HEARDERS))
    request.add_header("Host", "datachart.500.com")
    compressed = urlopen(request).read()
    return gzip.decompress(compressed).decode('gbk')
def neighbour(x, y, image):
    """Return 8-neighbours of image point P1(x,y), in a clockwise order"""
    img = image.copy()
    # Clockwise offsets starting at the pixel directly above (x-1, y).
    offsets = ((-1, 0), (-1, 1), (0, 1), (1, 1),
               (1, 0), (1, -1), (0, -1), (-1, -1))
    return [img[x + dx][y + dy] for dx, dy in offsets]
from typing import Optional
from typing import Literal
import os
import pickle
import torch
def _load_saved_files(
    dir_path: str,
    load_adata: bool,
    map_location: Optional[Literal["cpu", "cuda"]] = None,
):
    """Helper to load saved files.

    Reads a saved model directory and returns its constituent parts.

    Args:
        dir_path: directory containing ``attr.pkl``, ``model_params.pt``,
            ``var_names.csv`` and optionally ``adata.h5ad``.
        load_adata: whether to load the saved AnnData; raises if requested
            but no ``adata.h5ad`` exists.
        map_location: forwarded to ``torch.load`` to relocate tensors.

    Returns:
        ``(scvi_setup_dict, attr_dict, var_names, model_state_dict, adata)``
        where ``adata`` is ``None`` when not loaded. ``read`` is presumably
        ``anndata.read`` imported elsewhere in this file -- TODO confirm.
    """
    setup_dict_path = os.path.join(dir_path, "attr.pkl")
    adata_path = os.path.join(dir_path, "adata.h5ad")
    varnames_path = os.path.join(dir_path, "var_names.csv")
    model_path = os.path.join(dir_path, "model_params.pt")
    if os.path.exists(adata_path) and load_adata:
        adata = read(adata_path)
    elif not os.path.exists(adata_path) and load_adata:
        raise ValueError("Save path contains no saved anndata and no adata was passed.")
    else:
        adata = None
    var_names = np.genfromtxt(varnames_path, delimiter=",", dtype=str)
    with open(setup_dict_path, "rb") as handle:
        attr_dict = pickle.load(handle)
    # The setup dict is stored inside the attribute dict; pop it out so the
    # two are returned separately.
    scvi_setup_dict = attr_dict.pop("scvi_setup_dict_")
    model_state_dict = torch.load(model_path, map_location=map_location)
    return scvi_setup_dict, attr_dict, var_names, model_state_dict, adata
def deconv2d(x, kernel, output_shape, strides=(1, 1),
             border_mode='valid',
             dim_ordering='default',
             image_shape=None, filter_shape=None):
    """2D deconvolution (i.e. transposed convolution).

    MXNet-backed implementation (builds an ``mx.sym.Deconvolution`` symbol).

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        output_shape: 1D int tensor for the output shape.
        strides: strides tuple.
        border_mode: string, `"same"` or `"valid"`.
            NOTE(review): unused in the body -- confirm whether the MXNet
            target_shape handling makes it redundant.
        dim_ordering: `"tf"` or `"th"`.
            Whether to use Theano or TensorFlow dimension ordering
            for inputs/kernels/ouputs.
        image_shape: unused, kept for API compatibility.
        filter_shape: unused, kept for API compatibility.

    # Returns
        A tensor, result of transposed 2D convolution.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    # Convert input/kernel/output between the Keras layout and MXNet's.
    x = _preprocess_convnd_input(x, dim_ordering)
    layout_kernel, nb_filter = _layout_kernel(dim_ordering, kernel.shape)
    kernel = _preprocess_deconvnd_kernel(kernel, dim_ordering)
    output_shape = _preprocess_deconvnd_output(output_shape, dim_ordering)
    s = mx.sym.Deconvolution(data=x.symbol, name=kernel.name, kernel=layout_kernel, stride=strides,
                             num_filter=nb_filter, weight=kernel.symbol, no_bias=True, target_shape=output_shape)
    out = _postprocess_convnd_output(KerasSymbol(s), dim_ordering)
    return out
def template(m1, m2):
    """
    Build a simple inspiral gravitational-wave template for a binary with
    component masses ``m1`` and ``m2``.

    :param m1: mass of the first component (units per ``chirp_mass``)
    :param m2: mass of the second component
    :return: tuple ``(t, gw_frequency, t_h, h, c_mass)`` -- time axis and
        frequency sweep, strain time axis and waveform, and the chirp mass.
    """
    c_mass = chirp_mass(m1, m2)
    B = 16.6  # In seconds to - 5/8
    # Frequency sweep f(t) ~ Mc^{-5/8} (tc - t)^{-3/8} -- presumably the
    # leading-order (Newtonian) chirp evolution; TODO confirm units.
    t = np.linspace(0, 0.45, 10000)
    tc = 0.48
    gw_frequency = B * c_mass ** (-5 / 8) * (tc - t) ** (-3 / 8)
    t_h = np.linspace(-450, 0, 10000)
    t_merge_h = 10
    # Phase is the time integral of 2*pi*f for the same frequency law.
    phase = 2 * np.pi * B * c_mass ** (-5 / 8) * (-3 / 8) * (t_merge_h - t_h) ** (5 / 8)
    f = B * c_mass ** (-5 / 8) * (t_merge_h - t_h) ** (-3 / 8)
    # Strain amplitude scales as f^{2/3} toward merger.
    h = f ** (2 / 3) * np.cos(phase)
    return t, gw_frequency, t_h, h, c_mass
def _como_hasheable(matriz):
"""Retorna una copia hasheable (y por tanto inmutable) de `matriz`."""
return tuple(tuple(fila) for fila in matriz) | a6a1c4371536636d45cfabaf0e2d6938b26a8e08 | 27,159 |
def diff(*args, **kwargs):
    """
    Return a diff between two hex lists.

    :param args: exactly two hex lists to compare.
    :param kwargs: ``skip_if_same`` (bool, default True) -- when True,
        positions whose bytes match (case-insensitively) are masked with
        ``"__"``; when False matching bytes are echoed unchanged.
    :return: tuple ``(left, right, diff_indexes)`` mirroring the inputs with
        differing positions kept, plus the list of differing indexes.
    :raises NotImplementedError: if anything other than two lists is given.
    """
    # Bug fix: the previous code only read the kwarg through a truthiness
    # test (`if kwargs.get("skip_if_same", False)`), so an explicit
    # skip_if_same=False was silently ignored and the default kept.
    skip_if_same = kwargs.get("skip_if_same", True)
    if len(args) != 2:
        raise NotImplementedError("Only comparison of the two list are supported")
    result_list = ([], [])
    diff_indexes = []
    # Normalize both inputs and pad the shorter one so positions line up.
    diff_list = [_prepare(a) for a in args]
    _fill_empty_elements(*diff_list)
    for e, _ in enumerate(diff_list[0]):
        if diff_list[0][e].lower() != diff_list[1][e].lower():
            # Differing position: always keep both values and record index.
            for i in range(2):
                result_list[i].append(diff_list[i][e])
            diff_indexes.append(e)
            continue
        if skip_if_same:
            for i in range(2):
                result_list[i].append("__")
        else:
            for i in range(2):
                result_list[i].append(diff_list[i][e])
    return result_list[0], result_list[1], diff_indexes
def gravity_effect(position, other_position):
    """Return effect other_position has on position."""
    if position == other_position:
        return 0
    # Pull toward the other position: -1 when it lies below, +1 when above.
    return -1 if position > other_position else 1
def add_task(request):
    """add new task"""
    if request.method == "POST":
        name = request.POST.get('name')
        if name != '':
            # Valid name: persist the task and go back to the home page.
            task = Task(name=name, priority=request.POST.get('priority'))
            task.save()
            return redirect('/')
        # Empty name: redisplay the full list with an error message.
        msg = 'Please enter the task name.'
        tasks = Task.objects.all()
        return render(request, 'todo_app/home.html', {'msg': msg, 'tasks': tasks})
from operator import mul
def rots(n_phi_pairs):
    """
    Compose the rotations described by the given (axis, angle) pairs into
    the product roti(axis0, angle0) *** roti(axis1, angle1) *** ...

    Because rotation of q by A is achieved through A***q***conj(A),
    rotate(A *** B, q) is the same as rotate(A, rotate(B, q)).

    An empty list yields the identity rotation ``unit()``.
    """
    if not n_phi_pairs:
        return unit()
    # Seed with the first rotation, then fold in the rest left-to-right.
    composed = roti(*n_phi_pairs[0])
    for axis_angle in n_phi_pairs[1:]:
        composed = mul(composed, roti(*axis_angle))
    return composed
def n_cr_shell(thickness, radius, length):
    """
    Critical compressive load for cylindrical shell.

    Calculates the critical load for a cylindrical shell under pure
    compression, assuming a uniform stress distribution, according to
    EN 1993-1-6 [1], Annex D.

    Parameters
    ----------
    thickness : float
        [mm] Shell thickness
    radius : float
        [mm] Cylinder radius
    length : float
        [mm] Cylinder length

    Returns
    -------
    float
        [N] Critical load

    References
    ----------
    .. [1] Eurocode 3: Design of steel structures - Part 1-6: Strength and
       stability of shell structures. Brussels: CEN, 2006.
    """
    t, r, l = (float(v) for v in (thickness, radius, length))
    # Critical meridional stress (Annex D) times the shell section area
    # 2*pi*r*t gives the elastic critical axial load.
    return 2 * np.pi * r * t * sigma_x_rcr(t, r, l)
def unique_slug(s, model, num_chars=50):
    """
    Return slug of num_chars length unique to model.

    `s` is the string to turn into a slug
    `model` is the model we need to use to check for uniqueness

    On collision, a random 10-character suffix is appended, with the base
    truncated so the result still fits within ``num_chars``.
    """
    slug = slugify(s)
    slug = slug[:num_chars].strip('-')
    while True:
        dup = model.objects.filter(slug=slug)
        if not dup:
            return slug
        # Bug fix: the truncation length was hard-coded to 39, which only
        # honoured the default num_chars of 50 (39 + '-' + 10 == 50). Derive
        # it from num_chars so other lengths are respected too.
        slug = slug[:max(num_chars - 11, 0)] + '-' + random_string(10)
def _to_vertexes(data):
    """create points at every vertex, incl holes

    Returns a new GeoTable of Point features, one per vertex of every
    (Multi)LineString or (Multi)Polygon feature; any other geometry type is
    returned as an unmodified copy.
    """
    # create new file
    outfile = GeoTable()
    outfile.fields = list(data.fields)

    def _add_points(row, points):
        # Emit one Point feature per coordinate pair, carrying the row attrs.
        for point in points:
            geoj = {"type": "Point",
                    "coordinates": point}
            outfile.add_feature(row, geoj)

    # loop points
    if "LineString" in data.type:
        for feat in data:
            if "Multi" in feat.geometry["type"]:
                # Bug fix: the original iterated over the misspelled name
                # ``linsetring``, raising NameError for MultiLineStrings.
                for linestring in feat.geometry["coordinates"]:
                    _add_points(feat.row, linestring)
            else:
                _add_points(feat.row, feat.geometry["coordinates"])
        return outfile
    elif "Polygon" in data.type:
        for feat in data:
            if "Multi" in feat.geometry["type"]:
                for polygon in feat.geometry["coordinates"]:
                    for ext_or_hole in polygon:
                        _add_points(feat.row, ext_or_hole)
            else:
                for ext_or_hole in feat.geometry["coordinates"]:
                    _add_points(feat.row, ext_or_hole)
        return outfile
    else:
        return data.copy()
import pickle
def _load(fname) :
"""
Load a cached file and return the resulting object
@param fname: file name
"""
try :
f = open(fname)
return pickle.load(f)
finally :
f.close() | 5fd5496d226c2ff8265b3dafa0b038bb8015ec5d | 27,167 |
import warnings
def reale(x, com="error", tol=None, msg=None, xp=None):
    """Return real part of complex data (with error checking).

    Parameters
    ----------
    x : array-like
        The data to check.
    com : {'warn', 'error', 'display', 'report'}
        Control rather to raise a warning, an error, or to just display to the
        console. If ``com == 'report'``, the relative magnitude of the
        imaginary component is printed to the console.
    tol : float or None
        Allow complex values below ``tol`` in magnitude. If None, ``tol`` will
        be ``1000*eps``.
    msg : str or None
        Additional message to print upon encountering complex values.
    xp : module or None
        Array namespace override passed to ``get_array_module``.

    Notes
    -----
    Based on Jeff Fessler's Matlab function of the same name.
    Python port by Gregory Lee.
    """
    xp, on_gpu = get_array_module(x, xp)
    if not xp.iscomplexobj(x):
        return x
    if tol is None:
        tol = 1000 * xp.finfo(x.dtype).eps
    if com not in ["warn", "error", "display", "report"]:
        # Bug fix: the literal braces around the option set must be escaped
        # ({{...}}), otherwise str.format raised KeyError instead of this
        # ValueError ever being constructed.
        raise ValueError(
            (
                "Bad com: {}. It must be one of {{'warn', 'error', 'display', "
                "'report'}}"
            ).format(com)
        )
    max_abs_x = xp.max(xp.abs(x))
    if max_abs_x == 0:
        if xp.any(xp.abs(xp.imag(x)) > 0):
            raise RuntimeError("max real 0, but imaginary!")
        else:
            return xp.real(x)
    frac = xp.max(xp.abs(x.imag)) / max_abs_x
    if com == "report":
        # Bug fix: "%g%%" % frac * 100 formatted first and then repeated the
        # string 100 times; parenthesize so the fraction becomes a percent.
        print("imaginary part %g%%" % (frac * 100))
    if frac > tol:
        t = "imaginary fraction of x is %g (tol=%g)" % (frac, tol)
        if msg is not None:
            t += "\n" + msg
        if com == "display":
            print(t)
        elif com == "warn":
            warnings.warn(t)
        else:
            raise RuntimeError(t)
    return xp.real(x)
def find_less_than_or_equal(series_key, value):
    """Find the largest value less-than or equal-to the given value.

    Args:
        series_key: An E-Series key such as E24.
        value: The query value.

    Returns:
        The largest value from the specified series which is less-than
        or equal-to the query value.

        NOTE(review): if none of the three nearest candidates is <= value,
        the loop falls through and the function implicitly returns None --
        confirm whether callers rely on that.

    Raises:
        ValueError: If series_key is not known.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    # The three nearest series values bracket the query, so scanning them
    # from largest to smallest yields the biggest one not exceeding it.
    candidates = find_nearest_few(series_key, value, num=3)
    for candidate in reversed(candidates):
        if candidate <= value:
            return candidate
def GetCommentsByMigration(migration):
    """Get the comments for a migration"""
    query = db.Query(models.Comment).filter('migration =', migration)
    # Fetch is capped at 1000 results, the datastore's classic batch limit.
    return list(query.fetch(1000))
def get_proxy_list(html_response):
    """
    Returns list of proxies scraped from html_response.

    :param html_response: Raw HTML text
    :type html_response: unicode
    :rtype: list[unicode]
    :raises ParserError: if the expected textarea block cannot be located.
    """
    try:
        # Newlines become commas so the regex can capture one flat block.
        flattened = html_response.replace("\n", ",")
        block = IPS_REGEXP.findall(flattened)[0]
        return block.split("</textarea>")[0].split(",")
    except Exception:
        raise ParserError()
import fnmatch
def ignore_paths(path_list, ignore_patterns, process=str):
"""
Go through the `path_list` and ignore any paths that match the patterns in `ignore_patterns`
:param path_list: List of file/directory paths.
:param ignore_patterns: List of nukeignore patterns.
:param process: Function to apply to every element in the path list before performing match.
:return: The updated path list
"""
for pattern in ignore_patterns:
path_list = [
n for n in path_list if not fnmatch.fnmatch(process(n), pattern)
]
return path_list | 63196e54eb4505cbe12ebf77d2a42fede68c1d0b | 27,172 |
import requests
def check_static(url):
    """
    Check viability of static links on cf.gov home page and sub-pages.

    Fetches ``url``, extracts its static asset links and requests each one,
    returning an ANSI-colored pass/fail summary string.

    Example call to check static assets in production:
    ./cfgov/scripts/static_asset_smoke_test.py -v /ask-cfpb/ /owning-a-home/
    Example of local check of home page:
    ./cfgov/scripts/static_asset_smoke_test.py -v --base http://localhost:8000

    NOTE(review): ``extract_static_links``, ``CFPB_BASE`` and ``logger`` are
    module-level names defined elsewhere in this file.
    """
    count = 0
    failures = []
    response = requests.get(url)
    if not response.ok:
        return "\x1B[91mFAIL! Request to {} failed ({})".format(
            url, response.reason
        )
    static_links = extract_static_links(response.content)
    for link in static_links:
        count += 1
        # Root-relative links resolve against the site base; anything else
        # resolves against the page URL itself.
        if link.startswith("/"):
            final_url = "{}{}".format(CFPB_BASE, link)
        else:
            final_url = "{}{}".format(url, link)
        code = requests.get(
            final_url, headers={"referer": CFPB_BASE}
        ).status_code
        if code == 200:
            logger.info("checked {}".format(final_url))
        else:
            failures.append((link, code))
    if failures:
        if len(failures) > 2:  # allow for font failures when testing locally
            return (
                "\x1B[91mFAIL! {} static links out of {} failed "
                "for {}: {}\x1B[0m\n".format(
                    len(failures), count, url, failures
                )
            )
        else:
            return (
                "\x1B[91mPartial failure: {} static links out of {} failed"
                " for {}: {}\x1B[0m\n".format(
                    len(failures), count, url, failures
                )
            )
    else:
        return "\x1B[32m{} static links passed " "for {}\x1B[0m\n".format(
            count, url
        )
def validation_error_handler(err):
    """
    Used to parse use_kwargs validation errors.

    Builds a wrapped 400 response from a validation error, preserving any
    headers the error carries.
    """
    headers = err.data.get("headers", None)
    messages = err.data.get("messages", ["Invalid request."])
    schema = ResponseWrapper()
    # Bug fix: ``messages`` is only a dict when validation actually produced
    # per-location messages; the fallback default above is a list, on which
    # ``.get("json")`` raised AttributeError.
    data = messages.get("json", None) if isinstance(messages, dict) else messages
    error_msg = "Sorry validation errors occurred"
    if headers:
        return schema.dump({"data": data, "message": error_msg}), 400, headers
    return schema.dump({"data": data, "message": error_msg}), 400
def is_element(a, symbol="C"):
    """Return True when atom ``a`` is of the given element ``symbol``."""
    atom_symbol = element(a)
    return atom_symbol == symbol
def create_call_status(job, internal_storage):
    """ Creates a call status class based on the monitoring backend"""
    backend = job.config['lithops']['monitoring']
    # Backend name maps to a class like StorageCallStatus / RabbitmqCallStatus.
    status_cls = getattr(lithops.worker.status,
                         '{}CallStatus'.format(backend.capitalize()))
    return status_cls(job, internal_storage)
from pathlib import Path
import logging
def file_handler() -> RotatingFileHandler:
    """Create a file-based error handler."""
    log_path = Path("log") / "error.log"
    handler = RotatingFileHandler(
        log_path,
        maxBytes=50_000,
        backupCount=5,
        delay=True,  # don't open the file until the first record is emitted
    )
    handler.setLevel(logging.ERROR)
    log_format = "[%(asctime)s] %(levelname)s in %(module)s: %(message)s"
    handler.setFormatter(logging.Formatter(log_format))
    return handler
def get_additional_node_groups(node_name, deployment_id):
    """This enables users to reuse hosts in multiple groups."""
    try:
        client = get_rest_client()
    except KeyError:
        # No REST client available in this context: report no extra groups.
        return []
    deployment = client.deployments.get(deployment_id)
    return [
        group_name
        for group_name, group in deployment.get('groups', {}).items()
        if group_name and node_name in group.get('members', [])
    ]
def is_requirement(line):
    """
    Return True if the requirement line is a package requirement.

    Returns:
        bool: True if the line is not blank, a comment, a URL, or
        an included file
    """
    # bool() guards the blank-line case: the bare ``line and ...`` form
    # returned the empty string itself instead of the documented bool.
    return bool(line) and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
def pden(s, t, p, pr=0):
    r"""
    Calculates potential density of water mass relative to the specified
    reference pressure by pden = dens(S, ptmp, PR).

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].
    pr : number
        reference pressure [db], default = 0

    Returns
    -------
    pden : array_like
        potential density relative to the ref. pressure [kg m :sup:3]

    Examples
    --------
    >>> # Data from Unesco Tech. Paper in Marine Sci. No. 44, p22.
    >>> import seawater as sw
    >>> from seawater.library import T90conv
    >>> s = [0, 0, 0, 0, 35, 35, 35, 35]
    >>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
    >>> p = [0, 10000, 0, 10000, 0, 10000, 0, 10000]
    >>> sw.pden(s, t, p)
    array([  999.842594  ,   999.79523994,   995.65113374,   996.36115932,
            1028.10633141,  1028.15738545,  1021.72863949,  1022.59634627])

    :math:`\sigma_{4}` (at 4000 db)

    >>> sw.pden(s, t, p, 4000) - 1000
    array([ 19.2895493 ,  19.33422519,  12.43271053,  13.27563816,
            46.30976432,  46.48818851,  37.76150878,  38.74500757])

    References
    ----------
    .. [1] A.E. Gill 1982. p.54 Eqn. 3.7.15 "Atmosphere-Ocean Dynamics"
       Academic Press: New York. ISBN: 0-12-283522-0
    """
    s, t, p, pr = map(np.asanyarray, (s, t, p, pr))
    # Potential temperature referenced to pr, then in-situ density at pr.
    pt = ptmp(s, t, p, pr)
    return dens(s, pt, pr)
import typing
def SWAP(first: int, second: int, control: typing.Union[int, list] = None, power: float = None) -> QCircuit:
    """
    SWAP gate; the order of the two targets does not matter.

    Parameters
    ----------
    first: int
        target qubit
    second: int
        target qubit
    control
        int or list of ints
    power
        numeric type (fixed exponent) or hashable type (parametrized exponent)

    Returns
    -------
    QCircuit
    """
    targets = [first, second]
    return _initialize_power_gate(name="SWAP", target=targets,
                                  control=control, power=power)
def stable_normalize(x, etha=1.0e-8):
    """ Numerically stable vector normalization.

    Normalizes ``x`` along its last axis, clamping the norm from below by
    ``etha`` so zero (or near-zero) vectors do not produce NaN/inf.

    Args:
        x: array-like; vectors live along the last axis.
        etha: lower bound applied to the norm before dividing.

    Returns:
        ``x`` divided by ``max(||x||, etha)``, broadcast over leading axes.
    """
    n = np.linalg.norm(x, axis=-1, keepdims=True)
    # Bug fix / generalization: the previous scalar truth-test ``if n < etha``
    # raised "truth value is ambiguous" for batched input and replaced the
    # whole norm array at once. An elementwise clamp is identical for a
    # single vector and correct for batches.
    return x / np.maximum(n, etha)
import time
from functools import reduce
def Word2VecFeatureGenerator(df):
    """
    Finds and returns word embedding for the head and body
    and computes cosine similarity.

    ``model`` is a module-level pre-trained word-vector model (presumably
    50-dimensional, given the ``[0.]*50`` seed below -- TODO confirm), and
    ``preprocess_data``/``normalize``/``cosine_sim`` are helpers defined
    elsewhere in this file.

    Input: DataFrame with 'Headline' and 'articleBody' columns
    Returns list(headlineVec, bodyVec, simVec)"""
    t0 = time()
    print("\n---Generating Word2Vector Features:---")
    df["Headline_unigram_vec"] = df["Headline"].map(lambda x: preprocess_data(x, exclude_stopword=False, stem=False))
    df["articleBody_unigram_vec"] = df["articleBody"].map(lambda x: preprocess_data(x, exclude_stopword=False, stem=False))
    # Document vector built by multiplying together all the word vectors
    # using Google's pre-trained word vectors
    # NOTE(review): despite the comment above, the vectors are summed
    # (np.add), not multiplied.
    Headline_unigram_array = df['Headline_unigram_vec'].values
    # word vectors weighted by normalized tf-idf coefficient?
    headlineVec = list(map(lambda x: reduce(np.add, [model[y] for y in x if y in model], [0.]*50), Headline_unigram_array))
    headlineVec = np.array(headlineVec)
    #headlineVec = np.exp(headlineVec)
    headlineVec = normalize(headlineVec)
    print ('headlineVec.shape: ', headlineVec.shape)
    Body_unigram_array = df['articleBody_unigram_vec'].values
    bodyVec = list(map(lambda x: reduce(np.add, [model[y] for y in x if y in model], [0.]*50), Body_unigram_array))
    bodyVec = np.array(bodyVec)
    bodyVec = normalize(bodyVec)
    print ('bodyVec.shape: ', bodyVec.shape)
    # compute cosine similarity between headline/body word2vec features
    simVec = np.asarray(list(map(cosine_sim, headlineVec, bodyVec)))[:, np.newaxis]
    print ('simVec.shape: ', simVec.shape)
    print("---Word2Vector Features is complete---")
    print("Time taken {} seconds\n".format(time() - t0))
    return [headlineVec, bodyVec, simVec]
def reachable(Adj, s, t):
    """
    Return True if the edges in the adjacency-list graph ``Adj`` admit a
    directed path from ``s`` to ``t``.

    Adj maps each vertex to an iterable of its out-neighbours. This
    routine is one of the most-used and most time-consuming of the whole
    procedure, which is why it takes an adjacency-list rep rather than a
    list of vertices and edges: the adjacency list is easy to update
    when a new edge is committed to in RP.
    """
    visited = {s}      # vertices already discovered
    frontier = [s]     # stack of vertices still to expand (DFS order)
    while frontier:
        node = frontier.pop()
        for nbr in Adj[node]:
            if nbr == t:
                return True
            if nbr not in visited:
                visited.add(nbr)
                frontier.append(nbr)
    return False
def remove_links(
    actor: Actor, company: Company, *, facebook=False, linkedin=False, twitter=False
) -> Response:
    """Remove links to all existing Online Profiles.

    Delegates to ``update_profiles`` with every ``specific_*`` URL set to
    the empty string; only the profiles flagged True are updated.
    """
    # Blank "specific_*" values clear the stored profile links.
    result = update_profiles(
        actor,
        company,
        facebook=facebook,
        linkedin=linkedin,
        twitter=twitter,
        specific_facebook="",
        specific_linkedin="",
        specific_twitter="",
    )
    return result[0]
def get_basis_psd(psd_array, notes):
    """Compute the average PSD per syllable over the training set.

    These per-note averages serve as the basis set for later comparison.

    :param psd_array: 2-D array of PSDs, one row per note occurrence.
    :param notes: note string aligned with the rows of ``psd_array``.
    :returns: (psd_basis_list, syl_basis_list) — average PSD vectors and
        the matching syllable labels.
    """
    psd_dict = {}
    psd_basis_list = []
    syl_basis_list = []
    unique_notes = unique(notes)  # unique syllables present in `notes`
    # Drop unidentifiable notes (e.g., '0' or 'x') before averaging.
    for bad in ('0', 'x'):
        if bad in unique_notes:
            unique_notes.remove(bad)
    for note in unique_notes:
        ind = find_str(notes, note)
        # Only notes with enough examples contribute to the basis.
        if len(ind) >= num_note_crit_basis:
            avg_psd = psd_array[ind, :].mean(axis=0)
            psd_basis_list.append(avg_psd)
            syl_basis_list.append(note)
            psd_dict[note] = avg_psd  # kept for parity; not returned
    return psd_basis_list, syl_basis_list
def _get_columns(statement):
    """Get the available columns in the query `statement`.

    :param statement: A parsed SQL SELECT statement (sqlparse).
    :returns: list of column names selected, or None if no SELECT / no
        column token is found.
    """
    seen_select = False
    for tok in statement.tokens:
        if tok.is_whitespace():
            continue
        if tok.ttype is sqlparse.tokens.DML and tok.value.upper() == "SELECT":
            seen_select = True
        elif seen_select:
            # First non-whitespace token after SELECT holds the columns.
            return [t.value for t in tok.flatten()
                    if t.ttype is sqlparse.tokens.Name]
from pathlib import Path
import logging
def config_logging(level=logging.INFO, section="main", mp=False) -> Path:
    """Configure logging to log to a file and to the screen.

    ``mp`` stands for multiprocessing (named so as not to shadow that
    package). Safe to call more than once: handlers are installed only on
    the first call, guarded by the module-level ``logging_set`` flag, but
    the log-file path is always returned.

    :param level: logging level for the root logger.
    :param section: section name forwarded to ``_get_log_path``.
    :param mp: also install the multiprocessing log handler.
    :returns: path of the log file.
    """
    # NOTE: subprocess calls, pytest, etc. seem to automatically add
    # handlers even if we never set any, so checking root.handlers is
    # unreliable — hence the explicit module-level flag.
    global logging_set
    # Compute the path unconditionally so repeat calls still return a
    # Path (previously it was only bound inside the first-call branch,
    # making a second call fail on `return path`).
    path = _get_log_path(section)
    if not logging_set:
        # without this it doesn't work
        logging.root.handlers = []
        logging.basicConfig(level=level,
                            format='%(asctime)s-%(levelname)s: %(message)s',
                            handlers=[logging.StreamHandler(),
                                      logging.FileHandler(path)])
        logging.captureWarnings(True)
        # Multiprocessing support is opt-in: it slows logging down and
        # doesn't flush after every call, which ruins logging unit tests.
        # See: https://github.com/jruere/
        # multiprocessing-logging/issues/51#issue-925800880
        if mp:
            multiprocessing_logging.install_mp_handler()
        logging.debug("initialized logger")
        logging_set = True
    return path
def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1):
    """
    Create a list of modules that together constitute a single conv layer
    with non-linearity and optional batchnorm/groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
        padding (int): add zero-padding to the input

    Return:
        list of tuple (name, module)
    """
    assert 'c' in order, "Conv layer MUST be present"
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
    modules = []
    for idx, op in enumerate(order):
        if op == 'r':
            modules.append(('ReLU', nn.ReLU(inplace=True)))
        elif op == 'l':
            modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True)))
        elif op == 'e':
            modules.append(('ELU', nn.ELU(inplace=True)))
        elif op == 'c':
            # Learnable bias only in the absence of batchnorm/groupnorm.
            bias = 'g' not in order and 'b' not in order
            modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding)))
        elif op == 'g':
            assert not idx < order.index('c'), 'GroupNorm MUST go after the Conv3d'
            # Number of groups must be <= the number of channels.
            groups = min(num_groups, out_channels)
            modules.append(('groupnorm', nn.GroupNorm(num_groups=groups, num_channels=out_channels)))
        elif op == 'b':
            # BatchNorm before the conv normalizes the input channels,
            # after the conv the output channels.
            channels = in_channels if idx < order.index('c') else out_channels
            modules.append(('batchnorm', nn.BatchNorm3d(channels)))
        else:
            raise ValueError(f"Unsupported layer type '{op}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']")
    return modules
def expand_json(metadata, context=DEFAULT_CONTEXT):
    """
    Expand json, but be sure to use our documentLoader.

    By default this expands with DEFAULT_CONTEXT; pass ``context=None``
    if you do not need an expansion context.
    # @@: Is the above a good idea? Maybe it should be set to None by
    # default.
    """
    options = {"documentLoader": load_context}
    if context is not None:
        options["expandContext"] = context
    return jsonld.expand(metadata, options=options)
import os
def prepare_test_data() -> dict:
    """
    Load data01.nc and manipulate to create additional test data.
    Used to load data into data_dict below.

    Builds, from the single reference Dataset ``data/data01.nc``, a family
    of derived Datasets/DataArrays (shifted, reversed, transposed, renamed,
    irregular, and "strange" coordinate variants) for exercising coordinate
    handling in climapy.

    NOTE(review): uses the module-level ``np_rand`` RNG; results are only
    reproducible if that generator is seeded elsewhere — TODO confirm.
    """
    # Dictionary in which to store data
    data_dict = {}
    # Load data01.nc Dataset (times left undecoded; file auto-closed)
    data01 = xr.open_dataset(os.path.dirname(__file__)+'/data/data01.nc',
                             decode_times=False, autoclose=True)
    data_dict['data01'] = data01.copy()
    # Extract two *DataArrays* - to test functions with DataArrays
    da_ts = data01['TS'].copy()
    da_precl = data01['PRECL'].copy()
    data_dict['da_ts'] = da_ts.copy()
    data_dict['da_precl'] = da_precl.copy()
    # Dataset with *shifted* longitudes
    ds_shift_lon = climapy.xr_shift_lon(data01.copy())
    data_dict['ds_shift_lon'] = ds_shift_lon.copy()
    # Datasets with *reversed* lon/lat coordinates and data
    ds_rev_lon = data01.copy()
    ds_rev_lon['lon'].values = ds_rev_lon['lon'].values[::-1]
    for var_name in ['TS', 'PRECL']:  # array order: time, lat, lon
        ds_rev_lon[var_name].values = ds_rev_lon[var_name].values[:, :, ::-1]
    ds_rev_lat = data01.copy()
    ds_rev_lat['lat'].values = ds_rev_lat['lat'].values[::-1]
    for var_name in ['TS', 'PRECL']:
        ds_rev_lat[var_name].values = ds_rev_lat[var_name].values[:, ::-1, :]
    ds_rev_both = data01.copy()
    ds_rev_both['lat'].values = ds_rev_both['lat'].values[::-1]
    ds_rev_both['lon'].values = ds_rev_both['lon'].values[::-1]
    for var_name in ['TS', 'PRECL']:
        ds_rev_both[var_name].values = ds_rev_both[var_name].values[:, ::-1, ::-1]
    data_dict['ds_rev_lon'] = ds_rev_lon.copy()
    data_dict['ds_rev_lat'] = ds_rev_lat.copy()
    data_dict['ds_rev_both'] = ds_rev_both.copy()
    # Dataset with *transposed* lon/lat coords
    ds_transposed = data01.copy()
    ds_transposed = ds_transposed.transpose()
    data_dict['ds_transposed'] = ds_transposed.copy()
    # Dataset with *renamed* longitude and latitude coords
    ds_renamed = data01.copy()
    ds_renamed = ds_renamed.rename({'lon': 'longitude', 'lat': 'latitude'})
    data_dict['ds_renamed'] = ds_renamed.copy()
    # Datasets with slightly *irregular* lon/lat coords, yet still monotonic
    nx, ny = data01['lon'].size, data01['lat'].size
    lon_irr = (data01['lon'].values +
               np_rand.uniform(low=-0.5, high=0.5, size=nx))  # add small amount of noise
    lon_irr[[0, -1]] = data01['lon'].values[[0, -1]]  # keep end values unchanged
    lat_irr = (data01['lat'].values +
               np_rand.uniform(low=-0.5, high=0.5, size=ny))
    lat_irr[[0, -1]] = data01['lat'].values[[0, -1]]
    ds_irr_lon = data01.copy()
    ds_irr_lon['lon'].values = lon_irr.copy()
    ds_irr_lat = data01.copy()
    ds_irr_lat['lat'].values = lat_irr.copy()
    ds_irr_both = data01.copy()
    ds_irr_both['lon'].values = lon_irr.copy()
    ds_irr_both['lat'].values = lat_irr.copy()
    data_dict['ds_irr_lon'] = ds_irr_lon.copy()
    data_dict['ds_irr_lat'] = ds_irr_lat.copy()
    data_dict['ds_irr_both'] = ds_irr_both.copy()
    # Dataset with *strange* lon/lat coords - very irregular and not monotonic
    lon_strange = (data01['lon'].values +
                   np_rand.uniform(low=-10, high=10, size=nx))  # add large amount of noise
    lon_strange[[0, -1]] = data01['lon'].values[[0, -1]]  # keep end values unchanged
    lat_strange = (data01['lat'].values + np_rand.uniform(low=-10, high=10, size=ny))
    lat_strange[[0, -1]] = data01['lat'].values[[0, -1]]  # keep end values unchanged
    ds_strange = data01.copy()
    ds_strange['lon'].values = lon_strange.copy()
    ds_strange['lat'].values = lat_strange.copy()
    data_dict['ds_strange'] = ds_strange.copy()
    # Return dictionary of data
    return data_dict
import os
def path(path_list):
    """
    Returns a path for the given folders structure, rooted at MAIN_FOLDER.

    :path_list: iterable where each element is a folder name; it may end
        in a file name.
    """
    # os.path.join accepts all components at once; this also avoids the
    # original's shadowing of the function name by a local variable.
    return os.path.join(MAIN_FOLDER, *path_list)
def get_kubernetes_bearer_token():
    """Reads the bearer token required to call the Kubernetes master from a file.

    The file is installed in every container within a Kubernetes pod by the
    Kubelet. The path to the file is documented at
    https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/accessing-the-cluster.md.
    The token is cached in a module-level global after the first read.

    Returns:
      The contents of the token file as a string for use in the Authorization
      header as a bearer token: 'Authorization: Bearer <token>'

    Raises:
      IOError: if cannot open the token file.
      CollectorError: if the file is empty.
    """
    # TODO(eran): add a lock around the global KUBERNETES_BEARER_TOKEN.
    global KUBERNETES_BEARER_TOKEN
    if KUBERNETES_BEARER_TOKEN:
        return KUBERNETES_BEARER_TOKEN
    with open(KUBERNETES_BEARER_TOKEN_FILE, 'r') as token_file:
        KUBERNETES_BEARER_TOKEN = token_file.read()
    if not KUBERNETES_BEARER_TOKEN:
        raise collector_error.CollectorError(
            'Cannot read Kubernetes bearer token from %s' %
            KUBERNETES_BEARER_TOKEN_FILE)
    return KUBERNETES_BEARER_TOKEN
import os
import copy
from datetime import datetime
def show(c, verbose, config):
    """Show current position.

    Dispatches on the command token ``c``: 'bord' opens the GUI board;
    'c'/'n'/'p'/'j'/'initial'/digits print the board; 'a'/'ac' solve and
    print; 'u' prints a URL; 'jpg'/'jm' draw an image; 'i'/'ii'/'iii'
    give hints of increasing detail; 'sp' auto-plays forced singles.

    :param c: command token selecting the action.
    :param verbose: verbosity forwarded to ``solveprint``.
    :param config: configuration mapping (file, move list, pointer,
        bookmark, datadir, ...); may be mutated and is returned.
    :returns: tuple ``(config, err)`` — ``err`` True on failure.
    """
    file = os.path.expanduser(config["file"])
    move = config["move"]
    level = int(config["level"])
    pointer = config['pointer']
    maxtime = int(config['maxtime'])
    bookmark = config["bookmark"]
    # If filename is written in bookmark, read bookmark from the file
    if type(config['bookmark']) is str:
        bookmark = ConfigObj(os.path.expanduser(bookmark),
                             encoding='utf-8')['bookmark']
    n = pointer[level]
    datadir = config["datadir"]
    # NOTE(review): this handle is never closed on the level==0 path.
    infile = open(file, 'r')
    no = 0
    if level == 0: # bookmark: look the problem up by its label
        if n not in bookmark:
            print('Label {0} not found in bookmark.'.format(n))
            return config, False
        if 'problem' not in bookmark[n]:
            print('Label {0} broken.'.format(n))
            return config, False
        problem = bookmark[n]['problem']
    else:
        # Scan the problem file for the n-th entry at this level.
        n = int(n)
        infile = open(file, 'r')
        no = 0
        problem = ''
        for lin in infile:
            data = lin.strip().split(' ')
            if int(data[0]) == level:
                no += 1
                if no == n:
                    # NOTE(review): missing parentheses — .close is not
                    # actually called here (or below); file handle leaks.
                    infile.close
                    problem = data[1]
                    break
        infile.close
        if problem == '':
            print('Level {0} no. {1} not found.'.format(level, n))
            return config, True # not found
    # Convert the problem string into the internal board representation.
    s, err = conv(problem)
    if err:
        print(s)
        print('This problem is not valid.')
        return config, True
    # Build a human-readable label for the current problem.
    if level == 0:
        if 'comment' in bookmark[n]:
            label = bookmark[n]['comment']
        else:
            label = str(n)
    else:
        label = 'Level ' + str(level) + ' No. ' + str(n)
    if c == 'bord':
        # Interactive GUI board; the edited move list is stored back.
        app = GUIBord()
        app.display(s,move)
        config['move'] = app.move
    if len(move) > 0:
        # Replay the recorded moves onto the board.
        s, move, message, err = current(s, move)
        if err:
            config['move'] = move
            print(message)
        if blank(s) == 0:
            label += ': solution'
        else:
            label += ': move ' + str(len(move))
    if c == 'c' or c == 'n' or c == 'p' or c == 'j' or c == 'initial' or c.isdigit():
        # Text display of the current position.
        print(label)
        print(output(s))
        if blank(s) == 0:
            print('Now this problem is solved !')
        else:
            print('Type 3 digits (row, column, number) to put a number. i for hint.')
        if datadir != '':
            datadir = checkdatadir(datadir)
            config['datadir'] = datadir
            move = config['move']
            if len(move) > 0:
                # Decode the last move into a 0-80 cell index for highlighting.
                lastmove = int(move[len(move) - 1]) // 10
                lastmove = (lastmove // 10 - 1) * 9 + lastmove % 10 - 1
            else:
                lastmove = 100
            imgfile = datadir + '/current.jpg'
            figure = config['figure']
            err = drawimage(s, '', lastmove, label, imgfile,
                            figure, False)
            if not err:
                print('See image by "html".')
    if c == 'a' or c == 'ac':
        # Solve and print the full solution.
        print('\n' + label)
        solveprint(s, verbose, blank(s), maxtime)
    if c == 'u':
        print(url(s))
    if c == 'jpg' or c == 'jm': # jpg: draw board image, 'jm' marks candidates
        p = possible(s)
        datadir = checkdatadir(datadir)
        config['datadir'] = datadir
        imgfile = datadir + '/current.jpg'
        if c == 'jpg':
            mark = False
        if c == 'jm':
            mark = True
        move = config['move']
        if len(move) > 0:
            lastmove = int(move[len(move) - 1]) // 10
            lastmove = (lastmove // 10 - 1) * 9 + lastmove % 10 - 1
        else:
            lastmove = 100
        figure = config['figure']
        err = drawimage(s, p, lastmove, label, imgfile, figure, mark)
        if not err:
            print('See image by "html".')
    if c == 'i' or c == 'ii' or c == 'iii' or c == 'sp': # prepare solving
        if blank(s) == 0:
            print('Already solved.')
            return config, True
        # Verify the position is uniquely solvable before hinting.
        s2 = copy.copy(s)
        s2, message, level2, solved, err = solve(s2, 0, blank(s), maxtime)
        if err:
            if solved:
                print('This position has multiple solutions.')
            else:
                print(
                    'There is no solution for this position. '
                    + 'You can take back one move with b.')
            return config, True
        p = possible(s)
        b = box()
        pb = pbox()
        blank1 = blank(s)
        linescan = line()
        # NOTE(review): with `from datetime import datetime` in scope,
        # `datetime.datetime.now()` / `datetime.timedelta` would fail —
        # presumably the module actually does `import datetime`; confirm.
        start = datetime.datetime.now()
        endtime = start + datetime.timedelta(seconds=maxtime)
        if c == 'i' or c == 'ii' or c == 'iii': # show hint
            s2 = copy.copy(s)
            s2, p, message, logic, depth, found, err = solveone(
                s2, p, 4, 0, blank1, endtime, b, pb, linescan)
            if logic == 'Naked single' or logic == 'Hidden single':
                # Easy hint: point at the cell / logic name only.
                if logic == 'Naked single':
                    print('Look at {0}. What number is available?'.format(
                        message[14:18]))
                else:
                    print(message[:message.index(':')] + 'can be found.')
            else:
                if c == 'i':
                    print('Think candidates of the cells.')
                    if datadir == '':
                        print('Use jm command to see the diagram of candidates.')
                    else:
                        datadir = checkdatadir(datadir)
                        config['datadir'] = datadir
                        imgfile = datadir + '/current.jpg'
                        p = possible(s)
                        move = config['move']
                        if len(move) > 0:
                            lastmove = int(move[len(move) - 1]) // 10
                            lastmove = (lastmove // 10 - 1) * 9 + lastmove % 10 - 1
                        else:
                            lastmove = 100
                        figure = config['figure']
                        err = drawimage(s, p, lastmove, label, imgfile,
                                        figure, True)
                        if not err:
                            print('See image by "html".')
                    print('For more hints, type ii.')
                if c == 'ii' or c == 'iii':
                    # Collect every logic step needed for the next placement.
                    logi = [logic]
                    mes = [message]
                    while blank(s2) == blank1:
                        s2, p, message, logic, depth, found, err = solveone(
                            s2, p, 4, 0, blank1, endtime, b, pb, linescan)
                        logi.append(logic)
                        mes.append(message)
                    if c == 'ii':
                        if len(logi) > 1:
                            print('Following logics are successively used.')
                        for i in range(len(logi)):
                            print(logi[i])
                        print('See full explanation by typing iii.')
                    else:
                        for message in mes:
                            print(message)
        if c == 'sp':
            # Auto-play all forced moves (naked/hidden singles), recording them.
            logic = 'Naked single'
            while (logic == 'Naked single' or logic == 'Hidden single') and blank(s) > 0:
                s2 = copy.copy(s)
                s, p, message, logic, depth, found, err = solveone(
                    s, p, 4, 0, blank(s), endtime, b, pb, linescan)
                if logic == 'Naked single' or logic == 'Hidden single':
                    print(message)
                    for i in range(81):
                        if s[i] != s2[i]:
                            # Encode the placed digit as a move number.
                            j = (i // 9) * 10 + i % 9 + 11
                            m = j * 10 + s[i]
                            move.append(m)
                else:
                    s = s2
            config, err = show('c', 0, config)
    return config, False
def parsemsg(s, encoding="utf-8"):
    """Parse an IRC Message from s

    :param s bytes: bytes to parse
    :param encoding str: encoding to use (Default: utf-8)
    :returns tuple: parsed message in the form of (prefix, command, args)
    """
    s = s.decode(encoding, 'replace')
    prefix = u("")
    # Leading ':' marks an optional prefix segment.
    if s.startswith(u(":")):
        prefix, s = s[1:].split(u(" "), 1)
        prefix = parseprefix(prefix)
    # ' :' introduces the trailing parameter, which may contain spaces.
    if u(" :") in s:
        head, trailing = s.split(u(" :"), 1)
        args = head.split()
        args.append(trailing)
    else:
        args = s.split()
    rest = iter(args)
    command = next(rest, None)
    if command is not None:
        command = str(command)
    return prefix, command, list(rest)
def concatenate_unique(la, lb):
    """Append to `la` every element of `lb` that is not there already.

    The elements added to `la` keep their relative ordering from `lb`.

    Args:
      la: List of Python objects (mutated in place).
      lb: List of Python objects.

    Returns:
      `la`, extended with the missing elements of `lb`.
    """
    seen = set(la)
    for item in lb:
        if item in seen:
            continue
        seen.add(item)
        la.append(item)
    return la
from datetime import datetime
def email_last_send_for_sub(sub_id):
    """Return when an email was last sent for a subscription, or None.

    :param sub_id: subscription identifier used to build the store key.
    :returns: naive ``datetime`` parsed from the stored timestamp string,
        or ``None`` when no send has been recorded.
    """
    last_sent = db.get('email_sub_last_sent:{}'.format(sub_id))
    if last_sent is None:
        return None
    # `datetime` is the class here (`from datetime import datetime`), so
    # the original `datetime.datetime.strptime` raised AttributeError.
    return datetime.strptime(last_sent, '%Y-%m-%dT%H:%M:%SZ')
def is_output_op(node):
    """Return true when the node is the output of the graph."""
    selected_field = node.WhichOneof("op_type")
    return selected_field == "output_conf"
import torchvision
def plot_samples_close_to_score(ood_dict: dict, dataset_name: str, min_score: float, max_score: float, n: int = 32,
                                do_lesional: bool = True, show_ground_truth: bool = False,
                                print_score: bool = False) -> None:
    """Arrange slices in a grid such that each slice displayed is closest to the interpolation OOD score from
    linspace which goes from min_score to max_score with n samples.

    :param ood_dict: per-dataset mapping holding OOD scores, scans, masks
        and segmentations — assumes keys 'healthy', 'healthy_scans',
        'masks', 'healthy_segmentations' (and 'lesional' variants); TODO confirm
    :param dataset_name: key selecting the dataset inside ``ood_dict``
    :param min_score: lowest reference OOD score of the interpolation
    :param max_score: highest reference OOD score of the interpolation
    :param n: number of grid cells / reference scores
    :param do_lesional: also build and show the lesional grids
    :param show_ground_truth: also show the ground-truth segmentation grids
    :param print_score: log the scores actually picked for each cell
    """
    ood_dict = ood_dict[dataset_name]
    # Evenly spaced reference scores; one displayed slice per score.
    ref_scores = np.linspace(min_score, max_score, n)

    def create_ood_grids(healthy_leasional: str):
        # Build (slices_grid, segmentations_grid) for 'healthy' or 'lesional'.
        scores = ood_dict[healthy_leasional]
        slices = ood_dict[f'{healthy_leasional}_scans']
        masks = ood_dict['masks']
        segmentations = ood_dict[f'{healthy_leasional}_segmentations']
        final_scores = []
        final_slices = []
        final_masks = []
        final_segmentations = []
        for ref_score in ref_scores:
            # Pick the sample whose actual score is closest to the reference.
            scores_idx = get_idx_of_closest_value(scores, ref_score)
            final_scores.append(scores[scores_idx])
            final_slices.append(slices[scores_idx])
            final_masks.append(masks[scores_idx])
            if show_ground_truth:
                final_segmentations.append(segmentations[scores_idx])
        # Normalize intensities and zero out the background before gridding.
        final_slices = [normalize_to_0_1(s) for s in final_slices]
        final_slices = [mask_background_to_zero(s, m) for s, m in zip(final_slices, final_masks)]
        slices_grid = torchvision.utils.make_grid(final_slices, padding=0, normalize=False)
        segmentations_grid = None
        if show_ground_truth:
            segmentations_grid = torchvision.utils.make_grid(final_segmentations, padding=0, normalize=False)
        if print_score:
            formatted_scores = [f'{val:.2f}' for val in final_scores]
            LOG.info(f'Scores: {formatted_scores}')
        return slices_grid, segmentations_grid

    healthy_slices_grid, healthy_segmentations_grid = create_ood_grids('healthy')
    imshow_grid(healthy_slices_grid, one_channel=True, figsize=(12, 8),
                title=f'Healthy {dataset_name} {min_score}-{max_score}', axis='off')
    if show_ground_truth:
        imshow_grid(healthy_segmentations_grid, one_channel=True, figsize=(12, 8),
                    title=f'Healthy Ground Truth {dataset_name} {min_score}-{max_score}', axis='off')
    if do_lesional:
        lesional_slices_grid, lesional_segmentations_grid = create_ood_grids('lesional')
        imshow_grid(lesional_slices_grid, one_channel=True, figsize=(12, 8),
                    title=f'Lesional {dataset_name} {min_score}-{max_score}', axis='off')
        if show_ground_truth:
            imshow_grid(lesional_segmentations_grid, one_channel=True, figsize=(12, 8),
                        title=f'Lesional Ground Truth {dataset_name} {min_score}-{max_score}', axis='off')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.