# content | sha1 | id |  (dataset-export table header; not part of any code definition)
# |---|---|---|
def jitterer(out, z):
    """Jitter x-axis positions for the activations of one network layer.

    Parameters
    ----------
    out : sequence of np.ndarray
        Matrices of layer activations, one per layer.
    z : int
        Index of the layer to process.

    Returns
    -------
    np.ndarray
        Matrix with shape of ``out[z].T`` (neurons x data points) where
        entry (i, j) is ``i + 1`` plus uniform noise in [-0.25, 0.25).
    """
    n_neurons, n_data = out[z].T.shape
    jittered = np.ones((n_neurons, n_data))
    for neuron in range(n_neurons):
        # one row per neuron
        for sample in range(n_data):
            # one column per data point
            jittered[neuron, sample] = neuron + 1 + np.random.uniform(-0.25, 0.25)
    return jittered
from typing import Tuple
def deconstruct_full_path(filename: str) -> Tuple[str, str]:
    """
    Returns a tuple with the parent folder of the file and the file's name.
    Parameters
    ----------
    filename : str
        The path (with filename) that will be deconstructed.
    Returns
    -------
    Tuple[str, str]
        A tuple where the first element is the path of the parent folder, and the second is the
        file's name.
    """
    # Anchor at "/" so relative inputs still yield an absolute parent path.
    full_path = PurePosixPath("/") / filename
    parent, name = full_path.parent, full_path.name
    return str(parent), name
def bbx_to_world(cords, vehicle):
    """
    Convert bounding box coordinate at vehicle reference to world reference.
    Parameters
    ----------
    cords : np.ndarray
        Bounding box coordinates with 8 vertices, shape (8, 4)
        (presumably homogeneous x, y, z, 1 rows — confirm with callers).
    vehicle : opencda object
        Opencda ObstacleVehicle.
    Returns
    -------
    bb_world_cords : np.ndarray
        Bounding box coordinates under world reference; note the input is
        transposed before the product, so the result is shape (4, 8).
    """
    # The bbx center is offset from the vehicle origin, so chain two
    # transforms: bbx -> vehicle, then vehicle -> world.
    bb_transform = Transform(vehicle.bounding_box.location)
    # bounding box to vehicle transformation matrix
    bb_vehicle_matrix = x_to_world_transformation(bb_transform)
    # vehicle to world transformation matrix
    vehicle_world_matrix = x_to_world_transformation(vehicle.get_transform())
    # bounding box to world transformation matrix
    bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
    # 8 vertices are relative to bbx center, thus multiply with bbx_2_world to
    # get the world coords.
    bb_world_cords = np.dot(bb_world_matrix, np.transpose(cords))
    return bb_world_cords
def load_data():
    """Load the IMDB sentiment dataset and pad all sequences to length 100.

    Returns
    -------
    tuple
        (x_train_padded, y_train, x_test_padded, y_test) with every review
        truncated/padded to 100 tokens from a 10000-word vocabulary.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)
    return (
        sequence.pad_sequences(x_train, maxlen=100),
        y_train,
        sequence.pad_sequences(x_test, maxlen=100),
        y_test,
    )
def rotate_to_calibrated_axis(
    data: np.ndarray, ref_val_0: complex, ref_val_1: complex
) -> np.ndarray:
    """
    Rotates, normalizes and offsets complex valued data based on calibration points.

    After calibration, ``ref_val_0`` maps to 0 and ``ref_val_1`` maps to 1
    on the real axis.

    Parameters
    ----------
    data
        An array of complex valued data points.
    ref_val_0
        The reference value corresponding to the 0 state.
    ref_val_1
        The reference value corresponding to the 1 state.
    Returns
    -------
    :
        Calibrated array of complex data points.
    """
    delta = ref_val_1 - ref_val_0
    # Fixed misspelled `rotation_anle`; compute the shared rotate/scale
    # factor once instead of repeating it for the offset and the data.
    rotation_angle = np.angle(delta)
    norm = np.abs(delta)
    scale = np.exp(-1j * rotation_angle) / norm
    # data * scale - ref_val_0 * scale == (data - ref_val_0) * scale
    return (data - ref_val_0) * scale
from typing import Optional
from typing import Tuple
from typing import List
def filter_graph_data(df: pd.DataFrame, x_col: str, x_range: Optional[Tuple[int, int]], file_cols: List[str],
                      file_tuple: FileTuple) -> Optional[pd.DataFrame]:
    """
    Filter data relevant for the graph from the dataframe.
    :param df: The dataframe to filter
    :param x_col: Name of the column that has the data for the x-axis, only used if x_range is given
    :param x_range: (min, max) tuple for filtering the values for the x-axis, or None for no filter
    :param file_cols: Column names that define values for which separate graphs are generated
    :param file_tuple: The set of values for the file_cols that are used in this graph
    :return: the filtered dataframe, or None if no rows remain
    """
    # Start with an all-pass mask; x_range restricts to [min, max).
    mask = True
    if x_range is not None:
        low, high = x_range
        mask = (df[x_col] >= low) & (df[x_col] < high)
    for column, expected in zip(file_cols, file_tuple):
        mask &= df[column] == expected
    subset = df.loc[mask]
    if subset.empty:
        return None
    return subset
def get_question_summary_from_model(question_summary_model):
    """Returns a domain object for an Oppia question summary given a
    question summary model.
    Args:
        question_summary_model: QuestionSummaryModel. The QuestionSummary model
            object to fetch corresponding QuestionSummary domain object.
    Returns:
        QuestionSummary. The domain object corresponding to the given question
        summary model.
    """
    # Pure field-by-field translation; positional order must match the
    # QuestionSummary constructor signature.
    return question_domain.QuestionSummary(
        question_summary_model.id,
        question_summary_model.question_content,
        question_summary_model.misconception_ids,
        question_summary_model.interaction_id,
        question_summary_model.question_model_created_on,
        question_summary_model.question_model_last_updated
    )
from collections import defaultdict
def processLine(line):
    """Process a single line of input, returning a single line of output as a string.
    Input on stdin is
        <input path>\t<output fmt>\t<aligner>\t<fiducials>\t<output parameters>
    where:
    - <input path> is a local path of the input image to align (not a url),
    - <output fmt> is a format string which will generate the output path.
    - <aligner> is the name of the aligner to use,
    - <fiducials> is a list of 'key@value' pairs, joined using ::
        These are used for determining feature locations, which the aligners are defined relative to.
        Any extra fiducials (not needed by the given aligner) are ignored.
        If there is a missing fiducial, an error is returned.
    - <output parameters> is an optional list of 'key@value' pairs, joined using '::'
        These are used for defining parameters about the output. Currently, we support:
            crop: 'x0,y0,x1,y1' rect from which to extract features from. This is
                  first cut from the image and provides the extents relative to which
                  the feature locations are assumed to be located.
                  [default: no crop]
            width: the width to scale the cropped input to (aspect ratio NOT preserved)
                  [default: original resolution]
            height: the height to scale the cropped input to (aspect ratio NOT preserved)
                  [default: original resolution]
            drawfids: how to draw fiducials on output. options:
                none: don't draw fiducials [default]
                circle: draw a circle
                rectangle: draw a rectangle
            drawfidsline: the color to draw fiducial outlines in, as any valid color string (only if drawfids is on)
                  [default: green]
            drawfidsfill: the color to fill drawn fiducials in, as any valid color string (only if drawfids is on)
                  [default: green]
            drawfidsr: the radius of the circle to draw fiducials in
                  [default: 3]
            outfmt: the output format to print on stdout. This is a standard python format string,
                  to which we'll pass a dictionary with the following fields:
                      basename: input file basename
                      inpath: input file path
                      outpath: output file path
                      outfmt: the passed-in output file format string
                      aligner: the passed-in aligner string
                      fiducials: the passed-in input parameters string
                      outparams: the passed-in output parameters string
                  [default: '%(inpath)s\t%(outpath)s']
            errfmt: what to print in case of error, again as a python format string.
                  The fmtdict is like in 'fmt', and also containing:
                      errortype: a python exception type name
                      errormsg: the error message
                  [default: 'error']
    """
    #TODO test out various outfmt options
    #TODO how to specify if we want to write EXIF or not?
    fmtdict = defaultdict(str)
    DEFAULT_OUTPARAMS = defaultdict(str)
    DEFAULT_OUTPARAMS['outfmt'] = DEFAULT_OUTPUT_FMT
    DEFAULT_OUTPARAMS['errfmt'] = DEFAULT_ERROR_FMT
    DEFAULT_OUTPARAMS['drawfids'] = 'none'
    DEFAULT_OUTPARAMS['drawfidsline'] = 'green'
    DEFAULT_OUTPARAMS['drawfidsfill'] = 'green'
    DEFAULT_OUTPARAMS['drawfidsr'] = 3
    # Initialize outparams BEFORE the try block so the error path below can
    # always read 'errfmt', even when parsing fails on the first pop().
    outparams = dict(**DEFAULT_OUTPARAMS)
    # parse elements
    els = line.split('\t')
    try:
        # input and output
        fmtdict['inpath'] = inpath = els.pop(0)
        fmtdict['basename'] = basename = os.path.basename(inpath)
        fmtdict['outpathfmt'] = outpathfmt = els.pop(0)
        # aligner
        fmtdict['aligner'] = aligner = els.pop(0)
        # fiducials
        fmtdict['fiducials'] = fiducials = els.pop(0)
        fiducials = parseFiducials(fiducials)
        # output params are optional, so we don't raise if they are absent
        if els:
            fmtdict['outparams'] = els.pop(0)
            outparams.update(str2kvdict(fmtdict['outparams'], sep='@', dlm='::'))
        # at this stage, we have everything we need:
        # first make sure the file exists and open it
        if not os.path.exists(inpath): raise IOError('Image does not exist')
        im = Image.open(inpath)
        # process the image
        a = Aligner(name=aligner)
        aligned, params = a.align(im, fiducials=fiducials, outparams=outparams)
        fmtdict.update(params)
        # un-escape tab/newline sequences that came in literally on the line
        outparams['outfmt'] = outparams['outfmt'].replace(r'\t', '\t').replace(r'\n', '\n')
        # save the output image
        fmtdict['outpath'] = outpath = outpathfmt % fmtdict
        fmtdict['outpathfmt'] = fmtdict['outpathfmt'].replace(r'\t', '\t').replace(r'\n', '\n')
        saveImage(aligned, outpath, params)
        # generate the output string
        return outparams['outfmt'] % fmtdict
    except Exception as e:
        # Fixed: the original Python 2 `except Exception, e:` plus a stray
        # bare `raise` made this documented error-reporting path unreachable.
        fmtdict['errortype'] = type(e).__name__
        fmtdict['errormsg'] = str(e)
        # generate and return the error string
        return outparams['errfmt'] % fmtdict
def refraction(alt_degrees, temperature_C, pressure_mbar):
    """Given an observed altitude, return how much the image is refracted.
    Zero refraction is returned both for objects very near the zenith,
    as well as for objects more than one degree below the horizon.
    """
    # Bennett-style formula: refraction at standard conditions, in degrees.
    angle = (alt_degrees + 7.31 / (alt_degrees + 4.4)) * DEG2RAD
    base = 0.016667 / tan(angle)
    # Scale by air density via pressure over absolute temperature.
    corrected = base * (0.28 * pressure_mbar / (temperature_C + 273.0))
    # Clamp to zero outside the [-1.0, 89.9] degree altitude band.
    return where((-1.0 <= alt_degrees) & (alt_degrees <= 89.9), corrected, 0.0)
def reduce_mem_usage(df):
    """
    Downcast every column of *df* to the smallest dtype that holds its values.

    Numeric columns are narrowed to the smallest int/float subtype whose
    range strictly contains the column's min and max; object columns become
    'category'. The dataframe is modified in place and returned, and the
    memory saving is logged.
    """
    mem_before = df.memory_usage().sum() / 1024**2
    logger.info('Memory usage of dataframe is {:.2f} MB'.format(mem_before))
    int_candidates = (np.int8, np.int16, np.int32, np.int64)
    float_candidates = (np.float16, np.float32)
    for col in df.columns:
        dtype = df[col].dtype
        if dtype == object:
            df[col] = df[col].astype('category')
            continue
        c_min, c_max = df[col].min(), df[col].max()
        if str(dtype)[:3] == 'int':
            # Pick the narrowest integer type whose range covers the column.
            for candidate in int_candidates:
                bounds = np.iinfo(candidate)
                if c_min > bounds.min and c_max < bounds.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            # Pick the narrowest float type, falling back to float64.
            for candidate in float_candidates:
                bounds = np.finfo(candidate)
                if c_min > bounds.min and c_max < bounds.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    mem_after = df.memory_usage().sum() / 1024**2
    logger.info('Memory usage after optimization is: {:.2f} MB'.format(mem_after))
    logger.info('Decreased by {:.1f}%'.format(100 * (mem_before - mem_after) / mem_before))
    return df
def retry_pattern():
    """Retry pattern decorator used when connecting to snowflake.

    Returns a backoff decorator that retries on
    snowflake.connector.errors.OperationalError with exponential backoff
    (factor 2), at most 5 attempts, calling log_backoff_attempt on each
    retry.
    """
    return backoff.on_exception(backoff.expo,
                                snowflake.connector.errors.OperationalError,
                                max_tries=5,
                                on_backoff=log_backoff_attempt,
                                factor=2)
def get_vrf_route_targets(
    device, address_family, rt_type, vrf=None, route_distinguisher=None
):
    """ Get route target value from a device
    Args:
        device ('obj'): device handle used to execute and parse
            'show vrf detail'
        address_family ('str'): address family value
        rt_type ('str'): route target type
            ex.) rt_type = 'import' OR
                 rt_type = 'export' OR
                 rt_type = 'both'
        vrf('str'): vrf name (defaults to the 'default' vrf when omitted)
        route_distinguisher ('str'): route distinguisher value; looked up
            from the parsed output when omitted
    Returns:
        Route target value
        None
    Raises:
        None
    """
    log.info(
        "Getting route target of type {rt_type} for device {dev_name}".format(
            rt_type=rt_type, dev_name=device.name
        )
    )
    cli_command = ["show vrf detail {vrf}", "show vrf detail"]
    if vrf:
        cmd = cli_command[0].format(vrf=vrf)
    else:
        cmd = cli_command[1]
    try:
        raw_out = device.execute(cmd)
        out = device.parse(cmd, output=raw_out)
    except SchemaEmptyParserError:
        # Parser produced no structured data at all.
        return None
    if not vrf:
        vrf = "default"
    try:
        if not route_distinguisher:
            route_distinguisher = out[vrf]["route_distinguisher"]
        # Without a multicast section, keys use only the first word of the
        # address family (e.g. 'ipv4 unicast' -> 'ipv4').
        if "multicast" not in raw_out:
            address_family = address_family.split()[0]
        route_targets = out[vrf]["address_family"][address_family][
            "route_targets"
        ][route_distinguisher]
        # A stored type of 'both' satisfies either requested direction.
        if (
            route_targets["rt_type"] == rt_type
            or route_targets["rt_type"] == "both"
        ):
            return route_targets["route_target"]
    except KeyError as e:
        # Requested vrf / address-family / RD absent from parsed output.
        return None
    return None
def privGetElevationLocs(locs, dataProvider, dataProviderArgs):
    """
    Dispatch an elevation lookup for `locs` to the chosen data provider.

    Parameters
    ----------
    locs : list
        Locations to annotate with elevation data.
    dataProvider : str
        Provider key; matched case-insensitively against
        elevDataProviderDictionary.
    dataProviderArgs : dict
        Provider-specific options (e.g. 'APIkey' for ors-online/elevapi).

    Returns
    -------
    list
        Locations with altitude appended, as returned by the provider.

    Raises
    ------
    ValueError
        If the resolved provider is not supported.
    """
    # Replaced the bare `except: pass` (which hid real errors) with an
    # explicit type check before lower-casing.
    if isinstance(dataProvider, str):
        dataProvider = dataProvider.lower()
    # NOTE: Neither mapquest, pgRouting, nor OSRM are supported.
    provider = elevDataProviderDictionary[dataProvider]
    if (provider == 'ors-online'):
        return orsGetElevation(locs, dataProviderArgs['APIkey'])
    elif (provider == 'usgs'):
        return usgsGetElevation(locs)
    elif (provider == 'elevapi'):
        return elevapiGetElevation(locs, dataProviderArgs['APIkey'])
    # Previously fell through and returned None implicitly, which the
    # original FIXME comment flagged as not allowed; fail loudly instead.
    raise ValueError("Unsupported elevation data provider: %r" % (dataProvider,))
def _format_field(value, parts, conv, spec, want_bytes=False):
    """Format a replacement field.

    str.format-style field substitution: `parts` is the parsed
    attribute/index path, `conv` the !r/!s conversion flag, and `spec` the
    format spec after ':'.

    NOTE(review): relies on the Python 2 name `unicode`; not Python 3
    compatible as written.
    """
    for k, part, _ in parts:
        if k:
            # Digit-only parts index sequences; everything else is a key lookup.
            if part.isdigit():
                value = value[int(part)]
            else:
                value = value[part]
        else:
            value = getattr(value, part)
    if conv:
        # '!r' applies repr(); any other conversion falls back to str().
        value = ((conv == 'r') and '%r' or '%s') % (value,)
    if hasattr(value, '__format__'):
        value = value.__format__(spec)
    elif hasattr(value, 'strftime') and spec:
        # datetime-like objects without __format__: treat spec as strftime.
        value = value.strftime(str(spec))
    else:
        value = _strformat(value, spec)
    if want_bytes and isinstance(value, unicode):
        return str(value)
    return value
def mixed_string_list_one_valid():
    """Return the module-level fixture list of mixed strings (one valid entry)."""
    return _MIXED_STRING_LISTS_ONE_VALID_
def make_parser():
    """Build the argument parser for this script.

    Extends the shared scripts parser (``get_parser``) with a positional
    ``output_name`` argument used to name the pickled result files.
    """
    parser = get_parser(
        __doc__,
        ('A file containing a list of files/file paths to be read. These '
         'should be nxml or txt files.')
    )
    output_help = ('Results will be pickled in files '
                   '<output_name>_stmts.pkl and <output_name>_readings.pkl.')
    parser.add_argument(dest='output_name', help=output_help)
    return parser
import pathlib
def script_names():
    """Returns the sequence of example script names."""
    # Map each stem to a '.py' filename string.
    return [str(pathlib.Path(stem).with_suffix('.py')) for stem in _stem_names()]
def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    n = len(gt_sorted)
    total_fg = gt_sorted.sum()
    cum_fg = gt_sorted.float().cumsum(0)
    cum_bg = (1 - gt_sorted).float().cumsum(0)
    # Jaccard index of the prefix cuts: 1 - intersection / union.
    jaccard = 1. - (total_fg - cum_fg) / (total_fg + cum_bg)
    if n > 1:  # cover 1-pixel case
        # Discrete derivative along the sorted-error axis.
        jaccard[1:n] = jaccard[1:n] - jaccard[0:-1]
    return jaccard
def expand_image(_img, block, stride, deform=True):
    """
    Split an image volume into blocks.
    Args:
        _img: numpy array
        block: size of the blocks required
        stride: step size
        deform: if True, resize the whole volume into a single block instead
            of tiling it with a sliding window.
    Returns: array of blocks (a one-element list in the deform case)
    """
    if deform:
        # Resize the whole volume to one block; aspect ratio NOT preserved.
        _img = _img.astype('float32')
        ims_Z = np.zeros([_img.shape[0], block[1], block[0]])
        f_img = np.zeros([block[2], block[1], block[0]])
        for z in range(0, _img.shape[0]):
            ims_Z[z, :, :] = cv2.resize(_img[z, :, :], (block[0], block[1]))
        for x in range(0, ims_Z.shape[2]):
            f_img[:, :, x] = cv2.resize(ims_Z[:, :, x], (block[1], block[2]))
        f_img = [f_img]
    else:
        block_shape = tuple(block)
        # Pad the volume up to at least one block in every dimension.
        to_pad = [max(dim, want) for dim, want in zip(_img.shape, block_shape)]
        if to_pad != list(_img.shape):
            print(f"Entire image must be padded: {_img.shape}, must be padded")
            _img = pad_nd_image(_img, new_shape=to_pad)
        a_img = view_as_windows(_img, block, step=stride)
        f_img = a_img.reshape(-1, *a_img.shape[-3:])
        # Make sure every extracted block matches the requested size.
        # Fixed: the original re-assigned the loop variable (`s = pad...`),
        # which silently discarded the padded block; collect into a new
        # array instead. Also compare against a tuple, since ndarray.shape
        # never equals a list.
        padded_blocks = []
        for s in f_img:
            if s.shape != block_shape:
                print(f"Shape: {s.shape}, must be padded to match: {block}")
                s = pad_nd_image(s, new_shape=block)
                assert s.shape == block_shape, "Padding failed"
            padded_blocks.append(s)
        f_img = np.asarray(padded_blocks)
    return f_img
def disabled(reason='No reason given'):
    """Decorator factory that replaces a command with one that always raises.

    The wrapped function is kept on the ``original_func`` attribute and the
    wrapper is tagged with ``Tag.disabled`` so the framework can detect it.
    """
    # pylint:disable=missing-docstring,unused-argument
    def decorate(func):
        @wraps(func)
        def raise_disabled(*args, **kwargs):
            raise DisabledCommandException('This command is disabled: %s' % reason)
        raise_disabled.tag = Tag.disabled
        raise_disabled.original_func = func
        return raise_disabled
    return decorate
def get_tensor_model_parallel_group():
    """Return the tensor-model-parallel process group of the caller rank."""
    group = _TENSOR_MODEL_PARALLEL_GROUP
    # Must have been set up by the parallel-state initialization first.
    assert group is not None, \
        'intra_layer_model parallel group is not initialized'
    return group
import re
import string
def normalize(data):
    """Normalizes the values of incoming data.

    Args:
        data (dict): Dictionary of response data.

    Returns:
        dict: New dictionary with lower-cased keys and cleaned values:
        whitespace collapsed, names title-cased, ages parsed to int (or
        None when empty), gender/favorite_colors lower-cased, spaces
        removed from email/favorite_colors/finished, and finished parsed
        to a real boolean.
    """
    normalized_data = {}
    for key in data:
        value = str(data[key])
        key = key.lower()
        # Strip all fields and reduce multiple spaces to a single whitespace
        value = value.strip()
        value = re.sub(r"\s+", " ", value)
        if key == "name":
            value = string.capwords(value)
        elif key == "age":
            # Empty ages become None instead of raising on int("").
            value = int(value) if value else None
        elif key in ("gender", "favorite_colors"):
            value = value.lower()
        if key in ("email", "favorite_colors", "finished"):
            value = value.replace(" ", "")
        if key == "finished":
            # Fixed: bool(value.capitalize()) was True for ANY non-empty
            # string, including "false"; compare against the literal.
            value = value.capitalize() == "True"
        normalized_data[key] = value
    return normalized_data
def five_fold(data_set):
    """Partition *data_set* into 5 train/test folds.

    Args:
        data_set (list): The Samples to be partitioned.

    Returns:
        list: Five (train, test) tuples; each test slice is a distinct
        fifth of the data and train is everything else, in order.
    """
    fold_size = len(data_set) // 5
    folds = []
    start = 0
    for _ in range(5):
        end = start + fold_size
        # Test is the current slice; train is everything around it.
        test = data_set[start:end]
        train = data_set[:start] + data_set[end:]
        folds.append((train, test))
        start = end
    return folds
from typing import List
import os
import requests
def _query_trembl(accessions: List[str], format: str) -> str:
    """Searches TrEMBL server for UniProt entries based on accession.
    The server to use is set as an environment variable 'TREMBL_SERVER'.
    Normally this would be the internal TrEMBL server which contains the most
    up-to-date version of the database.
    Args:
        accessions: list of UniProt accessions to be passed as query
            parameter.
        format: format of matched UniProt entries (txt, fasta, xml, list are
            valid formats).
    Returns:
        str: UniProt entries in flat file format.
    """
    server = os.environ["TREMBL_SERVER"]
    url = f"{server}/uniprot/?"
    # Build an OR-joined accession query, e.g. "id:A OR id:B".
    query = f"id:{' OR id:'.join(i for i in accessions)}"
    response = requests.get(url, params={"query": query, "format": format})
    response.raise_for_status()
    return response.text
import re
def get_english_info(content_section):
    """
    Extract the English-release status and volume count from a parsed page.

    The english source section can have multiple publishers and volume
    counts; sorting the lines and reading the first favours the publisher
    with the largest volume count.

    Returns:
        tuple: (eng_status, eng_volumes) — canonical status string (or None
        when the section is missing) and the volume count as int (or None).
    """
    english_section = [m.strip("\n") for m in content_section[24] if type(m) is bs4.element.NavigableString and m != "\n"]
    english_section.sort()
    eng_status, eng_volumes = None, None
    # Map spelling variants to a canonical status label, checked in order.
    status_map = (
        (("Complete", "Completed"), "Complete"),
        (("Ongoing",), "Ongoing"),
        (("Cancelled", "Canceled"), "Cancelled"),
        (("Hiatus",), "Hiatus"),
        (("Dropped",), "Dropped"),
    )
    try:
        first_line = english_section[0]
        eng_volumes = int(re.search(r'\d+', first_line).group())
        eng_status = "Unknown"
        for keywords, label in status_map:
            # Fixed: `("Complete" or "Completed") in line` evaluated to
            # "Complete" in line, never testing the second spelling; check
            # every variant explicitly.
            if any(keyword in first_line for keyword in keywords):
                eng_status = label
                break
    except AttributeError:
        print(f"\t---> Attribute error: No english volumes")
    except IndexError:
        print("\t---> Index Error: No english volumes")
    return eng_status, eng_volumes
import numpy
def load_factual_vec(fname, vocab, k):
    """
    Loads 300x1 word vecs from FACTBANK compiled word embeddings
    (word2vec-style binary format: text header then per-word binary vectors).

    NOTE(review): `k` is accepted but unused here — presumably the embedding
    dimension; confirm against callers. Python 2 only as written (`xrange`,
    comparing single bytes from a binary file against str literals).
    """
    word_vecs = {}
    with open(fname, "rb") as f:
        # Header line: "<vocab_size> <vector_dim>".
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = numpy.dtype('float32').itemsize * layer1_size
        for line in xrange(vocab_size):
            # Read the word one character at a time up to the space separator.
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            if word in vocab:
                word_vecs[word] = numpy.fromstring(f.read(binary_len), dtype='float32')
            else:
                # Skip the vector bytes for out-of-vocabulary words.
                f.read(binary_len)
    return word_vecs
from datetime import datetime
def vaccine(date):
    """
    Auxiliary function.
    Download data about vaccination in Cantabria from the Ministry of Health,
    Consumer Affairs and Social Welfare (https://www.mscbs.gob.es).
    Args:
        date(str): Date in format %Y%m%d
    Returns: DataFrame with one row (Cantabria) indexed by %Y/%m/%d, or
        None when the download/parse fails.
    """
    try:
        prefix_url = 'https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/' \
                     'Informe_Comunicacion_'
        suffix_url = '.ods'
        nfile = f'{prefix_url}{date}{suffix_url}'
        file_vaccine = pd.read_excel(nfile, engine='odf')
        file_vaccine.set_index('Unnamed: 0', inplace=True)
        vcant = file_vaccine.loc['Cantabria']
        vcant = pd.DataFrame(vcant).T
        # Fixed: `datetime` is the class here (from datetime import datetime),
        # so datetime.datetime.strptime raised AttributeError.
        vcant.index = [datetime.strptime(date, "%Y%m%d").strftime("%Y/%m/%d")]
        return vcant
    except Exception as e:
        date = datetime.strptime(date, "%Y%m%d").strftime("%Y/%m/%d")
        print(f"Error downloading vaccination data for {date}")
        # print(e)
def cache(f):
    """A decorator to cache results for a given function call.
    Note: The caching is only done on the first argument, usually "self".
    """
    memo = {}
    def _Wrapper(*args, **kwargs):
        # Key only on the first positional argument.
        first = args[0]
        if first not in memo:
            memo[first] = f(*args, **kwargs)
        return memo[first]
    return _Wrapper
def home():
    """Render and return the home page with the supported metric names."""
    metric_names = SUPPORTED_METRICS.keys()
    page = flask.render_template('index.html', metrics=metric_names)
    return page, 200
def data_to_segments_uniform(x, n_segments, segment_ranges=True):
    """ Split data into segments of equal size (number of observations).

    NOTE(review): `segment_ranges` is accepted but never used here —
    presumably kept for interface parity with sibling splitters; confirm.
    """
    return split_equal_bins(x, n_segments)
from pathlib import Path
def load_all_sheets(file_name):
    """
    Load from a xls(x) file all its sheets to a pandas.DataFrame as values to sheet_names as keys in a dictionary
    Parameters
    ----------
    file_name : str, Path
        file_name to load from
    Returns
    -------
    dict
        dictionary containing the sheet_names as keys and pandas.DataFrame representing the xls(x) sheets
        ``{sheet_name: pandas.DataFrame}``
    """
    path = Path(file_name)
    # Discover the sheet names first, then delegate the actual loading.
    sheet_names = list(ExcelFile(path).sheet_names)
    return load_these_sheets(path, sheet_names)
from datetime import datetime
def rate_limit(limit=1000, interval=60):
    """Rate limit for API endpoints.

    If the user has exceeded the limit, then return the response 429.

    Args:
        limit (int): Maximum requests allowed per counter window.
        interval (int): Expiry (seconds) applied to the counter key.

    Returns:
        A decorator that counts requests per client address per minute in
        the cache and short-circuits with HTTP 429 once over the limit.
    """
    def rate_limit_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Fixed: `datetime` is the class (from datetime import datetime),
            # so datetime.datetime.now() raised AttributeError.
            key: str = f"Limit::{request.remote_addr}:{datetime.now().minute}"
            current_request_count = cache.get(key=key)
            if current_request_count and int(current_request_count) >= limit:
                return {
                    "message": f"Too many requests. Limit {limit} in {interval} seconds",
                }, HTTPStatus.TOO_MANY_REQUESTS
            # Bump the counter and refresh its expiry atomically.
            pipe = cache.pipeline()
            pipe.incr(key, 1)
            pipe.expire(key, interval + 1)
            pipe.execute()
            return func(*args, **kwargs)
        return wrapper
    return rate_limit_decorator
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    # Local settings override the global paste.deploy configuration.
    conf = dict(global_conf)
    conf.update(local_conf)
    def auth_filter(wrapped_app):
        return AuthProtocol(wrapped_app, conf)
    return auth_filter
def parse(stylesheet):
    """Parse a stylesheet using tinycss2 and return a StyleSheet instance.
    :param stylesheet: A string of an existing stylesheet.
    """
    rules = tinycss2.parse_stylesheet(
        stylesheet, skip_comments=True, skip_whitespace=True
    )
    result = qstylizer.style.StyleSheet()
    for rule in rules:
        if rule.type == "error":
            raise ValueError("Cannot parse Stylesheet: " + rule.message)
        selector = tinycss2.serialize(rule.prelude).strip()
        declarations = tinycss2.parse_declaration_list(
            rule.content, skip_comments=True, skip_whitespace=True
        )
        for decl in declarations:
            # Ignore non-declaration nodes (e.g. at-rules) in the list.
            if decl.type != "declaration":
                continue
            prop = decl.name.strip()
            result[selector][prop] = tinycss2.serialize(decl.value).strip()
    return result
import unittest
def test_suite():
    """Returns a test suite of all the tests in this module."""
    test_classes = [TestNetCDFPointUtilsConstructor,
                    TestNetCDFPointUtilsFunctions1,
                    TestNetCDFPointUtilsGridFunctions
                    ]
    loader = unittest.defaultTestLoader
    # Collect one sub-suite per test case class.
    sub_suites = [loader.loadTestsFromTestCase(cls) for cls in test_classes]
    return unittest.TestSuite(sub_suites)
from typing import List
def compute_max_cut(n: int, nodes: List[int]) -> int:
    """Compute (inefficiently) the max cut, exhaustively.

    Enumerates every bipartition of the n vertices and sums the weights of
    edges crossing the cut, printing and returning the best one found.

    NOTE(review): despite the List[int] annotation, each element of `nodes`
    is indexed as (u, v, weight) — an edge triple; confirm and fix the hint.
    """
    max_cut = -1000
    for bits in helper.bitprod(n):
        # Collect in/out sets.
        iset = []
        oset = []
        for idx, val in enumerate(bits):
            iset.append(idx) if val == 0 else oset.append(idx)
        # Compute costs for this cut, record maximum.
        cut = 0
        for node in nodes:
            # Count the edge weight once for each crossing direction.
            if node[0] in iset and node[1] in oset:
                cut += node[2]
            if node[1] in iset and node[0] in oset:
                cut += node[2]
        if cut > max_cut:
            max_cut_in, max_cut_out = iset.copy(), oset.copy()
            max_cut = cut
            max_bits = bits
    # Encode the best partition as a bit string |...> for display.
    state = bin(helper.bits2val(max_bits))[2:].zfill(n)
    print('Max Cut. N: {}, Max: {:.1f}, {}-{}, |{}>'
          .format(n, np.real(max_cut), max_cut_in, max_cut_out,
                  state))
    return helper.bits2val(max_bits)
def get_capacity_potential_from_enspreso(tech: str) -> pd.Series:
    """
    Return capacity potential (in GW) per NUTS2 region for a given technology, based on the ENSPRESO dataset.
    Parameters
    ----------
    tech : str
        Technology name among 'wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility' and 'pv_residential'
    Returns
    -------
    nuts2_capacity_potentials: pd.Series
        Series storing technical potential per NUTS2 region.
    """
    accepted_techs = ['wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility', 'pv_residential']
    assert tech in accepted_techs, f"Error: tech {tech} is not in {accepted_techs}"
    path_potential_data = f"{data_path}generation/vres/potentials/source/ENSPRESO"
    # For wind, summing over all wind conditions is similar to considering taking all available land and a capacity per
    # area of 5MW/km2
    if tech == 'wind_onshore':
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
                                           sheet_name='Raw data', index_col=1, skiprows=5)
        onshore_wind = cap_potential_file[
            (cap_potential_file['ONOFF'] == 'Onshore') &
            (cap_potential_file['Scenario'] == 'EU-Wide high restrictions') &
            (cap_potential_file['Subscenario - not cumulative'] == '2000m setback distance')]
        nuts2_capacity_potentials_ds = onshore_wind['GW_Morethan25%_2030_100m_ALLTIMESLICESAVERAGE_V112'].copy()
    elif tech == 'wind_offshore':
        # Fixed-bottom categories only (<= 60m water depth).
        offshore_categories = ['12nm zone, water depth 0-30m', '12nm zone, water depth 30-60m',
                               'Water depth 0-30m', 'Water depth 30-60m']
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
                                           sheet_name='Wind Potential EU28 Full', index_col=1)
        offshore_wind = cap_potential_file[
            (cap_potential_file['Unit'] == 'GWe') &
            (cap_potential_file['Onshore Offshore'] == 'Offshore') &
            (cap_potential_file['Scenario'] == 'EU-Wide low restrictions') &
            (cap_potential_file['Wind condition'] == 'CF > 25%') &
            (cap_potential_file['Offshore categories'].isin(offshore_categories))]
        nuts2_capacity_potentials_ds = offshore_wind.groupby(offshore_wind.index)['Value'].sum()
    elif tech == 'wind_floating':
        # Floating categories only (> 60m water depth).
        floating_categories = ['12nm zone, water depth 60-100m Floating',
                               'Water depth 60-100m Floating', 'Water depth 100-1000m Floating']
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
                                           sheet_name='Wind Potential EU28 Full', index_col=1)
        offshore_wind = cap_potential_file[
            (cap_potential_file['Unit'] == 'GWe') &
            (cap_potential_file['Onshore Offshore'] == 'Offshore') &
            (cap_potential_file['Scenario'] == 'EU-Wide low restrictions') &
            (cap_potential_file['Wind condition'] == 'CF > 25%') &
            (cap_potential_file['Offshore categories'].isin(floating_categories))]
        nuts2_capacity_potentials_ds = offshore_wind.groupby(offshore_wind.index)['Value'].sum()
    elif tech == 'pv_utility':
        # TODO: maybe parametrize this, if we decide to stick with it
        land_use_high_irradiance_potential = 0.05
        land_use_low_irradiance_potential = 0.00
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_SOLAR_PV_CSP_85W.XLSX'),
                                           sheet_name='Raw Data Available Areas', index_col=0,
                                           skiprows=[0, 1, 2, 3], usecols=[1, 43, 44, 45, 46],
                                           names=["NUTS2", "Agricultural HI", "Agricultural LI",
                                                  "Non-Agricultural HI", "Non-Agricultural LI"])
        # Weight available areas by assumed usable land fraction per
        # irradiance class (low-irradiance currently contributes nothing).
        capacity_potential_high = cap_potential_file[["Agricultural HI", "Non-Agricultural HI"]].sum(axis=1)
        capacity_potential_low = cap_potential_file[["Agricultural LI", "Non-Agricultural LI"]].sum(axis=1)
        nuts2_capacity_potentials_ds = capacity_potential_high * land_use_high_irradiance_potential + \
            capacity_potential_low * land_use_low_irradiance_potential
    else:  # 'pv_residential'
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_SOLAR_PV_CSP.XLSX'),
                                           sheet_name='NUTS2 170 W per m2 and 3%', skiprows=2, index_col=2)
        nuts2_capacity_potentials_ds = cap_potential_file['PV - roof/facades']
    updated_potential_per_tech = update_enspreso_capacity_potential(nuts2_capacity_potentials_ds, tech).round(6)
    return updated_potential_per_tech
import time
import urllib
import json
def use_nearby_search(url, next_page=False, request_count=0):
    """Call nearby search API request.

    Parameters
    ----------
    url: str
        URL to use to send a Nearby Search Request in Google Maps Place Search API
    next_page: boolean, optional(default=False)
        whether or not the URL is to request next page using next_page_token
    request_count: int, optional(default=0)
        the count of the previously-sent same requests; used only when next_page=True

    Returns
    -------
    data: dict
        returned API response
        check https://developers.google.com/places/web-service/search#find-place-responses for its structure
    status: str
        status of the API response
        check https://developers.google.com/places/web-service/search#PlaceSearchStatusCodes for details
    """
    # Retry loop: transient network failures (IOError) are retried forever;
    # only a successfully decoded response breaks out.
    while True:
        if next_page:
            # A next_page_token only becomes valid a few seconds after it is
            # issued, so wait before asking for the next page.
            time.sleep(3)
        try:
            # get API response
            print("API request made.")
            # NOTE(review): this needs the urllib.request submodule; a plain
            # `import urllib` does not guarantee it is loaded — confirm the
            # file actually does `import urllib.request` somewhere.
            response = urllib.request.urlopen(url)
        except IOError:
            pass  # transient network error: retry the request
        else:  # if no IOError occurs
            data = json.loads(response.read().decode('utf-8'))
            status = data['status']
            if status == "OK":
                break
            # NOTE: `&` works here only because both operands are booleans
            elif (status == "INVALID_REQUEST") & next_page:  # if next_page_token is not valid yet
                if request_count >= 3:
                    print(f"Failed to receive a valid API response for 3 times for {url}.")
                    break  # stop requesting after 3 trials
                else:
                    print("...Key is not valid yet.")
                    request_count += 1
                    # Recurse with a dummy query parameter appended so the URL
                    # differs from the previous attempt — presumably to defeat
                    # caching; TODO confirm the API tolerates the extra param.
                    data, status = use_nearby_search(url + "&request_count=" + str(request_count), next_page,
                                                     request_count)
                    break
            else:
                break
    return data, status
import urllib
def _GetGoogleAuthtoken(account_type, user, password, service, source):
    """Authenticate against Google ClientLogin and return the auth token.

    (Legacy Python 2 code: relies on urllib.urlencode / urllib2.)

    Args:
        account_type: Type of the account to login, could be GOOGLE or any
            other string if the account is external.
        user: Name of the user to be logged in.
        password: Password of the user to be logged in.
        service: Service where the user wants to log in, for example, 'ah'.
        source: Name of the application requesting the user authentication.

    Returns:
        The authentication token for the user if the supplied data is correct.

    Raises:
        lib.AuthenticationError: raised when the HTTP response is
            403 - Forbidden; the parsed error body is attached.
        urllib2.HTTPError: raised for any other HTTP error.
    """
    # Build the ClientLogin form payload.
    form_fields = {
        'accountType': account_type,
        'Email': user,
        'Passwd': password,
        'service': service,
        'source': source
    }
    encoded_fields = urllib.urlencode(form_fields)
    auth_request = urllib2.Request(
        'https://www.google.com/accounts/ClientLogin', encoded_fields)
    try:
        # Open the request through the custom opener and read the body.
        opener = _GetHTTPOpener()
        response_body = opener.open(auth_request).read()
        # The body is a key=value-per-line document; 'Auth' holds the token.
        return _ParseBodyAsDict(response_body)['Auth']
    except urllib2.HTTPError as e:
        if e.code == 403:
            # Forbidden: surface the parsed error body as an authentication
            # failure instead of a raw HTTP error.
            error_dict = _ParseBodyAsDict(e.read())
            raise AuthenticationError(auth_request.get_full_url(), e.code,
                                      e.msg, e.headers, error_dict)
        raise
def trim_filters(response):
    """Find slice bounds that drop all-but-one zero from each end of a
    filter response curve (a modified version of numpy.trim_zeros).

    Parameters
    ----------
    response : 1-D array or sequence
        Input array.

    Returns
    -------
    first : int
        Index of the last leading zero (0 when the curve starts non-zero).
    last : int
        Slice end that still covers the first trailing zero
        (len(response) when the curve ends non-zero).
    """
    first = 0
    for value in response:
        if value != 0.:
            if first == 0:
                # never cut a non-zero leading edge
                first = 1
            break
        first += 1
    last = len(response)
    for value in reversed(response):
        if value != 0.:
            if last == len(response):
                # never cut a non-zero trailing edge
                last = len(response) - 1
            break
        last -= 1
    # step back/forward one index to leave a single zero on each side
    return first - 1, last + 1
def is_spanning(graph, subgraph):
    """
    Determine whether *subgraph* is a spanning subgraph of *graph*,
    i.e. whether it uses every vertex of the original graph (compared
    via the vertex-set function V).

    Parameters
    ----------
    graph = A networkx graph.
    subgraph = A networkx subgraph of 'graph'.

    Returns
    -------
    True if the subgraph is spanning, False otherwise.
    """
    graph_vertices = V(graph)
    subgraph_vertices = V(subgraph)
    return graph_vertices == subgraph_vertices
import subprocess
def get_commit_msg() -> str:
    """
    Return the subject line of the most recent git commit.

    Falls back to a bare newline when git writes to stderr (for example
    when the repository has no commits yet).
    """
    completed = subprocess.run(
        ["git", "show", "-s", "--format=%s"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    return "\n" if completed.stderr else completed.stdout
from pathlib import Path
from typing import List
import os
def relative_paths(root: Path, paths: list) -> List[str]:
    """
    Normalise paths from incoming configuration, returning them all as
    strings relative to *root*.

    Entries may be ``str`` or ``pathlib.Path``. A leading ``"!"`` marks an
    exclusion pattern and is preserved on the output string.

    :param root: base directory the results are made relative to
    :param paths: mixed sequence of str / Path entries
    :raises NotImplementedError: for entries of any other type
    """
    result = []
    for path in paths:
        # Decide the entry's textual form first: pathlib.Path has no
        # startswith(), so probing the raw object for the "!" exclusion
        # marker raised AttributeError for Path inputs (bug fixed here).
        if isinstance(path, Path):
            text = str(path)
        elif isinstance(path, str):
            text = path
        else:
            raise NotImplementedError()
        # more hacks for exclusions I'm not happy about
        # maybe we should subclass Path to make this cleaner?
        exclusion = text.startswith("!")
        if exclusion:
            text = text[1:]
        # make sure paths are relative!
        if isinstance(path, Path):
            inp = str(path.relative_to(root))
        else:
            inp = text
            if os.path.isabs(text):
                inp = os.path.relpath(text, root)
        if exclusion:
            inp = "!" + inp
        result.append(inp)
    return result
from re import DEBUG
import torch
def domain_loss_roi(pred, domain_label):
    """
    ROI-level domain adversarial loss.

    Computes binary cross-entropy between the domain-classifier logits and
    a constant target tensor filled with *domain_label* (source vs. target
    domain indicator).

    NOTE(review): DEBUG is imported from the `re` module (a truthy flag
    constant), so this debug branch is presumably always taken — confirm
    the intended flag was meant instead.
    """
    if DEBUG:
        print('\tDA-ROI loss')
    device_id = pred.get_device()
    # Constant target with the same shape as the logits, filled with the
    # 0/1 domain label. `Variable` is the legacy torch.autograd API.
    target_label = Variable(
        torch.FloatTensor(pred.data.size()).fill_(float(domain_label))
    ).cuda(device_id)
    loss_da_roi = F.binary_cross_entropy_with_logits(pred, target_label)
    # Guard against NaNs poisoning the run: zero out the loss instead.
    if net_utils.is_nan_loss(loss_da_roi):
        loss_da_roi *= 0
    return loss_da_roi
from bs4 import BeautifulSoup
import re
def clean_text(text):
    """
    A function to pre-process text: strips @-mentions and URLs, removes
    HTML markup, drops BOM/replacement characters, keeps only ASCII
    letters, lower-cases, and re-joins whitespace-normalised tokens.

    Parameters
    ----------
    text : string
        the string to be processed

    Returns
    -------
    text : string
        a clean string
    """
    tok = WordPunctTokenizer()
    pat1 = r'@[A-Za-z0-9]+'
    pat2 = r'https?://[A-Za-z0-9./]+'
    combined_pat = r'|'.join((pat1, pat2))
    soup = BeautifulSoup(text, 'lxml')
    souped = soup.get_text()
    stripped = re.sub(combined_pat, '', souped)
    try:
        # Python 2 relic: str has no .decode() on Python 3, so this always
        # falls through to the plain text there.
        clean = stripped.decode("utf-8-sig").replace(u"\ufffd", "?")
    except (AttributeError, UnicodeDecodeError):
        # BUGFIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; only these two exceptions are expected here.
        clean = stripped
    letters_only = re.sub("[^a-zA-Z]", " ", clean)
    lower_case = letters_only.lower()
    words = tok.tokenize(lower_case)
    return (" ".join(words)).strip()
import tqdm
import torch
def train_one_epoch(dataloader, model, optimizer, device, writer, epoch, cfg):
    """ Trains the model for one epoch.

    Weighted binary-cross-entropy segmentation training with gradient
    accumulation (cfg.optim.batch_accumulation) and periodic TensorBoard
    logging (cfg.optim.log_every / cfg.optim.debug_freq).

    Returns a dict of epoch-mean metrics shaped as
    {metric: {'value': float, 'threshold': None}}.

    NOTE(review): the file imports `tqdm` as a module but calls it
    directly here — presumably `from tqdm import tqdm` is in effect;
    confirm. `pd` (pandas) is likewise assumed imported elsewhere.
    """
    model.train()
    optimizer.zero_grad()
    metrics = []
    n_batches = len(dataloader)
    progress = tqdm(dataloader, desc='TRAIN', leave=False)
    for i, sample in enumerate(progress):
        input_and_target = sample[0]
        input_and_target = input_and_target.to(device)
        # split channels to get input, target, and loss weights
        n_channels = input_and_target.shape[1]
        images, targets, weights = input_and_target.split((n_channels - 2, 1, 1), dim=1)
        logits = model(images)
        # per-pixel weighted BCE: `weights` rescales each pixel's loss
        loss = F.binary_cross_entropy_with_logits(logits, targets, weights)
        predictions = torch.sigmoid(logits)
        soft_dice, soft_jaccard = dice_jaccard(targets, predictions)
        # gradients accumulate across batches until the step below
        loss.backward()
        batch_metrics = {
            'loss': loss.item(),
            'soft_dice': soft_dice.item(),
            'soft_jaccard': soft_jaccard.item()
        }
        metrics.append(batch_metrics)
        postfix = {metric: f'{value:.3f}' for metric, value in batch_metrics.items()}
        progress.set_postfix(postfix)
        # optimizer step only every `batch_accumulation` iterations
        if (i + 1) % cfg.optim.batch_accumulation == 0:
            optimizer.step()
            optimizer.zero_grad()
        if (i + 1) % cfg.optim.log_every == 0:
            batch_metrics.update({'lr': optimizer.param_groups[0]['lr']})
            n_iter = epoch * n_batches + i
            for metric, value in batch_metrics.items():
                writer.add_scalar(f'train/{metric}', value, n_iter)
            # optionally dump image batches for visual debugging
            if cfg.optim.debug and (i + 1) % cfg.optim.debug_freq == 0:
                writer.add_images('train/inputs', images, n_iter)
                writer.add_images('train/targets', targets, n_iter)
                writer.add_images('train/predictions', predictions, n_iter)
    # average the per-batch metrics over the whole epoch
    metrics = pd.DataFrame(metrics).mean(axis=0).to_dict()
    metrics = {k: {'value': v, 'threshold': None} for k, v in metrics.items()}
    return metrics
def test_alias_function():
    """Test 4: Generate markup based on an element using an (alias w/function) parameter to explicitly correlate data and elements"""
    template = get_template('contacts-alias')

    def rename_to_foo(parent, element, key, value):
        # the alias callback must only ever be invoked for the 'name' key
        eq_(key, 'name')
        return 'foo'

    alias_map = {'name': rename_to_foo, 'title': 'title'}
    weld(template('.contact')[0], data, dict(alias=alias_map))
    check_contacts(template)
def baseurl(request):
    """
    Return a BASE_URL template context for the current request.
    """
    prefix = 'https://' if request.is_secure() else 'http://'
    return {'BASE_URL': prefix + request.get_host()}
import requests
def geolocate(address, bounds=None, country=None, administrative_area=None, sensor=False):
    """
    Resolve *address* with the Google Maps Geocoding API and massage the
    output: each result gains top-level 'latitude'/'longitude' keys.

    Provided for convenience, as Uber relies on this heavily.
    See https://developers.google.com/maps/documentation/geocoding/ for details.

    Raises GeolocationExcetion on transport errors or non-OK API status.
    """
    params = {
        'address': address,
        'sensor': str(sensor).lower()
    }
    components = []
    if country:
        components.append('country:' + country)
    if administrative_area:
        components.append('administrative_area:' + administrative_area)
    if bounds:
        params['bounds'] = '|'.join(
            '{},{}'.format(point.latitude, point.longitude) for point in bounds
        )
    if components:
        params['components'] = '|'.join(components)
    response = requests.get('http://maps.googleapis.com/maps/api/geocode/json', params=params)
    if not response.ok:
        raise GeolocationExcetion(response.text)
    payload = response.json()
    if payload['status'] not in ('OK', 'ZERO_RESULTS'):
        raise GeolocationExcetion(payload)
    all_results = payload.get('results', [])
    for entry in all_results:
        location = entry.get('geometry', {}).get('location')
        if location:
            entry['latitude'] = location['lat']
            entry['longitude'] = location['lng']
    return all_results
def _cmdy_hook_class(cls):
    """Put hooks into the original class for extending.

    Adds to *cls*:
      - ``_plugin_stacks``: per-method stacks of implementations added by
        different plugins (the most recently added one is NOT in the stack).
      - ``_original(fname)``: steps one frame deeper into the stack on each
        call, letting a plugin override delegate to what it replaced.
      - a wrapped ``__init__`` (and ``reset`` for CmdyHolding) that resets
        the per-instance call-frame bookkeeping.
    """
    # store the functions with the same name
    # that defined by different plugins
    # Note that current (most recently added) is not in the stack
    cls._plugin_stacks = {}
    def _original(self, fname):
        """Get the original function of self, if it is overridden"""
        # callframe is oringally -1; each call advances one frame so that
        # chained plugin overrides each reach the implementation below them
        frame = self._plugin_callframe.setdefault(fname, -1)
        frame += 1
        self._plugin_callframe[fname] = frame
        return cls._plugin_stacks[fname][frame]
    cls._original = _original
    orig_init = cls.__init__
    def __init__(self, *args, **kwargs):
        # every new instance starts with fresh call-frame bookkeeping
        self._plugin_callframe = {}
        orig_init(self, *args, **kwargs)
    cls.__init__ = __init__
    if cls.__name__ == "CmdyHolding":
        orig_reset = cls.reset
        @wraps(orig_reset)
        def reset(self, *args, **kwargs):
            # clear the callframes as well
            self._plugin_callframe = {}
            orig_reset(self, *args, **kwargs)
            return self
        cls.reset = reset
    # self is not a decorator, we don't return cls
def pair_verify(
    credentials: HapCredentials, connection: HttpConnection
) -> PairVerifyProcedure:
    """Return the procedure object used for Pair-Verify.

    Dispatches on the credential type: Null and Legacy credentials get
    their dedicated procedures; everything else uses an SRP handler with
    either the HAP or the transient HAP procedure.
    """
    _LOGGER.debug(
        "Setting up new AirPlay Pair-Verify procedure with type %s", credentials.type
    )
    cred_type = credentials.type
    if cred_type == AuthenticationType.Null:
        return NullPairVerifyProcedure()
    if cred_type == AuthenticationType.Legacy:
        legacy_srp = LegacySRPAuthHandler(credentials)
        legacy_srp.initialize()
        return AirPlayLegacyPairVerifyProcedure(connection, legacy_srp)
    srp = SRPAuthHandler()
    srp.initialize()
    if cred_type == AuthenticationType.HAP:
        return AirPlayHapPairVerifyProcedure(connection, srp, credentials)
    return AirPlayHapTransientPairVerifyProcedure(connection, srp)
import os
def get_file_type_and_ext(filename):
    """
    Return (file type, extension) when the file can be previewed online,
    otherwise ('Unknown', extension).
    """
    ext = os.path.splitext(filename)[1][1:].lower()
    if ext in get_conf_text_ext():
        return (TEXT, ext)
    mapped_type = FILEEXT_TYPE_MAP.get(ext)
    return (mapped_type, ext) if mapped_type else ('Unknown', ext)
def load_data(
    datapath=None,
    minstorms=3,
    minbmps=3,
    combine_nox=True,
    combine_WB_RP=True,
    remove_grabs=True,
    grab_ok_bmps="default",
    balanced_only=True,
    fix_PFCs=True,
    excluded_bmps=None,
    excluded_params=None,
    as_dataframe=False,
    **dc_kwargs
):
    """Prepare BMP data for categorical summaries.

    Parameters
    ----------
    datapath : Path-like, optional
        Path to the raw data CSV. If not provided, the latest data will be
        downloaded.
    minstorms : int (default = 3)
        Minimum number of storms (monitoring events) for a BMP study to be
        included.
    minbmps : int (default = 3)
        Minimum number of BMP studies for a parameter to be included.
    combine_nox : bool (default = True)
        Combine NO3 and NO2+NO3 into a new parameter NOx, preferring
        NO2+NO3 when both are observed for an event (NO2 concentrations
        are typically much smaller than NO3, so NO2+NO3 ~ NO3).
    combine_WB_RP : bool (default = True)
        Combine Retention Pond and Wetland Basin data into a new BMP
        category: Retention Pond/Wetland Basin.
    remove_grabs : bool (default = True)
        Remove grab samples from the dataset except for biological
        parameters and BMP categories whitelisted via *grab_ok_bmps*.
    grab_ok_bmps : sequence of str, optional
        BMP categories for which grab data should be included. By default
        Retention Ponds, Wetland Basins, and the combined category created
        when *combine_WB_RP* is True.
    balanced_only : bool (default = True)
        Remove BMP studies that have only influent or only effluent data.
    fix_PFCs : bool (default = True)
        Correct the category of Permeable Friction Course BMPs.
    excluded_bmps, excluded_params : sequence of str, optional
        BMP studies and parameters to exclude from the data.
    as_dataframe : bool (default = False)
        When False, a wqio.DataCollection is returned.

    Additional Parameters
    ---------------------
    Any additional keyword arguments are passed to wqio.DataCollection.

    Returns
    -------
    bmp : pandas.DataFrame or wqio.DataCollection
    """
    # Defaults for the DataCollection options; pop() removes them from
    # dc_kwargs so the remainder passes straight through.
    dc_defaults = {
        "othergroups": ["category", "units"],
        "pairgroups": ["category", "units", "bmp_id", "site_id", "storm"],
        "rescol": "res",
        "qualcol": "qual",
        "ndval": ["ND", "<"],
        "stationcol": "station",
        "paramcol": "parameter",
    }
    dc_options = {key: dc_kwargs.pop(key, default) for key, default in dc_defaults.items()}
    bmp = (
        _load_raw_data(datapath)
        .pipe(_clean_raw_data)
        .pipe(
            _prepare_for_summary,
            minstorms=minstorms,
            minbmps=minbmps,
            combine_nox=combine_nox,
            combine_WB_RP=combine_WB_RP,
            remove_grabs=remove_grabs,
            grab_ok_bmps=grab_ok_bmps,
            balanced_only=balanced_only,
            fix_PFCs=fix_PFCs,
            excluded_bmps=excluded_bmps,
            excluded_params=excluded_params,
        )
    )
    if as_dataframe:
        return bmp
    return wqio.DataCollection(bmp, **dc_options, **dc_kwargs)
import requests
import json
def sign(request):
    """
    Returns a signed URL (for file upload) and an OTP.

    Builds a V4 signed PUT URL for a freshly generated object name in
    BUCKET, embedding size limits, integrity hashes (MD5 / SHA-256) and
    an OTP header taken from the caller's JSON body.
    """
    credentials, project_id = auth.default()
    if credentials.token is None:
        # Perform a refresh request to populate the access token of the
        # current credentials.
        # NOTE(review): with `import requests` this builds a Request from
        # the HTTP library; google-auth expects a
        # google.auth.transport.requests.Request here — confirm the import.
        credentials.refresh(requests.Request())
    # Connecting to the bucket
    client = Client()
    bucket = client.get_bucket(BUCKET)
    #
    file_name = generate_filename()
    object = bucket.blob(file_name)
    # Mandatory header
    headers = {
        "X-Goog-Content-Length-Range": f"{MIN_SIZE},{MAX_SIZE}" # limitting the upload file size
    }
    # mandatory fields
    sign_request = {
        "version": "v4",
        "expiration": timedelta(seconds=EXPIRE_AFTER_SECONDS),
        "service_account_email": credentials.service_account_email,
        "access_token": credentials.token,
        "method": "PUT",
        "virtual_hosted_style": True
    }
    # Adding information in the request
    request_json = request.get_json()
    # Content MD5 is a standard integrity check in GCS
    content_md5 = ''
    # If the use-case requires stronger checks a stronger hashing algorithm
    # such as SHA-256 should be used, but the check has to be done after the object
    # has landed in the bucket as Google Cloud Storage does not support SHA256 as
    # as an integrity checking machanism as of Jan 2022
    if request_json and 'content-md5' in request_json:
        content_md5 = request_json['content-md5']
        sign_request['content_md5'] = content_md5
    content_sha256 = ""
    # GCS API will not perform the hash validation for PUT requests. Ideally this must be stored
    # somewhere else (e.g. in a database) so that the files content can be read and SHA256 hash
    # of the content can be calculated after the object lands in the bucket. This code avoides
    # that step
    if request_json and 'content-sha256' in request_json:
        content_sha256 = request_json['content-sha256']
        headers['x-content-sha256'] = content_sha256
    uid = 0
    if request_json and 'user-id' in request_json:
        uid = int(request_json['user-id'])
    # Adding custom headers in the request
    if "headers" in request_json:
        try:
            # BUGFIX: dict.iteritems() is Python 2 only; on Python 3 it
            # raised AttributeError which the old bare `except` silently
            # swallowed, so custom headers were never actually added.
            for key, val in request_json['headers'].items():
                headers[key] = str(val)
        except (AttributeError, TypeError):
            # malformed "headers" payload (not a mapping): best-effort skip
            pass
    # adding the OTP
    OTP = generate_otp(
        file_name,
        user_id=uid,
        contet_hash=content_sha256 if len(content_sha256) > 0 else content_md5 # prefer SHA256 if present
    )
    headers['x-otp'] = OTP
    # Adding headers to the request
    sign_request['headers']=headers
    # Debugging
    # debug = sign_request.copy()
    # debug['access_token']='###'
    # debug['expiration']=str(EXP)
    return json.dumps({
        'url': object.generate_signed_url(**sign_request),
        'otp': OTP,
        #'debug': debug,
        #'request': request.get_json()
    })
def _parse_squeue_state(squeue_out, job_id):
"""Parse "state" column from squeue output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`squeue` output is empty or job_id is not found.
"""
invalid_job_str = "Invalid job id specified"
if invalid_job_str in squeue_out:
return "u"
lines = squeue_out.split('\n')
for line in lines:
if "JOBID" in line:
continue
elif len(line.strip()) == 0:
continue
else:
returned_id = line.split()[0]
state = line.split()[4]
logger.debug("Squeue for job %i returned ID: %s, State: %s" % (job_id, returned_id, state))
return state
return "u" | c3bdb8fa296f670d3f302d9ef9441262ca0da105 | 25,154 |
def parser_IBP_Descriptor(data,i,length,end):
    """\
    parser_IBP_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment; the raw payload (bytes
    after the 2-byte tag/length header up to *end*) is returned as-is:

        { "type": "IBP", "contents" : unparsed_descriptor_contents }

    (Defined in ISO 13818-1 specification)
    """
    raw_payload = data[i + 2:end]
    return {"type": "IBP", "contents": raw_payload}
import logging
def build_county_list(state):
    """
    Build and return the FIPS list of all whitelisted, inference-ready
    counties in *state*.
    """
    state_obj = us.states.lookup(state)
    logging.info(f"Get fips list for state {state_obj.name}")
    whitelist = load_data.load_whitelist()
    # keep only counties cleared for inference
    whitelist = whitelist[whitelist["inference_ok"] == True]
    state_mask = whitelist["state"].str.lower() == state_obj.name.lower()
    return whitelist[state_mask].fips.tolist()
import bs4
def is_comment(obj):
    """Return True when *obj* is a BeautifulSoup ``Comment`` node."""
    return isinstance(obj, bs4.Comment)
from typing import Tuple
import sqlite3
def insert_user(username: str) -> Tuple[int, str]:
    """
    Insert a new user, de-duplicating the username.

    When *username* is taken, integer suffixes (username1, username2, ...)
    are tried until an INSERT succeeds.

    :param username: The desired username for the new user.
    :return: A tuple of (row id, username actually stored).
    """
    db = get_db()
    suffix = 0
    while True:
        candidate = username if suffix == 0 else username + str(suffix)
        try:
            cur = db.execute('INSERT INTO user (username) VALUES (?)', [candidate])
        except sqlite3.IntegrityError:
            # name taken: bump the suffix and retry
            suffix += 1
            continue
        new_user_id = cur.lastrowid
        break
    db.commit()
    cur.close()
    return new_user_id, candidate
def get_gateway(ctx, name):
    """Get the sdk's gateway resource.

    Restores the session if expired, then reads the client and vdc href
    from the click context and resolves the named gateway via the VDC.
    """
    restore_session(ctx, vdc_required=True)
    client = ctx.obj['client']
    vdc = VDC(client, href=ctx.obj['profiles'].get('vdc_href'))
    gateway_info = vdc.get_gateway(name)
    return Gateway(client, href=gateway_info.get('href'))
def fn_getdatetime(fn):
    """Extract the first datetime embedded in filename *fn*, or None
    when no datetime can be found."""
    matches = fn_getdatetime_list(fn)
    return matches[0] if matches else None
def greenplum_kill_process(process_id):
    """
    Cancel, then terminate, a Greenplum backend process.

    :param process_id: int
    :return: None
    """
    query = f"""
    select pg_cancel_backend({process_id});
    select pg_terminate_backend({process_id});
    """
    return greenplum_read(query)
import base64
import re
def check_app_auth(headers):
    """Authenticate an application from the Authorization HTTP header.

    Only HTTP Basic authentication is supported. Returns True when the
    decoded user:password pair matches the global ``app_auth`` table,
    and False for missing, malformed, or unknown credentials.
    """
    try:
        auth_header = headers["Authorization"]
    except KeyError:
        return False
    # Only handle HTTP Basic authentication. Match the full base64
    # alphabet: the old pattern `\w+==` rejected legal tokens containing
    # '+' or '/' and required exactly two '=' padding characters.
    m = re.match(r"Basic ([A-Za-z0-9+/]+={0,2})", auth_header)
    if not m:
        return False
    encoded = m.groups()[0].encode('ascii')
    try:
        # BUGFIX: base64.decodestring() was removed in Python 3.9;
        # b64decode is the supported replacement.
        decoded = base64.b64decode(encoded).decode('ascii')
    except (ValueError, UnicodeDecodeError):
        # binascii.Error (invalid base64) is a subclass of ValueError
        return False
    m = re.match(r"([^:]+):(.+)", decoded)
    if not m:
        # Invalid authorization format
        return False
    app_user, app_pass = m.groups()
    global app_auth
    try:
        if app_auth[app_user] == app_pass:
            return True
    except KeyError:
        # No such user, fall through
        pass
    return False
def setupConnection():
    """
    Create the database connection shared by the table classes; the
    backing file is created on demand when it does not exist.
    """
    db_path = conf.get('db', 'path')
    connection = builder()(db_path)
    return connection
def max_pool(ip):
    """2x2 max pooling with stride 2; a trailing odd row/column is
    cropped off before pooling.

    ip is DxHxW, the result is DxH/2xW/2.
    """
    usable_h = ip.shape[1] - ip.shape[1] % 2
    usable_w = ip.shape[2] - ip.shape[2] % 2
    trimmed = ip[:, :usable_h, :usable_w]
    # reduce adjacent row pairs first, then adjacent column pairs
    row_max = np.maximum(trimmed[:, 0::2, :], trimmed[:, 1::2, :])
    return np.maximum(row_max[:, :, 0::2], row_max[:, :, 1::2])
import torch
def get_bin_vals(global_config):
    """
    Create bin values for grasping widths from the bounds in the config.

    Arguments:
        global_config {dict} -- config

    Returns:
        torch.Tensor -- float32 tensor with one value per bin
    """
    bounds = np.array(global_config['DATA']['labels']['offset_bins'])
    mode = global_config['TEST']['bin_vals']
    if mode == 'max':
        # midpoint of each bin, except the last bin keeps its upper bound
        vals = (bounds[:-1] + bounds[1:]) / 2
        vals[-1] = bounds[-1]
    elif mode == 'mean':
        vals = bounds[1:]
    else:
        raise NotImplementedError
    if not global_config['TEST']['allow_zero_margin']:
        # cap bin values so the gripper keeps at least `extra_opening` slack
        cap = global_config['DATA']['gripper_width'] - global_config['TEST']['extra_opening']
        vals = np.minimum(vals, cap)
    return torch.tensor(vals, dtype=torch.float32)
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None,
             interp_table=None):
    """
    Phase shift due to the Doppler delay from a population of subhalos.

    Shapes inferred from the indexing below — TODO confirm:
        t       : (T,) observation times [yr]
        profile : dict describing the halo; either {'M': mass, 'c': conc}
                  or {'rhos': ..., 'rs': ...}
        r0_vec  : (N, 3) initial positions [kpc]
        v_vec   : (N, 3) velocities (converted via const.km_s_to_kpc_yr)
        d_hat   : (3,) line-of-sight unit vector

    The signal is summed over all N events.

    TODO: add use_closest option
    """
    v_mag = np.linalg.norm(v_vec, axis=1)
    r0_v = np.einsum("ij, ij -> i", r0_vec, v_vec) # kpc^2/yr
    # t0: time of closest approach; b_vec/b_mag: impact parameter
    t0 = -r0_v / np.square(v_mag) # year
    b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3), kpc
    b_mag = np.linalg.norm(b_vec, axis=1) # (N)
    tau = b_mag / v_mag # year
    b_hat = b_vec / b_mag[:, np.newaxis]
    v_hat = v_vec / v_mag[:, np.newaxis]
    # projections of impact-parameter / velocity directions onto the LOS
    b_d = np.dot(b_hat, d_hat)
    v_d = np.dot(v_hat, d_hat)
    # dimensionless time relative to closest approach, shape (T, N)
    x = np.subtract.outer(t, t0) / tau
    x0 = -t0 / tau
    prefactor = (
        const.yr_to_s
        * const.GN
        / (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))
    )
    if interp_table is None:
        # closed-form point-mass terms, referenced to t = 0 (x0)
        bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0)
        vd_term = np.arcsinh(x) - np.arcsinh(x0)
        # NOTE(review): the vd term enters with a minus sign here but with
        # a plus sign in the interp_table branch below — confirm intended.
        sig = bd_term * b_d - vd_term * v_d
        if 'M' in list(profile):
            prefactor *= profile['M']
            if use_form:
                # evaluate the form factor at the closest approach within
                # the observation window [0, t[-1]]
                t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
                x_cl = (t_cl - t0) / tau
                r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
                # virial radius from M and 200 * critical density
                rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)
                form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)
                sig = form_func * sig
        else:
            if form_fun is not None:
                t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
                x_cl = (t_cl - t0) / tau
                r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
                form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
                sig = form_func * sig
            else:
                raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
    else:
        # tabulated terms keyed on scaled impact parameter y = b / rs
        y = b_mag / profile['rs']
        bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)
        y.shape = (1,-1)
        y = np.broadcast_to(y,x.shape)
        bd_term, vd_term = interp_table.bd_vd_terms(x, y)
        bd_term -= bd_term0
        vd_term -= vd_term0
        sig = profile['rhos'] * profile['rs']**3 * (bd_term * b_d + vd_term * v_d)
    sig = prefactor * sig
    # sum the signal over all the events
    return np.sum(sig, axis=-1)
def calculate_Hubble_flow_velocity_from_cMpc(cMpc, cosmology="Planck15"):
    """
    Calculate the Hubble-flow recession velocity from comoving distance.

    Parameters
    ----------
    cMpc : array-like, shape (N, )
        The distance in units of comoving megaparsecs. Must be 1D or scalar.
    cosmology : string or astropy.cosmology.core.FLRW
        The cosmology to assume whilst calculating distance. Default: Planck15.

    Returns
    -------
    velocity : astropy Quantity, shape (N, )
        Recession velocity (proper distance times H0).
    """
    cosmo = utils.get_cosmology_from_name(cosmology)
    scale_factor = utils.calculate_scale_factor_from_cMpc(cMpc, cosmology=cosmology)
    # proper distance = comoving distance * scale factor... inverted here
    # because cMpc is comoving: d_proper = d_comoving / (1/a) convention
    proper_dist = cMpc * apu.Mpc / scale_factor
    return proper_dist * cosmo.H0
import itertools
def CollapseDictionary(mapping):
    """
    Take a dictionary mapping prefixes to URIs and drop auto-allocated
    prefixes (those beginning with '_') whose URI already has an
    originally-assigned prefix.

    For example, {'ex': uri, '_1': uri} collapses to {'ex': uri}, while
    {'_1': uri} alone is kept because the URI has no original prefix.
    """
    # invert the mapping: URI -> set of prefixes pointing at it
    by_uri = {}
    for prefix, uri in mapping.items():
        by_uri.setdefault(uri, set()).add(prefix)
    doomed = set()
    for uri, prefixes in by_uri.items():
        generated = {p for p in prefixes if p.startswith('_')}
        original = prefixes - generated
        if original and len(prefixes) > 1 and generated:
            # There are allocated prefixes for URIs that were originally
            # given a prefix
            assert len(original) == 1
            doomed.update(generated)
    return {p: u for p, u in mapping.items() if p not in doomed}
def do_pre_context(PreContextSmToBeReversedList, PreContextSmIdList, dial_db):
    """Pre-context detecting state machine (backward).
    ---------------------------------------------------------------------------
    Micro actions are: pre-context fullfilled_f

            DropOut     --> Begin of 'main' state machine.
            BLC         --> ReloadStateBackward
            EndOfStream --> 'error'

    Variables (potentially) required:

            pre_context_fulfilled_f[N] --> Array of flags for pre-context
                                           indication.
    RETURNS: [0] generated code text
             [1] reload state BACKWARD, to be generated later.
    """
    if not PreContextSmToBeReversedList:
        return [], None
    txt, analyzer = __do_state_machine(
        PreContextSmToBeReversedList, engine.BACKWARD_PRE_CONTEXT,
        dial_db, ReverseF=True)
    # append the epilog directly onto the generated analyzer text
    txt.extend(_get_pre_context_epilog_definition(dial_db))
    for sm_id in PreContextSmIdList:
        variable_db.require("pre_context_%i_fulfilled_f", Index=sm_id)
    return txt, analyzer
from datetime import datetime
async def check_user_cooldown(ctx: Context, config: Config, cooldown: dict):
    """Check if a command is off cooldown, recording the use when allowed.

    Returns True (and updates the cooldown bookkeeping) when the rate
    window has expired or the use count is still under the limit;
    False when the command is rate-limited.
    """
    command = ctx.command.qualified_name
    entry = cooldown[command]
    last, rate, per, uses = entry["last"], entry["rate"], entry["per"], entry["uses"]
    now = utc_timestamp(datetime.utcnow())
    if now >= last + per:
        # window expired: open a fresh window with this use counted
        cooldown[command] = {
            "last": utc_timestamp(datetime.utcnow()),
            "rate": rate,
            "per": per,
            "uses": 1
        }
        return True
    if uses < rate:
        # still inside the window but under the rate limit
        cooldown[command] = {
            "last": last,
            "rate": rate,
            "per": per,
            "uses": uses + 1
        }
        return True
    return False
def round_robin(units, sets=None):
    """Generate a schedule of "fair" pairings from a list of units.

    Uses the circle method: the first unit stays fixed while the others
    rotate one position per round. With an odd number of units a None
    "bye" is appended and pairings involving it are skipped.

    NOTE: mutates *units* in place (None padding and rotation).

    :param units: list of participants.
    :param sets: number of rounds to schedule (default: len(units) - 1,
        a full round robin).
    :return: list of rounds, each a list of (unit, unit) tuples.
    """
    if len(units) % 2:
        units.append(None)
    count = len(units)
    sets = sets or (count - 1)
    # BUGFIX: `count / 2` is a float on Python 3 and breaks range();
    # use integer division.
    half = count // 2
    schedule = []
    for _turn in range(sets):
        pairings = []
        for i in range(half):
            if units[i] is None or units[count - i - 1] is None:
                continue
            pairings.append((units[i], units[count - i - 1]))
        # rotate: keep units[0] fixed, move the last unit to position 1
        units.insert(1, units.pop())
        schedule.append(pairings)
    return schedule
def filter_df(p_df: pd.DataFrame, col_name: str, value, keep: bool = True, period=None):
    """
    Filter a dataframe on a column value (or list of values).

    Parameters :
        p_df : pandas.DataFrame
            The original dataframe
        col_name : str
            The dataframe column name where the filter will be applied
        value : item or list
            The value(s) used to filter the specified column
        keep : bool
            Whether to keep (True) or exclude (False) matching rows
        period : optional
            Unused; kept for backward compatibility with existing callers.

    Return : pandas.DataFrame
        The filtered initial dataframe
    """
    # membership test for lists, equality for scalars
    if isinstance(value, list):
        operator = 'in' if keep else 'not in'
    else:
        operator = '==' if keep else '!='
    # `@value` lets DataFrame.query reference the local variable directly
    return p_df.query(f"{col_name} {operator} @value")
import logging
def _VerifyOptions(options):
"""Verify the passed-in options.
Args:
options: The parsed options to verify.
Returns:
Boolean, True if verification passes, False otherwise.
"""
if options.endpoints_service and not options.openapi_template:
logging.error('Please specify openAPI template with --openapi_template '
'in deploying endpoints.')
return False
if options.openapi_template and not options.endpoints_service:
logging.error('Please specify endpoints service with --endpoints_service '
'in deploying endpoints.')
return False
if (options.endpoints_service and
options.project_id not in options.endpoints_service):
logging.error('The project "%s" is not matched to the endpoints service '
'"%s".', options.project_id, options.endpoints_service)
return False
return True | 872feb5ac314ed2ef28ddbfaeff1b5dafc5e9ed8 | 25,173 |
def force_delegate(func: _F) -> _F:
    """
    Mark *func* so delegation happens for it even when cls.delegate = False.

    Returns the same callable, tagged with a private marker attribute.
    """
    setattr(func, "_force_delegate", True)  # type: ignore[attr-defined]
    return func
import binascii
def create_signature(key_dict, data):
  """
  <Purpose>
    Return a signature dictionary of the form:
    {'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
     'sig': '...'}.

    The private key in key_dict['keyval']['private'] signs 'data'.
    Supported key types and schemes:
      'rsa'      -- schemes in RSA_SIGNATURE_SCHEMES (RSASSA-PSS and
                    RSA-PKCS1v15, RFC 3447, http://www.ietf.org/rfc/rfc3447)
      'ed25519'  -- high-speed high security signatures,
                    http://ed25519.cr.yp.to/
      'ecdsa', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384'
                 -- the latter two keytypes remain accepted for backwards
                    compatibility with older securesystemslib releases.

    The cryptography library configured in 'settings' performs the actual
    signing routine.

  <Arguments>
    key_dict:
      Key dictionary conformant to securesystemslib.formats.ANYKEY_SCHEMA.
      RSA key values are PEM strings; ed25519 key values are hex strings.

    data:
      Bytes to be signed; encode/serialize data before passing it here.
      The same value can later be passed to
      securesystemslib.verify_signature() (with the public key) to verify
      the signature.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'key_dict' is improperly
    formatted.

    securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict'
    specifies an unsupported key type or signing scheme.

    TypeError, if 'key_dict' contains an invalid keytype.

  <Side Effects>
    Invokes the configured cryptography library to sign.

  <Returns>
    A signature dictionary conformant to
    'securesystemslib_format.SIGNATURE_SCHEMA'.
  """
  # Schema check raises securesystemslib.exceptions.FormatError on failure.
  securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict)

  keytype = key_dict['keytype']
  scheme = key_dict['scheme']
  public = key_dict['keyval']['public']
  private = key_dict['keyval']['private']

  if keytype == 'rsa':
    if scheme not in RSA_SIGNATURE_SCHEMES:
      raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
          ' RSA signature scheme specified: ' + repr(scheme))
    # Normalize Windows line endings in the PEM before signing.
    sig, scheme = securesystemslib.rsa_keys.create_rsa_signature(
        private.replace('\r\n', '\n'), data, scheme)

  elif keytype == 'ed25519':
    # ed25519 key values are stored as hex strings; decode to raw bytes.
    sig, scheme = securesystemslib.ed25519_keys.create_signature(
        binascii.unhexlify(public.encode('utf-8')),
        binascii.unhexlify(private.encode('utf-8')), data, scheme)

  elif keytype in ['ecdsa', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384']:
    sig, scheme = securesystemslib.ecdsa_keys.create_signature(
        public, private, data, scheme)

  else:  # pragma: no cover
    # ANYKEY_SCHEMA should already have rejected unknown keytypes; this is
    # a defensive check.
    raise TypeError('Invalid key type.')

  # Store the hexadecimal representation of the raw signature bytes.
  return {'keyid': key_dict['keyid'], 'sig': binascii.hexlify(sig).decode()}
def parse_line(line):
    """Parse a queue trace line into a dict.

    Expects at least 12 whitespace-separated fields; returns an empty
    dict for shorter lines.
    """
    fields = line.split()
    if len(fields) < 12:
        return {}
    (event, time, src_node, dst_node, pkt_type, size,
     flags, fid, src, dst, seqnum, pktid) = fields[:12]
    return {
        "event": event,
        "time": float(time),
        "from": int(src_node),
        "to": int(dst_node),
        "type": pkt_type,
        "size": int(size),
        "flags": flags,
        "fid": int(fid),
        "src": src,
        "dst": dst,
        "seqnum": int(seqnum),
        "pktid": int(pktid),
    }
from typing import Dict
def gaussian_linear_combination(distributions_and_weights: Dict):
    """Compute the distribution of a weighted sum of independent Gaussians.

    Maps each MultivariateNormal key to its weight; the result has mean
    sum(w_i * mu_i) and covariance sum(w_i**2 * Sigma_i).
    """
    assert isinstance(distributions_and_weights, dict)
    assert all(
        isinstance(dist, MultivariateNormal)
        for dist in distributions_and_weights.keys()
    )
    combined_mean = sum(
        dist.loc * weight
        for dist, weight in distributions_and_weights.items()
    )
    combined_cov = sum(
        dist.covariance_matrix * (weight ** 2)
        for dist, weight in distributions_and_weights.items()
    )
    return MultivariateNormal(loc=combined_mean, covariance_matrix=combined_cov)
def check_pattern_startswith_slash(pattern):
    """
    Check that the pattern does not begin with a forward slash.

    Returns a single-element list of Warning (id urls.W002) when the
    pattern's regex starts with '/' or '^/', otherwise an empty list.
    """
    regex_pattern = pattern.regex.pattern
    # startswith accepts a tuple of alternatives.
    if not regex_pattern.startswith(('/', '^/')):
        return []
    return [
        Warning(
            "Your URL pattern {} has a regex beginning with a '/'. "
            "Remove this slash as it is unnecessary.".format(describe_pattern(pattern)),
            id="urls.W002",
        )
    ]
import subprocess
def attack_images(cores, prob_cutoff):
    """
    Build and score the image-similarity dataset for the attack pipeline.

    Pipeline: slice the target media list across workers, shell out to
    download images and convert them to per-category probabilities, then
    combine / clean / count the results, build user pairs and score the
    final pair dataset.

    :param cores: how many cores to use for multiprocessing
    :param prob_cutoff: user's image belongs to a certain category if the output of the last FC layer of the resnet model for the category > prob_cutoff
    :return: path of the image dataset CSV written under DATAPATH
    """
    # NOTE(review): `city`, `DATAPATH`, `classifiers` and the helper
    # functions called below are module-level names defined elsewhere in
    # this file -- confirm they are in scope before calling.
    mediaFile = "target_media"
    slice_files(mediaFile, DATAPATH, cores)
    # NOTE(review): subprocess argument lists must contain strings --
    # confirm `cores` is passed in as a str, otherwise this raises TypeError.
    subprocess.call(['./parallelize_im2proba.sh', cores,
                     city])  # downloads images and converts to embeddings, shell script calls im2proba.py
    prob_file = combine_files(DATAPATH, cores)
    clean_file = clean_trim(prob_cutoff, DATAPATH, prob_file)
    counts_file = count_cats(DATAPATH, clean_file, countsFile="proba_cut_01_counts.csv" )
    # Build friend + stranger pairs from the per-user category counts.
    allPairs = make_allPairs("avg_pairs.csv", u_list_file=counts_file, DATAPATH=DATAPATH,
                             friendFile=city + ".target_friends", makeStrangers=True)
    data_file = DATAPATH + "im_dataset.csv"
    dataset = make_features_counts(DATAPATH, clean_file, data_file, counts_file,
                                   allPairs)
    score(dataset, name="mini-counts, cosine, entropy of max cat", classifiers=classifiers)
    print ("Created image dataset at", data_file)
    return data_file
def ht(x):
    """ht(x)
    Evaluates the Heaviside step function elementwise.

    Args:
        x: Domain points (array-like)
    Returns:
        ht(x): float array with 0.0 where x < 0, 0.5 where x == 0 and
        1.0 where x > 0

    Note: the previous loop iterated over ``range(np.size(x) - 1)`` and
    therefore never updated the last element (it always stayed 1); the
    vectorized form below covers the whole array.
    """
    x = np.asarray(x)
    # float dtype so the 0.5 midpoint is representable even for int input
    g = np.ones_like(x, dtype=float)
    g[x < 0] = 0.0
    g[x == 0] = 0.5
    return g
def xoGkuXokhXpZ():
    """Package link to class."""
    return Package("pkg").circles.simple_class.Foo
def create_all_pts_within_observation_window(observation_window_hours) -> str:
    """
    Create (or replace) a SQL view of all admissions that remain active
    for the whole observation window, i.e. excluding any admission where
    the patient died or was discharged within `observation_window_hours`
    of admission.

    Args:
        observation_window_hours: window length in hours, measured from
            each admission's `admittime`.

    Returns:
        The fully-qualified name of the created view.

    NOTE(review): relies on a module-level `cursor` (database connection
    opened elsewhere in this file) and on the MIMIC-III `admissions`
    table being reachable -- confirm both before calling.
    """
    # Only the view name is interpolated via the f-string; the window
    # length goes through the driver's %(...)s parameter binding below.
    view_name = f"default.all_pts_{observation_window_hours}_hours"
    query = f"""
    CREATE OR REPLACE VIEW {view_name} AS
    WITH admits AS (
    SELECT
    admits.subject_id,
    admits.hadm_id,
    admits.admittime,
    admits.admittime + interval %(time_window_hours)s hour index_date,
    CASE WHEN admits.deathtime <= (admits.admittime + interval %(time_window_hours)s hour) THEN 1 ELSE 0 END AS death_during_obs_win,
    CASE WHEN admits.dischtime <= (admits.admittime + interval %(time_window_hours)s hour) THEN 1 ELSE 0 END AS disch_during_obs_win
    FROM mimiciii.admissions admits
    )
    SELECT
    admits.subject_id,
    admits.hadm_id,
    admits.index_date,
    admits.admittime
    FROM admits
    WHERE
    admits.death_during_obs_win != 1
    and admits.disch_during_obs_win != 1
    order by random()
    --limit 1000
    """
    params = {
        'time_window_hours': str(observation_window_hours)
    }
    cursor.execute(query, params)
    return view_name
def get_signature(data, raw_labels):
    """
    Build a 4 x z* signature matrix, where z* is the number of distinct
    classes in raw_labels.  Rows 0-1 hold the per-class mean of a 2D PCA
    embedding; rows 2-3 hold the per-class mean of a 2D LLE embedding.
    """
    labels = raw_labels.reset_index()
    pca_coords = pd.DataFrame(
        decomposition.PCA(n_components=2).fit_transform(data))
    lle_coords = pd.DataFrame(
        manifold.LocallyLinearEmbedding(n_components=2).fit_transform(data))
    classes = labels[0].unique()
    S = np.zeros([4, np.shape(classes)[0]])
    for cls in classes:
        members = labels.loc[labels[0] == cls].index
        S[0, cls] = pca_coords.loc[members][0].mean()
        S[1, cls] = pca_coords.loc[members][1].mean()
        S[2, cls] = lle_coords.loc[members][0].mean()
        S[3, cls] = lle_coords.loc[members][1].mean()
    return S
def get_xyz_where(Z, Cond):
    """
    Z and Cond are MxN matrices. Z are data and Cond is a boolean matrix
    marking where some condition holds. Returns the 1D arrays x, y, z
    where (x, y) are the row/column indices into Z of the selected cells
    and z holds Z's values there.
    """
    rows, cols = np.indices(Z.shape)
    return rows[Cond], cols[Cond], Z[Cond]
def retrieve_seq_length(data):
    """Compute the length of each sequence; all-zero steps count as padding.

    Args:
        data: input sequence tensor of shape (batch, time, features)
    Returns:
        an int32 tensor of per-example sequence lengths
    """
    with tf.name_scope('GetLength'):
        step_mask = tf.sign(tf.reduce_max(tf.abs(data), axis=2))
        seq_len = tf.reduce_sum(step_mask, axis=1)
        return tf.cast(seq_len, tf.int32)
def get_census_params(variable_ids, county_level=False):
    """Build census API url params.

    variable_ids: list of variable ids to request; NAME is appended
        automatically (the input list is not modified).
    county_level: request at the county level when True, otherwise the
        state level."""
    fields = ",".join(variable_ids + ["NAME"])
    geography = "county:*" if county_level else "state:*"
    return {"get": fields, "for": geography}
def lookupName(n, names):
    """Check if name is in list of names

    Parameters
    ----------
    n : str
        Name to check
    names : list
        List of names to check in

    Returns
    -------
    bool
        Flag denoting if name has been found in list (True) or not (False)
    """
    # `in` already yields the boolean; no need for an if/else that
    # returns True/False literals.
    return n in names
def calculate_appointments(new_set, old_set):
    """
    Split the fresh set of available appointments into new and existing.

    new_set is the current set of all available appointments; old_set is
    the set from the previous polling interval.  Returns a pair
    (new_appointments, old_appointments):

      * new_appointments -- appointments present now but not before
        (set difference), e.g. {'HONEOYE'} when HONEOYE just appeared.
      * old_appointments -- appointments present both now and before
        (set intersection, the overlap in a Venn diagram).

    When nothing changed, new_appointments is the empty set and
    old_appointments equals new_set.
    """
    newly_available = new_set - old_set
    still_available = new_set & old_set
    return newly_available, still_available
from typing import Callable
import logging
def log(func: Callable[..., RT]) -> Callable[..., RT]:
    """Decorator that logs entering and exiting the wrapped function for debugging."""
    module_logger = logging.getLogger(func.__module__)

    @wraps(func)
    def wrapper(*args, **kwargs) -> RT:
        module_logger.debug("Entering: %s", func.__name__)
        outcome = func(*args, **kwargs)
        # Note: only logged on normal return, matching the contract that
        # an exception propagates without an "Exiting" record.
        module_logger.debug("Exiting: %s", func.__name__)
        return outcome

    return wrapper
def transaksi_hari_ini():
    """
    Count today's Transaksi rows (matched on year, month and day).

    used in: app_kasir/statistik.html
    """
    # Take a single timestamp: the previous version called timezone.now()
    # three times, so the year/month/day filters could disagree if the
    # calls straddled a day (or month/year) boundary.
    now = timezone.now()
    return Transaksi.objects.filter(
        tanggal_transaksi__year=now.year,
        tanggal_transaksi__month=now.month,
        tanggal_transaksi__day=now.day
    ).count()
def extractWindows(signal, window_size=10, return_window_indices=False):
    """ Reshape a signal into a series of non-overlapping windows.

    The tail is padded with NaN up to a full window.  (The previous
    version computed the pad as ``window_size - tail_len``, which added a
    spurious all-NaN window whenever the signal length was an exact
    multiple of ``window_size``.)

    Parameters
    ----------
    signal : numpy array, shape (num_samples,)
    window_size : int, optional
    return_window_indices : bool, optional

    Returns
    -------
    windows : numpy array, shape (num_windows, window_size)
    window_indices : numpy array of int, shape (num_windows, window_size)
    """
    # (-n) % w is 0 when n divides evenly, otherwise w - (n % w).
    pad_len = (-signal.shape[0]) % window_size
    if pad_len:
        pad_arr = m.np.full(pad_len, m.np.nan)
        signal_padded = m.np.concatenate((signal, pad_arr))
    else:
        signal_padded = signal
    windows = signal_padded.reshape((-1, window_size))
    if not return_window_indices:
        return windows
    indices = m.np.arange(signal_padded.shape[0])
    window_indices = indices.reshape((-1, window_size))
    return windows, window_indices
def add(left: int, right: int):
    """
    Print the sum of two numbers and return exit status 0.
    """
    total = left + right
    print(total)
    return 0
import argparse
def parse_args():
    """
    Parse command-line arguments.

    Returns:
        argparse.Namespace with `retrieval_id` (int), `directory` and
        `log_level` attributes.

    NOTE(review): relies on module-level `NEW_BASE_OUTPUT_DIR` and
    `__version__` being defined elsewhere in this file; reads sys.argv.
    """
    parser = argparse.ArgumentParser(description='Move retrieval to a '
                                     'different directory.')
    # Positional: which retrieval to move.
    parser.add_argument('retrieval_id', help='the id of the retrieval '
                        'to move', type=int)
    parser.add_argument('-d', '--directory', help='the new top-level directory '
                        ' for the DRS structure (default: %(default)s)',
                        default=NEW_BASE_OUTPUT_DIR)
    parser.add_argument('-l', '--log-level', help='set logging level to one of '
                        'debug, info, warn (the default), or error')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    args = parser.parse_args()
    return args
import random
def rand_x_digit_num(x):
    """Return a random number as a zero-padded string of exactly `x` digits."""
    return str(random.randint(0, 10 ** x - 1)).zfill(x)
def gen_workflow_steps(step_list):
    """Generate a printable table of steps for a workflow.

    Assumes step_list is a list of dicts with 'task_id' and 'state';
    an empty or falsy list renders a single placeholder row.
    """
    table = format_utils.table_factory(field_names=['Steps', 'State'])
    if not step_list:
        table.add_row(['None', ''])
    else:
        for entry in step_list:
            table.add_row([entry.get('task_id'), entry.get('state')])
    return format_utils.table_get_string(table)
import os
def commonprefix(items):
    """Get common prefix for completions

    Return the longest common prefix of a list of strings, but with
    special treatment of escape characters that might precede commands in
    IPython, such as %magic functions. Used in tab completion.

    For a more general function, see os.path.commonprefix
    """
    # min()/max() give the alphabetically first/last items; the last item
    # always carries the fewest leading % symbols.
    first_match = ESCAPE_RE.match(min(items))
    last_match = ESCAPE_RE.match(max(items))
    prefix = ''
    if first_match and last_match:
        # Common escape suffix == reversed common prefix of the reversals.
        reversed_common = os.path.commonprefix(
            (first_match.group(0)[::-1], last_match.group(0)[::-1]))
        prefix = reversed_common[::-1]
    stripped = [s.lstrip(ESCAPE_CHARS) for s in items]
    return prefix + os.path.commonprefix(stripped)
from typing import Union
from typing import Any
def sround(x: Union[np.ndarray, float, list, tuple], digits: int=1) -> Any:
    """ 'smart' round to the `digits` most significant decimal places

    Args
        x       (float, list, tuple, ndarray)
        digits  (int [1]) number of digits beyond the leading one

    Examples
        >>> sround(0.0212343, 2)   # result 0.0212
        >>> sround((0.0212343, 123.456), 2)   # result (0.0212, 123.0)

    Note: tuple inputs previously crashed with ``TypeError: 'int' object
    is not subscriptable`` because the per-element digit counts were only
    computed in the list/ndarray branch; the branches are now unified.
    Lists and ndarrays are still rounded in place (and returned), as
    before.
    """
    if isinstance(x, (float, np.float64, np.float32)):
        # log10 of 0 is undefined; treat it as 0 so digits stays sane.
        safelog10 = lambda v: 0.0 if not v else np.log10(np.abs(v))
        return np.round(x, max((-np.floor(safelog10(x)).astype(int) + digits), 0))
    _as_tuple = isinstance(x, tuple)
    if _as_tuple:
        x = list(x)
    if isinstance(x, (list, np.ndarray)):
        safelog10 = np.log10(np.abs(x))
        # zeros produce +/-inf; clamp so they round like magnitude-1 values
        safelog10[np.abs(safelog10) == np.inf] = 0
        per_element = np.maximum(-np.floor(safelog10).astype(int) + digits, 0)
        for i in range(len(x)):
            x[i] = np.round(x[i], per_element[i])
    if _as_tuple:
        x = tuple(x)
    return x
def element_png_display(element, max_frames):
    """
    Used to render elements to PNG if requested in the display formats.

    Returns the raw PNG data, or None when PNG output is not requested,
    the element type is not registered with the current backend, or the
    active renderer cannot produce PNG.  `max_frames` is accepted for
    signature symmetry with the other display hooks but is not used here.
    """
    if 'png' not in Store.display_formats:
        return None
    # process_object() yields an HTML message when the object cannot be
    # rendered directly -- presumably a validation/error notice; show it
    # and bail out (implicit None return).
    info = process_object(element)
    if info:
        IPython.display.display(IPython.display.HTML(info))
        return
    backend = Store.current_backend
    if type(element) not in Store.registry[backend]:
        return None
    renderer = Store.renderers[backend]
    # Current renderer does not support PNG
    if 'png' not in renderer.params('fig').objects:
        return None
    data, info = renderer(element, fmt='png')
    return data
def list_documents(connection, name: str = None, to_dictionary: bool = False,
                   to_dataframe: bool = False, limit: int = None, **filters):
    """List all Documents in the project selected on `connection`.

    Args:
        connection(object): MicroStrategy connection object returned by
            'connection.Connection()'; must have a project selected
        name: exact name of the document to list
        to_dictionary(bool, optional): return Documents as a list of dicts
        to_dataframe(bool, optional): return Documents as a pandas DataFrame
        limit: cap on the number of documents returned; `None` returns all
        **filters: Available filter parameters: ['name', 'id', 'type',
            'subtype', 'date_created', 'date_modified', 'version', 'acg',
            'owner', 'ext_type', 'view_media', 'certified_info', 'project_id']

    Returns:
        List of documents.

    Raises:
        ValueError: when no project is selected on the connection.
    """
    # TODO: consider adding Connection.project_selected attr/method
    if connection.project_id is None:
        raise ValueError(
            "Please log into a specific project to load documents within it. "
            f"To load all documents across the whole environment use {list_documents_across_projects.__name__} function")
    return Document._list_all(connection, name=name, limit=limit,
                              to_dictionary=to_dictionary,
                              to_dataframe=to_dataframe, **filters)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.