| content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64, 0-3.66M) |
|---|---|---|
def paradigm_filler(shared_datadir) -> ParadigmFiller:
"""
    These layout, paradigm, and hfstol files are **pinned** test data;
the real files in use are hosted under res/ folder, and should not
be used in tests!
"""
return ParadigmFiller(
shared_datadir / "layouts",
shared_datadir / "crk-normative-generator.hfstol",
)
|
e20ca1eebfeca8b4d54d87a02f723e21fd54c3bf
| 3,644,700
|
from pydicom.tag import Tag

def apply_scaling(data, dicom_headers):
"""
    Rescale the data based on the RescaleSlope and RescaleIntercept
    Based on the scaling from pydicomseries
    :param dicom_headers: dicom headers to use to retrieve the scaling factors
:param data: the input data
"""
# Apply the rescaling if needed
private_scale_slope_tag = Tag(0x2005, 0x100E)
private_scale_intercept_tag = Tag(0x2005, 0x100D)
if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \
or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers:
rescale_slope = 1
rescale_intercept = 0
if 'RescaleSlope' in dicom_headers:
rescale_slope = dicom_headers.RescaleSlope
if 'RescaleIntercept' in dicom_headers:
rescale_intercept = dicom_headers.RescaleIntercept
# try:
# # this section can sometimes fail due to unknown private fields
# if private_scale_slope_tag in dicom_headers:
# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
        # if private_scale_intercept_tag in dicom_headers:
        #     private_scale_intercept = float(dicom_headers[private_scale_intercept_tag].value)
# except:
# pass
return do_scaling(data, rescale_slope, rescale_intercept)
else:
return data
|
13bc94058aa725b59f017fc069408a9d279cd933
| 3,644,701
|
from itertools import product
def allowed_couplings(coupling, flow, free_id, symmetries):
"""Iterator over all the allowed Irreps for free_id in coupling if the
other two couplings are fixed.
"""
if len(coupling) != 3:
raise ValueError(f'len(coupling) [{len(coupling)}] != 3')
if len(flow) != 3:
raise ValueError(f'len(flow) [{len(flow)}] != 3')
other_ids = [0, 1, 2]
other_ids.remove(free_id)
other_c = [coupling[o] for o in other_ids]
other_f = [flow[o] for o in other_ids]
this_f = flow[free_id]
def fermionic_constraint(oirr, oflow, tflow):
yield sum(oirr) % 2
def U1_constraint(oirr, oflow, tflow):
sign = {True: 1, False: -1}
yield sign[not tflow] * sum(sign[f] * x for x, f in zip(oirr, oflow))
def pg_constraint(oirr, oflow, tflow):
yield oirr[0] ^ oirr[1]
def SU2_constraint(oirr, oflow, tflow):
return range(abs(oirr[0] - oirr[1]), oirr[0] + oirr[1] + 1, 2)
constraint = {
'fermionic': fermionic_constraint,
'U(1)': U1_constraint,
'SU(2)': SU2_constraint,
'seniority': U1_constraint,
'C1': pg_constraint,
'Ci': pg_constraint,
'C2': pg_constraint,
'Cs': pg_constraint,
'D2': pg_constraint,
'C2v': pg_constraint,
'C2h': pg_constraint,
'D2h': pg_constraint
}
for ncoupling in product(*[constraint[s](c, other_f, this_f)
for *c, s in zip(*other_c, symmetries)]):
yield ncoupling
|
1e2d71edc68b8ecebfa3e09eae17e17a381d82b4
| 3,644,702
|
from osgeo import ogr

def poly_iou(poly1, poly2, thresh=None):
"""Compute intersection-over-union for two GDAL/OGR geometries.
Parameters
----------
poly1:
First polygon used in IOU calc.
poly2:
Second polygon used in IOU calc.
thresh: float or None
If not provided (default), returns the float IOU for the two polygons.
If provided, return True if the IOU met this threshold. Otherwise,
False.
Returns
-------
IOU: float or bool
Return the IOU value if `thresh` is None, otherwise boolean if the
threshold value was met.
"""
poly1 = ogr.CreateGeometryFromWkb(poly1)
poly2 = ogr.CreateGeometryFromWkb(poly2)
if not poly1.Intersects(poly2):
return False
intersection_area = poly1.Intersection(poly2).Area()
#intersection_area = intersection.Area()
union_area = poly1.Union(poly2).Area()
#union_area = union.Area()
# If threshold was provided, return if IOU met the threshold
if thresh is not None:
return (intersection_area / union_area) >= thresh
return intersection_area / union_area
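
# A minimal usage sketch (not part of the original module; geometries are
# hypothetical and GDAL/OGR is assumed to be installed): two 2x2 squares that
# overlap in a 1x2 strip give IOU = 2 / 6.
if __name__ == "__main__":
    square_a = ogr.CreateGeometryFromWkt(
        "POLYGON((0 0, 2 0, 2 2, 0 2, 0 0))").ExportToWkb()
    square_b = ogr.CreateGeometryFromWkt(
        "POLYGON((1 0, 3 0, 3 2, 1 2, 1 0))").ExportToWkb()
    print(poly_iou(square_a, square_b))              # -> ~0.333
    print(poly_iou(square_a, square_b, thresh=0.5))  # -> False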
|
c033e654144bb89093edac049b0dfcd4cdec3d1a
| 3,644,703
|
def score_file(filename):
"""Score each line in a file and return the scores."""
# Prepare model.
hparams = create_hparams()
encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
has_inputs = "inputs" in encoders
# Prepare features for feeding into the model.
if has_inputs:
inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D.
targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D.
if has_inputs:
features = {"inputs": batch_inputs, "targets": batch_targets}
else:
features = {"targets": batch_targets}
# Prepare the model and the graph when model runs on features.
model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
_, losses = model(features)
saver = tf.train.Saver()
with tf.Session() as sess:
# Load weights from checkpoint.
if FLAGS.checkpoint_path is None:
ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
ckpt = ckpts.model_checkpoint_path
else:
ckpt = FLAGS.checkpoint_path
saver.restore(sess, ckpt)
# Run on each line.
with tf.gfile.Open(filename) as f:
lines = f.readlines()
results = []
for line in lines:
tab_split = line.split("\t")
if len(tab_split) > 2:
raise ValueError("Each line must have at most one tab separator.")
if len(tab_split) == 1:
targets = tab_split[0].strip()
else:
targets = tab_split[1].strip()
inputs = tab_split[0].strip()
# Run encoders and append EOS symbol.
targets_numpy = encoders["targets"].encode(
targets) + [text_encoder.EOS_ID]
if has_inputs:
inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
# Prepare the feed.
if has_inputs:
feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
else:
feed = {targets_ph: targets_numpy}
# Get the score.
np_loss = sess.run(losses["training"], feed)
results.append(np_loss)
return results
|
62cac29e72bf5e7651cd8a258fb4151b6d7e1ff4
| 3,644,704
|
import responses
from renku.core.commands.providers.dataverse import DATAVERSE_API_PATH, DATAVERSE_VERSION_API
from renku.core.commands.providers.doi import DOI_BASE_URL
import json
import re
import urllib
import pathlib
def doi_responses():
"""Responses for doi.org requests."""
with responses.RequestsMock(assert_all_requests_are_fired=False) as response:
def doi_callback(request):
response_url = "https://dataverse.harvard.edu/citation" "?persistentId=doi:10.11588/data/yyxx1122"
if "zenodo" in request.url:
response_url = "https://zenodo.org/record/3363060"
return (
200,
{"Content-Type": "application/json"},
json.dumps(
{
"type": "dataset",
"id": request.url,
"author": [{"family": "Doe", "given": "John"}],
"contributor": [{"contributorType": "ContactPerson", "family": "Doe", "given": "John"}],
"issued": {"date-parts": [[2019]]},
"abstract": "Test Dataset",
"DOI": "10.11588/data/yyxx1122",
"publisher": "heiDATA",
"title": "dataset",
"URL": response_url,
}
),
)
response.add_callback(
method="GET", url=re.compile("{base_url}/.*".format(base_url=DOI_BASE_URL)), callback=doi_callback
)
def version_callback(request):
return (
200,
{"Content-Type": "application/json"},
json.dumps({"status": "OK", "data": {"version": "4.1.3", "build": "abcdefg"}}),
)
base_url = "https://dataverse.harvard.edu"
url_parts = list(urllib.parse.urlparse(base_url))
url_parts[2] = pathlib.posixpath.join(DATAVERSE_API_PATH, DATAVERSE_VERSION_API)
pattern = "{url}.*".format(url=urllib.parse.urlunparse(url_parts))
response.add_callback(method="GET", url=re.compile(pattern), callback=version_callback)
yield response
|
8e467910f2d9ad4df06ff0ecb11c0812e7dc3bb5
| 3,644,705
|
def lat_lng_to_tile_xy(latitude, longitude, level_of_detail):
"""gives you zxy tile coordinate for given latitude, longitude WGS-84 coordinates (in decimal degrees)
"""
x, y = lat_lng_to_pixel_xy(latitude, longitude, level_of_detail)
return pixel_xy_to_tile_xy(x, y)
|
99e2a4b3d9dee41222b434768bed5501e7561a40
| 3,644,706
|
import numpy as np

def update_inverse_jacobian(previous_inv_jac, dx, df, threshold=0, modify_in_place=True):
"""
Use Broyden method (following Numerical Recipes in C, 9.7) to update inverse Jacobian
    previous_inv_jac is the previous inverse Jacobian (n x n)
dx is delta x for last step (n)
df is delta errors for last step (n)
"""
dot_dx_inv_j = np.dot(dx, previous_inv_jac)
denom = np.dot(dot_dx_inv_j, df)
if abs(threshold) <= 0:
threshold = MIN_DENOM
if abs(denom) < threshold:
return previous_inv_jac, False
if modify_in_place:
previous_inv_jac += np.outer((dx - np.dot(previous_inv_jac, df)), dot_dx_inv_j) / denom
result = previous_inv_jac
else:
result = previous_inv_jac + np.outer((dx - np.dot(previous_inv_jac, df)), dot_dx_inv_j) / denom
return result, True
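
# A minimal usage sketch (hypothetical numbers, not part of the original module):
# one Broyden update of an identity inverse-Jacobian guess, using the last step
# dx and the corresponding change in the residuals df.
if __name__ == "__main__":
    inv_j = np.eye(2)
    dx = np.array([0.1, -0.05])   # step taken in x on the last iteration
    df = np.array([0.03, 0.02])   # change in the error vector over that step
    inv_j_new, updated = update_inverse_jacobian(
        inv_j, dx, df, threshold=1e-12, modify_in_place=False)
    print(updated)     # True: the denominator is well away from zero
    print(inv_j_new)   # rank-one correction of the identity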
|
4f6a0e3e3bdc25132fae2aa1df9d0bbcdd73c3b1
| 3,644,707
|
def _ConvertStack(postfix):
"""Convert postfix stack to infix string.
Arguments:
postfix: A stack in postfix notation. The postfix stack will be modified
as elements are being popped from the top.
Raises:
    BigqueryInvalidQueryError: There are not enough arguments for functions/operators.
Returns:
    A string of the infix representation of the stack.
"""
if not postfix:
raise bigquery_client.BigqueryInvalidQueryError(
'Not enough arguments.', None, None, None)
top = postfix.pop()
if isinstance(top, util.OperatorToken):
args = []
for unused_i in range(top.num_args):
args.append(_ConvertStack(postfix))
args.reverse()
if top.num_args == 1:
return '%s %s' % (str(top), args[0])
else:
return '(%s %s %s)' % (args[0], str(top), args[1])
elif isinstance(top, util.BuiltInFunctionToken):
func_name = str(top)
if func_name in _ZERO_ARGUMENT_FUNCTIONS:
return '%s()' % func_name
elif func_name in _ONE_ARGUMENT_FUNCTIONS:
op = _ConvertStack(postfix)
return '%s(%s)' % (func_name, op)
elif func_name in _TWO_ARGUMENT_FUNCTIONS:
op2 = _ConvertStack(postfix)
op1 = _ConvertStack(postfix)
return '%s(%s, %s)' % (top, op1, op2)
elif func_name in _THREE_ARGUMENT_FUNCTIONS:
op3 = _ConvertStack(postfix)
op2 = _ConvertStack(postfix)
op1 = _ConvertStack(postfix)
return '%s(%s, %s, %s)' % (top, op1, op2, op3)
else:
raise bigquery_client.BigqueryInvalidQueryError(
'Function %s does not exist.' % str(top), None, None, None)
elif isinstance(top, util.AggregationFunctionToken):
num_args = top.num_args
func_name = str(top)
ops = []
for unused_i in range(int(num_args)):
ops.append(_ConvertStack(postfix))
ops.reverse()
if func_name == 'DISTINCTCOUNT':
func_name = 'COUNT'
ops[0] = 'DISTINCT ' + ops[0]
ops = [str(op) for op in ops]
return func_name + '(' + ', '.join(ops) + ')'
elif not isinstance(top, basestring):
return str(top)
else:
return top
|
d69f4a503a84efedbf623cca24c496f5540d1b77
| 3,644,708
|
import re
def count_repeats_for_motif(seq, motif, tally, intervals=None):
"""
seq --- plain sequence to search for the repeats (motifs)
motif --- plain sequence of repeat, ex: CGG, AGG
intervals --- 0-based start, 1-based end of Intervals to search motif in
"""
if intervals is None: # use the whole sequence
intervals = [Interval(0, len(seq))]
new_intl = []
for intl in intervals:
cur = seq[intl.start:intl.end]
prev_end = intl.start
found_flag = False
for m in re.finditer(motif, cur):
tally[motif].append(intl.start + m.start())
            if intl.start + m.start() > prev_end:
                # new interval runs from prev_end to the absolute match start (0-based start, 1-based end)
new_intl.append(Interval(prev_end, intl.start + m.start()))
prev_end = intl.start + m.end()
found_flag = True
if not found_flag:
new_intl.append(intl)
return new_intl
|
2a29339555374aaeb70ea07872a81a56050a9f36
| 3,644,709
|
import requests
import re
from urllib.parse import urlparse, parse_qs
from lxml import etree
def get_oauth2_token(session: requests.Session, username: str, password: str):
"""Hackily get an oauth2 token until I can be bothered to do this correctly"""
params = {
'client_id': OAUTH2_CLIENT_ID,
'response_type': 'code',
'access_type': 'offline',
'redirect_uri': OAUTH2_REDIRECT_URI,
}
r1 = session.get(f'{LOGIN_URL}/oauth2/auth', params=params)
email_regex = (
r'^\s*(\w+(?:(?:-\w+)|(?:\.\w+)|(?:\+\w+))*\@'
r'[A-Za-z0-9]+(?:(?:\.|-)[A-Za-z0-9]+)*\.[A-Za-z0-9][A-Za-z0-9]+)\s*$'
)
clean_username = re.sub(email_regex, r'\1', username)
etr = etree.HTML(r1.text)
post_data = {
i.attrib['name']: i.attrib['value']
for i in etr.xpath("//form[@id = 'frmsignin']//input")
if 'value' in i.keys()
}
post_data['username'] = clean_username
post_data['password'] = password
r2 = session.post(f'{LOGIN_URL}/oauth2/g_authenticate', data=post_data, allow_redirects=False)
code = parse_qs(urlparse(r2.headers['Location']).query)['code'][0]
r3 = session.post(
f'{LOGIN_URL}/oauth2/token',
data={
'code': code,
'client_id': OAUTH2_CLIENT_ID,
'client_secret': OAUTH2_CLIENT_SECRET,
'redirect_uri': OAUTH2_REDIRECT_URI,
'grant_type': 'authorization_code',
}, auth=(OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET)
)
oauth_token = r3.json()
try:
session.headers.update({'Authorization': 'Bearer ' + oauth_token['access_token']})
except KeyError:
# TODO: make this better
raise GeAuthError(f'Failed to get a token: {oauth_token}')
return oauth_token
|
1361f76cdba195e43ba77ee756ef434ee6dab991
| 3,644,710
|
import numpy as np

def press_level(pressure, heights, plevels, no_time=False):
"""
Calculates geopotential heights at a given pressure level
Parameters
----------
pressure : numpy.ndarray
The 3-D pressure field (assumes time dimension, turn off
with `no_time=True`)
heights : numpy.ndarray
The 3-D array of gridbox heights
plevels : list
List of pressure levels to interpolate to
    no_time : bool, optional
        Set to `True` to indicate lack of a time dimension. Default is `False`.
Returns
-------
press_height : numpy.ndarray
The geopotential heights at the specified pressure levels
"""
if no_time is False:
try:
tlen, zlen, ylen, xlen = pressure.shape
press_height = np.zeros((tlen, ylen, xlen))
for t in range(0, tlen):
for x in range(0, xlen):
for y in range(0, ylen):
press_height[t, y, x] =\
log_interpolate_1d(plevels, pressure[t, :, y, x],
heights[:, y, x])
except ValueError:
print("Error in dimensions, trying with no_time=True")
no_time = True
    if no_time is True:
        try:
            zlen, ylen, xlen = pressure.shape
            press_height = np.zeros((ylen, xlen))
            for x in range(0, xlen):
                for y in range(0, ylen):
                    press_height[y, x] =\
                        log_interpolate_1d(plevels, pressure[:, y, x],
                                           heights[:, y, x])
        except ValueError:
            print("Error in dimensions")
return press_height
|
7cae6fe91eb1f6ad4171006633d04744909849c5
| 3,644,711
|
import numpy as np

def valtoindex(thearray, thevalue, evenspacing=True):
"""
Parameters
----------
thearray: array-like
An ordered list of values (does not need to be equally spaced)
thevalue: float
The value to search for in the array
evenspacing: boolean, optional
If True (default), assume data is evenly spaced for faster calculation.
Returns
-------
closestidx: int
        The index of the sample in thearray that is closest to thevalue
"""
if evenspacing:
limval = np.max([thearray[0], np.min([thearray[-1], thevalue])])
return int(np.round((limval - thearray[0]) / (thearray[1] - thearray[0]), 0))
else:
return (np.abs(thearray - thevalue)).argmin()
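
# A minimal usage sketch (not part of the original module): on an evenly spaced
# axis the index comes straight from the spacing; the brute-force branch gives
# the same answer here.
if __name__ == "__main__":
    axis = np.linspace(0.0, 10.0, 11)                    # 0, 1, ..., 10
    print(valtoindex(axis, 3.2))                         # -> 3
    print(valtoindex(axis, 3.2, evenspacing=False))      # -> 3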
|
5540023c77b544fbd91a724badf467981a0e0a5c
| 3,644,712
|
def get_converter(result_format, converters=None):
"""
    Gets a converter; returns the class and a content-type.
"""
converters = get_default_converters() if converters is None else converters
if result_format in converters:
return converters.get(result_format)
else:
raise ValueError('No converter found for type {}'.format(result_format))
|
79ce7a728fb801922d672716aeb77dc76e270194
| 3,644,713
|
import ast
def _deduce_ConstantArray(
self: ast.ConstantArray, ctx: DeduceCtx) -> ConcreteType: # pytype: disable=wrong-arg-types
"""Deduces the concrete type of a ConstantArray AST node."""
# We permit constant arrays to drop annotations for numbers as a convenience
# (before we have unifying type inference) by allowing constant arrays to have
# a leading type annotation. If they don't have a leading type annotation,
  # just fall back to normal array type inference; if we encounter a number
  # without a type annotation, we'll flag an error per usual.
if self.type_ is None:
return _deduce_Array(self, ctx)
# Determine the element type that corresponds to the annotation and go mark
# any un-typed numbers in the constant array as having that type.
concrete_type = deduce(self.type_, ctx)
if not isinstance(concrete_type, ArrayType):
raise TypeInferenceError(
self.type_.span, concrete_type,
f'Annotated type for array literal must be an array type; got {concrete_type.get_debug_type_name()} {self.type_}'
)
element_type = concrete_type.get_element_type()
for member in self.members:
assert ast.is_constant(member)
if isinstance(member, ast.Number) and not member.type_:
ctx.type_info[member] = element_type
_check_bitwidth(member, element_type)
# Use the base class to check all members are compatible.
_deduce_Array(self, ctx)
return concrete_type
|
91b9966bc5f0fd1254551e450593b8f8b669baad
| 3,644,714
|
def _to_bool(s):
"""Convert a value into a CSV bool."""
if s.lower() == 'true':
return True
elif s.lower() == 'false':
return False
else:
raise ValueError('String cannot be converted to bool')
|
3f6c31a07e7ba054e5c52f9d3c09fdd2f004fec5
| 3,644,715
|
def register(*args, cache_default=True):
"""
Registers function for further caching its calls and restoring source.
Example:
``` python
@register
def make_ohe_pclass(df):
...
```
"""
def __register(func):
# if source_utils.source_is_saved(func) and not source_utils.matches_cache(func):
if func.__name__ + '_fc' in cache.cached_objs() and source_utils.get_source(func) != cache.load_obj(func.__name__ + '_fc').source:
raise NameError("A function with the same name is already registered")
if func.__name__ + '_fc' in cache.cached_objs():
return cache.load_obj(func.__name__ + '_fc')
else:
functor = FeatureConstructor(func, cache_default)
cache.cache_obj(functor, functor.__name__ + '_fc')
return functor
if args:
function = args[0]
return __register(function)
else:
return __register
|
41610d7f3463088f29125fe335a04b9b0292b74f
| 3,644,716
|
import re
def make_absolute_paths(content):
"""Convert all MEDIA files into a file://URL paths in order to
correctly get it displayed in PDFs."""
overrides = [
{
'root': settings.MEDIA_ROOT,
'url': settings.MEDIA_URL,
},
{
'root': settings.STATIC_ROOT,
'url': settings.STATIC_URL,
}
]
has_scheme = re.compile(r'^[^:/]+://')
for x in overrides:
if not x['url'] or has_scheme.match(x['url']):
continue
if not x['root'].endswith('/'):
x['root'] += '/'
        # match quoted occurrences of the url prefix (single or double quotes)
        occur_pattern = '''(["']{0}.*?["'])'''
occurences = re.findall(occur_pattern.format(x['url']), content)
occurences = list(set(occurences)) # Remove dups
for occur in occurences:
content = content.replace(occur, '"%s"' % (
pathname2fileurl(x['root']) +
occur[1 + len(x['url']): -1]))
return content
|
4632513f73bf49ec6d1acfef15d632ee980ab345
| 3,644,717
|
def social_distancing_start_40():
"""
Real Name: b'social distancing start 40'
Original Eqn: b'31'
Units: b'Day'
Limits: (None, None)
Type: constant
b''
"""
return 31
|
c874afed46a8303ec2d3ad0d571183ddc30059a0
| 3,644,718
|
import argparse
def parse_argv(argv):
"""
parse argv
"""
psr = argparse.ArgumentParser(prog=argv[0])
psr.add_argument("--users-csv", default="users.csv",
help=("a csv file describing directories to monitor, which at minimum must have a column 'notebooks'."
" they are typically notebooks/ directories of students (default: users.csv)."))
psr.add_argument("--dest", default="sync_dest",
help=("the directory into which directories are copied (default: ./sync_dest)"))
psr.add_argument("--log-dir", default="sync_logs",
help=("the directory into which rsync logs are stored (default: ./sync_logs)"))
psr.add_argument("--db", default="sync.sqlite",
help=("sqlite3 database to which all file histories are stored (default: sync.sqlite)"))
psr.add_argument("--repeat", default=-1, type=int,
help=("the number of times directories are copied."
" after this number of times, the program quits."
" negative numbers indicate forever (default: -1)."))
psr.add_argument("--overhead", default=0.05, type=float,
help=("the maximum CPU usage of this program (default: 0.05)."
" if this value is 0.05, it makes sure the program uses 5%% worth of a single core."
" it adjusts the overhead by adjusting the interval between two consecutive copies."))
psr.add_argument("--min-sleep", default=300.0, type=float,
help=("the minimum interval between two consecutive copies."))
psr.add_argument("--no-sudo", action="store_true",
help=("if given, sudo is not used"))
psr.add_argument("--replay-log", action="store_true",
help=("mainly used for debugging. if given, it does not look"
" at the actual user files. it instead looks at the log directory (--log-dir) and"
" and reconstruct the database solely based on the log."))
opt = psr.parse_args(argv[1:])
return opt
|
750c9ffd47e7e4a00c11292f78c154da93eda246
| 3,644,719
|
def get_token_auth_header(params):
"""
Obtains the Access Token from the Authorization Header
"""
auth = get_token(params)
parts = auth.split()
if parts[0].lower() != "bearer":
raise AuthError({"code": "invalid_header", "description": "Authorization header must start with Bearer"}, 401)
if len(parts) == 1:
raise AuthError({"code": "invalid_header", "description": "Token not found"}, 401)
if len(parts) > 2:
raise AuthError({"code": "invalid_header", "description": "Authorization header must be Bearer token"}, 401)
token = parts[1]
return token
|
c48a2306ea76b1b5f611194eb33fa13e40f0e155
| 3,644,720
|
from typing import Optional
def check_hu(base: str, add: Optional[str] = None) -> str:
"""Check country specific VAT-Id"""
weights = (9, 7, 3, 1, 9, 7, 3)
s = sum(int(c) * w for (c, w) in zip(base, weights))
r = s % 10
if r == 0:
return '0'
else:
return str(10 - r)
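
# Worked example (hypothetical base number, not part of the original module):
# for base '1234567' the weighted sum is 1*9 + 2*7 + 3*3 + 4*1 + 5*9 + 6*7 + 7*3
# = 144, 144 % 10 = 4, so the check digit is 10 - 4 = 6.
if __name__ == "__main__":
    print(check_hu("1234567"))  # -> '6'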
|
48f1043eeede4ea0b04eb71685f19901da495195
| 3,644,721
|
import requests
from bs4 import BeautifulSoup
def scrape_headline(news_link):
"""
function to scrape the headlines from a simple news website
:return: a dictionary with key as html link of the source and
value as the text in the headline of the news in the html link
"""
#Headlines
#URL = 'https://lite.cnn.com/en'
page = requests.get(news_link)
soup = BeautifulSoup(page.content, 'html.parser')
daily_news_headline_dict = myDict()
for link in soup.find_all('a'):
key = 'https://lite.cnn.com'+link.get('href')
text = cleantext.create_cleanerDoc(link.get_text('href'))
daily_news_headline_dict.add(key, text)
#print(daily_news_headline_dict)
return daily_news_headline_dict
|
f87453a925ace26a3f848c0ed380b6e4ab7030a7
| 3,644,722
|
import xml.etree.ElementTree as ET

def read_xml(img_path):
"""Read bounding box from xml
Args:
img_path: path to image
Return list of bounding boxes
"""
anno_path = '.'.join(img_path.split('.')[:-1]) + '.xml'
tree = ET.ElementTree(file=anno_path)
root = tree.getroot()
ObjectSet = root.findall('object')
bboxes = []
for object in ObjectSet:
box = object.find('bndbox')
x1 = int(box.find('xmin').text)
y1 = int(box.find('ymin').text)
x2 = int(box.find('xmax').text)
y2 = int(box.find('ymax').text)
bb = [x1, y1, x2, y2]
bboxes.append(bb)
return bboxes
|
7102edccb5258d88b67476770123e54e1b75a5c1
| 3,644,723
|
def a2funcoff(*args):
"""a2funcoff(ea_t ea, char buf) -> char"""
return _idaapi.a2funcoff(*args)
|
0cac71a4e071bf99bf6777fc35002e48099ecc46
| 3,644,724
|
def str(obj):
"""This function can be used as a default `__str__()` in user-defined classes.
Classes using this should provide an `__info__()` method, otherwise the `default_info()`
function defined in this module is used.
"""
info_func = getattr(type(obj), "__info__", default_info)
return "{}({})".format(type(obj).__name__, info_func(obj))
|
651e6f3e047a8f7583a39d337d202c09934bc37a
| 3,644,725
|
from typing import Union
from typing import Optional
from typing import Sequence
from typing import Any
def isin_strategy(
pandera_dtype: Union[numpy_engine.DataType, pandas_engine.DataType],
strategy: Optional[SearchStrategy] = None,
*,
allowed_values: Sequence[Any],
) -> SearchStrategy:
"""Strategy to generate values within a finite set.
:param pandera_dtype: :class:`pandera.dtypes.DataType` instance.
:param strategy: an optional hypothesis strategy. If specified, the
pandas dtype strategy will be chained onto this strategy.
:param allowed_values: set of allowable values.
:returns: ``hypothesis`` strategy
"""
if strategy is None:
return pandas_dtype_strategy(
pandera_dtype, st.sampled_from(allowed_values)
)
return strategy.filter(lambda x: x in allowed_values)
|
aecf05b269b7f89b6fea0b5bfbbc98e51d4caddb
| 3,644,726
|
def arrToDict(arr):
"""
Turn an array into a dictionary where each value maps to '1'
used for membership testing.
"""
return dict((x, 1) for x in arr)
|
3202aac9a6c091d7c98fd492489dbcf2300d3a02
| 3,644,727
|
def getPercentGC(img, nbpix) :
"""Determines if a page is in grayscale or colour mode."""
if img.mode != "RGB" :
img = img.convert("RGB")
gray = 0
for (r, g, b) in img.getdata() :
if not (r == g == b) :
            # optimize: if a single pixel is not gray, the whole page is considered colored.
return { "G" : 0.0, "C" : 100.0 }
return { "G" : 100.0, "C" : 0.0 }
|
e8ee682889e0f9284cecfcf57cf260b7056c1879
| 3,644,728
|
import ctypes
def rotate(angle: float, iaxis: int) -> ndarray:
"""
Calculate the 3x3 rotation matrix generated by a rotation
of a specified angle about a specified axis. This rotation
is thought of as rotating the coordinate system.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/rotate_c.html
:param angle: Angle of rotation (radians).
:param iaxis: Axis of rotation X=1, Y=2, Z=3.
:return: Resulting rotation matrix
"""
angle = ctypes.c_double(angle)
iaxis = ctypes.c_int(iaxis)
mout = stypes.empty_double_matrix()
libspice.rotate_c(angle, iaxis, mout)
return stypes.c_matrix_to_numpy(mout)
|
035144bdf04b4c39cc4bf1e41ec02d4c71d4d951
| 3,644,729
|
def build_categories(semanticGroups):
"""
Returns a list of ontobio categories or None
Parameters
----------
semanticGroups : string
a space delimited collection of semanticGroups
"""
if semanticGroups is None:
return None
categories = []
for semanticGroup in semanticGroups.split(' '):
try:
categories += UMLS_to_monarch(semanticGroup.upper())
        except:
            pass
if len(categories) == 0:
return None
else:
return categories
|
5262b62cd5ce8e8c0864f91f43a0925ea991cc83
| 3,644,730
|
from x84.bbs import getterminal
import time
def show_nicks(handles):
""" return terminal sequence for /users result. """
term = getterminal()
return u''.join((
time.strftime('%H:%M'), u' ',
term.blue('-'), u'!', term.blue('-'),
u' ', term.bold_cyan('%d' % (len(handles))), u' ',
u'user%s: ' % (u's' if len(handles) > 1 else u''),
u', '.join(handles) + u'\n',))
|
6863b7a67686a337304c9f22eb4bd9488f959a1f
| 3,644,731
|
def cvCmp(*args):
"""cvCmp(CvArr src1, CvArr src2, CvArr dst, int cmp_op)"""
return _cv.cvCmp(*args)
|
ec2b9d8d68083fff3a09a5293e9950a2c67c424b
| 3,644,732
|
def convert_to_dtype(data, dtype):
"""
A utility function converting xarray, pandas, or NumPy data to a given dtype.
Parameters
----------
data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
or numpy.ndarray
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
"""
if dtype is None: # Don't convert the data type.
return data
return data.astype(dtype)
|
ec3130311fe9c136707d5afb8f564b4f89067f4e
| 3,644,733
|
def process_files(data_path, output_path):
"""Returns a pipeline which rebalances data shards.
Args:
data_path: File(s) to read.
output_path: Path to which output CSVs are written, if necessary.
"""
def csv_pipeline(root):
_ = (
root
| beam.io.ReadFromText(data_path)
| beam.io.WriteToText(output_path,
num_shards=FLAGS.num_output_files))
def tfrecord_pipeline(root):
"""Pipeline instantiation function.
Args:
root: Source pipeline from which to extend.
"""
example_coder = beam.coders.ProtoCoder(tf.train.Example)
_ = (
root
| beam.io.ReadFromTFRecord(data_path, coder=example_coder)
| beam.io.WriteToTFRecord(output_path, file_name_suffix="tfrecord",
coder=example_coder,
num_shards=FLAGS.num_output_files))
pipeline = tfrecord_pipeline if FLAGS.filetype == "tfrecord" else csv_pipeline
return pipeline
|
320a66857dfcfa43995226ee20f714f0694c8f8d
| 3,644,734
|
def delete_tasklog_cached(dc_id, user_id=None):
"""
Remove tasklog cache entry.
"""
if user_id:
key = _cache_log_key(user_id, dc_id)
else:
key = _cache_log_key(settings.TASK_LOG_STAFF_ID, dc_id)
return cache.delete(key)
|
29435d0618850a442a56d4d28e96be5989bca1f5
| 3,644,735
|
def strip_headers(data):
""" Strips headers from data #depreciate"""
try:
return data['items']
except (TypeError, KeyError) as e:
print(e)
return data
|
2eb044e45043f103fff76bfa47007dbcd4aa49c7
| 3,644,736
|
def sns_plot(chart_type: str, df):
""" return seaborn plots """
fig, ax = plt.subplots()
if chart_type == "Scatter":
with st.echo():
sns.scatterplot(
data=df,
x="bill_depth_mm",
y="bill_length_mm",
hue="species",
)
plt.title("Bill Depth by Bill Length")
elif chart_type == "Histogram":
with st.echo():
sns.histplot(data=df, x="bill_depth_mm")
plt.title("Count of Bill Depth Observations")
elif chart_type == "Bar":
with st.echo():
sns.barplot(data=df, x="species", y="bill_depth_mm")
plt.title("Mean Bill Depth by Species")
elif chart_type == "Boxplot":
with st.echo():
sns.boxplot(data=df)
plt.title("Bill Depth Observations")
elif chart_type == "Line":
with st.echo():
sns.lineplot(data=df, x=df.index, y="bill_length_mm")
plt.title("Bill Length Over Time")
elif chart_type == "3D Scatter":
st.write("Seaborn doesn't do 3D ☹️. Here's 2D.")
sns.scatterplot(data=df, x="bill_depth_mm", y="bill_length_mm", hue="island")
plt.title("Just a 2D Scatterplot")
return fig
|
8081349e83745167443d76c9be30ee8b884e8d67
| 3,644,737
|
def decode_fields(source_str: bytes, resp_type):
""" This is the lower level decode of fields, no automatic guess of type is performed."""
field_decoding = FIELD_MAPPING[resp_type]
unpacked_fields = {}
for field_name, field_type, field_subtype in field_decoding:
search_term = f"{field_name}:".encode()
field_location = source_str.find(search_term)
assert field_location >= 0
# Attempt to extract the value
field_value_start = field_location + len(search_term)
if field_type is list:
# Handle as a list
field_value_end = source_str.find(b']', field_value_start)
assert field_value_end > field_value_start
list_str = source_str[field_value_start + 1:field_value_end].strip()
if len(list_str) == 0:
field_list = []
else:
if field_subtype is int:
list_base = 16 if b'x' in list_str else 10
field_list = [int(x,list_base) for x in list_str.split(b',')]
elif field_subtype is str:
field_list = [x.replace(b"'", b"").replace(b'"',b"").decode() for x in list_str.split(b',')]
unpacked_fields[field_name] = field_list
else:
# Handle as a single value
field_value_end = source_str.find(b',', field_value_start)
assert field_value_end > field_value_start
if field_type is not bool:
field_value = field_type(source_str[field_value_start:field_value_end])
else:
field_value = source_str[field_value_start:field_value_end] == b'1'
unpacked_fields[field_name] = field_value
return unpacked_fields
|
6d1afebcfb377be0ce454f5a1db7b6aad37313c5
| 3,644,738
|
import numpy as np

def make_egg(a=-1.25, b=7):
"""
Return x, y points that resemble an egg.
Egg equation is:
r = cos(2θ) + a * cos(θ) + b
@param a: Number.
@param b: Number.
"""
theta = np.linspace(0, 2 * np.pi, 100)
r = np.cos(2 * theta) + a * np.cos(theta) + b
y = r * np.cos(theta)
x = r * np.sin(theta)
return np.array([x, y]).T.tolist()
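
# A minimal usage sketch (not part of the original module): sample the curve
# r = cos(2*theta) + a*cos(theta) + b and inspect the returned [x, y] points;
# the first and last points coincide (up to floating-point error) because theta
# runs over a full period.
if __name__ == "__main__":
    points = make_egg(a=-1.25, b=7)
    print(len(points))               # 100 samples
    print(points[0], points[-1])     # (approximately) the same point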
|
b94ca316ba9e8bcfdcc3622205204e322c2bccb8
| 3,644,739
|
def test_styling_object_which_implements_str_proto():
"""
Test styling an object which implements the str protocol
"""
class Dummy(object):
def __str__(self):
return 'I am a dummy object'
colorful = core.Colorful(colormode=terminal.ANSI_8_COLORS)
assert str(colorful.black(Dummy())) == '\033[30mI am a dummy object\033[39m'
|
d14567675db25c66bdc00e28904b721bb3af536d
| 3,644,740
|
def determine_auto_approval(financial_aid, tier_program):
"""
Takes income and country code and returns a boolean if auto-approved. Logs an error if the country of
financial_aid does not exist in CountryIncomeThreshold.
Args:
financial_aid (FinancialAid): the financial aid object to determine auto-approval
tier_program (TierProgram): the TierProgram for the user's income level
Returns:
boolean: True if auto-approved, False if not
"""
try:
country_income_threshold = CountryIncomeThreshold.objects.get(country_code=financial_aid.country_of_income)
income_threshold = country_income_threshold.income_threshold
except CountryIncomeThreshold.DoesNotExist:
log.error(
"Country code %s does not exist in CountryIncomeThreshold for financial aid id %s",
financial_aid.country_of_income,
financial_aid.id
)
income_threshold = DEFAULT_INCOME_THRESHOLD
if tier_program.discount_amount == 0:
# There is no discount so no reason to go through the financial aid workflow
return True
elif income_threshold == 0:
        # The income threshold is zero, so there is no income check needed for the financial aid application
return True
else:
return financial_aid.income_usd > income_threshold
|
280af275564046ed36b8b5442879f8dcc7e515cb
| 3,644,741
|
def crustal_model_files(alt = [200, 1000], anomaly = 'Global', lim = [0., 360., -90., 90.], binsize = 0.1):
""""
Reads the .bin IDL files of the crustal magnetic field model (Langlais) for a range of altitudes and creates a function based on a linear interpolation.
Parameters:
alt: 2-elements array, optional
The array containing the altitude range. Default is [200, 1000] km.
anomaly: string, optional
The anomaly index, e. g., A1, A2, A6, etc. This string is used to find the directory where the model matrices are located. Default is 'Global'.
lim: 4-elements array, optional
        An array containing the limits for latitude and longitude data, in which: [lon_min, lon_max, lat_min, lat_max]. Default is the whole range of Mars.
binsize: double, optional
The size of the lon and lat bins (must be the same size). Default is 0.1 degrees.
Returns:
A function and a matrix containing the data.
"""
longitude = np.linspace(lim[0], lim[1], int((lim[1] - lim[0]) / binsize + 1))
latitude = np.linspace(lim[2], lim[3], int((lim[3] - lim[2]) / binsize + 1))
altitude = np.linspace(alt[0], alt[1], int(alt[1] - alt[0] + 1))
br = np.empty((len(longitude), len(latitude), len(altitude)))
for i in range(len(altitude)):
h = int(i + alt[0])
data = sp.io.readsav('/home/oliveira/ccati_mexuser/LANGLAIS_Matrices/'+anomaly+'/LANGLAIS_BR_ALT_' + \
str(h) + '_RES_01.bin')
br[:, :, i] = data['zbins'].T
fn = rgi((longitude, latitude, altitude), br)
return fn, br
|
e5deb36b571c0cc75e738bd0bdce7a2fa6ea8d7a
| 3,644,742
|
def f1(y_true, y_pred):
"""
Function for computing the unweighted f1 score using tensors. The Function
handles only the binary case and compute the unweighted f1 score
for the positive class only.
Args:
- y_true: keras tensor, ground truth labels
        - y_pred: keras tensor, labels estimated by the model
Returns:
- f1: float, unweighted f1 score for the positive class
"""
precision_v = precision(y_true, y_pred)
recall_v = recall(y_true, y_pred)
nominator = 2 * (precision_v * recall_v)
denominator = (precision_v + recall_v + K.epsilon())
f1 = nominator / denominator
return f1
|
793fbcbd2ddcec2608139174794e94304f69a631
| 3,644,743
|
def get_selector_score(key, selector, use_median, best_based_on_final):
"""
:param key: Thing to measure (e.g. Average Returns, Loss, etc.)
:param selector: Selector instance
:param use_median: Use the median? Else use the mean
:param best_based_on_final: Only look at the final value? Else use all
values.
:return: A single number that gives the score of `key` inside `selector`
"""
data = selector.extract()
if best_based_on_final:
values = [
exp.progress.get(key, np.array([np.nan]))[-1]
for exp in data
]
else:
values = np.concatenate([
exp.progress.get(key, np.array([np.nan]))
for exp in data
] or [[np.nan]])
if len(values) == 0 or not np.isfinite(values).all():
return np.nan
if use_median:
return np.nanpercentile(values, q=50, axis=0)
else:
return np.nanmean(values)
|
4c9d2e08fcc4f4ee3ecbd2cf67e4db829850707a
| 3,644,744
|
import pytz
def str_to_timezone(tz):
"""
    Build a timezone object from a string; falls back to UTC if the string is empty.
"""
return pytz.timezone(tz) if tz else pytz.utc
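
# A minimal usage sketch (not part of the original module): an explicit zone
# name versus the UTC fallback for an empty string.
if __name__ == "__main__":
    print(str_to_timezone("Asia/Shanghai"))  # Asia/Shanghai tzinfo
    print(str_to_timezone(""))               # UTC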
|
02c004171f50ceb4b60272769036634f6778c791
| 3,644,745
|
def _get_previous_index_search_col(
m, col, nested_list, trans_function=None, transformation=False
):
"""Return previous index of a a key, from a sorted nested list where a key is being seached in the col number.Returns -1 if value is not found.
Args:
m (comparable): comparable being searched
col (int): Column number to be searched.
nested_list (list): Nested List with the values being searched. Ex [[0,1,2,2][0,1,2,2]] First inner list represents a row of attributes of an instance.
trans_function (func): Function to transform the comparison value of the column.
        transformation (boolean): If True, applies the transformation to the column value before comparing values.
Returns:
int: Index of the value being searched.
"""
ix = _search_previous_col(m, col, nested_list, trans_function, transformation)
assert ix != -1, f"Previous keyword to {m} was not found."
return ix
|
4545395928128ce2858c1d0508e77a47be543dd7
| 3,644,746
|
import json
def guest_import(hypervisor, host):
"""
Import a new guest
::
POST /:hypervisor/:host/guests
"""
response.content_type = "application/json"
manager = create_manager(hypervisor, host)
guest = manager.guest_import(
request.environ['wsgi.input'],
request.content_length
)
location = "/%s/%s/guests/%s" % (hypervisor, host, guest["id"])
response.set_header("Location", location)
manager.logout()
return json.dumps(guest)
|
d26ecbac6bce0ab07c365aabb8352ec57719798d
| 3,644,747
|
from typing import List
def get_doc_count(
group_by: List[str] = ["year", "country"],
sort_by: List[metadata.SortOn] = [
metadata.SortOn(field="year", order=metadata.SortOrder.desc),
metadata.SortOn(field="count", order=metadata.SortOrder.desc)],
limit: int = 10):
"""This endpoint provides a generic interface to get the count of documents given an arbitrary set of `group_by` fields. The return value can be sorted based on the `sort_by` fields input. The number of returned groups is limited by the `limit` parameter.
"""
assert len(set(so.field
for so in sort_by).difference(group_by + ['count'])) == 0
group_id = {b: f"${b}" for b in group_by}
sort_by = SON(
[(so.field, -1 if so.order == metadata.SortOrder.desc else 1) for so in sort_by])
projection = {b: f"$_id.{b}" for b in group_by}
projection["count"] = "$count"
projection["_id"] = 0
# Identify fields that needs unwinding, if any
list_fields = set(["adm_region", "author", "country", "der_acronyms", "doc_type",
"geo_region", "major_doc_type", "topics_src", "wb_subtopic_src"])
unwind_fields = [{"$unwind": f"${b}"}
for b in list_fields.intersection(group_by)]
pipeline = []
if unwind_fields:
pipeline = unwind_fields
pipeline.extend([
{"$group": {"_id": group_id, "count": {"$sum": 1}}},
{"$project": projection},
{"$sort": sort_by},
{"$limit": limit},
])
agg = mongodb.get_docs_metadata_collection().aggregate(
pipeline
)
values = [{"rank": ix, **result} for ix, result in enumerate(agg, 1)]
return values
|
c815d6d9a2746c61d3affbca07e8334c75862030
| 3,644,748
|
import re
def get_author_list(text):
"""function to extract authors from some text that will also include
associations
example input:
`J. C. Jan†, F. Y. Lin, Y. L. Chu, C. Y. Kuo, C. C. Chang, J. C. Huang and C. S. Hwang,
National Synchrotron Radiation Research Center, Hsinchu, Taiwan, R.O.C`
or
`M.B. Behtouei, M. Migliorati, L. Palumbo, B. Spataro, L. Faillace`
assumptions:
- if you split by ', ' and the second character of a token is a '.' period
      then it's probably a valid token (an author), but this is not guaranteed
(see above example that ends in 'R.O.C')
- There can be multiple initials as evidenced above.
- Initials may not necessarily be split by a space.
watch out for:
    - hyphenated names: 'B. Walasek-Hoehne'
- hyphenated initials: 'E. J-M. Voutier' 'J.-L. Vay'
    - multiple surnames: 'M.J. de Loos' 'S.B. van der Geer' 'A. Martinez de la Ossa' 'N. Blaskovic Kraljevic' 'G. Guillermo Cantón' 'C. Boscolo Meneguolo'
- surname with apostrophes: 'G. D'Alessandro'
- extra stuff tacked on: 'S.X. Zheng [on leave]' 'G.R. Li [on leave]' (from the csv file)
- one rare instance of non-period separated initials: 'Ph. Richerot (from csv file)
my pattern of a name which should match vast majority of names while not matching vast majority of non-names:
single letter, followed by a period, potentially followed by a space but
not always, repeated n times, and ending in a word of more than one character
which may contain hyphens, apostrophes, repeated n times, and finally
finishing with a comma
word character followed by dot and potentially space, repeated n times
then
word character repeated n times
/(\\w\\.\\ ?)+(\\w+\\ ?)+/g (note this comment had to double up the escape backslashes)
(https://regexr.com/)
"""
newline_fixed_text = text
for newline_char in LINE_TERMINATOR_CHARS:
newline_fixed_text = newline_fixed_text.replace(newline_char, ', ')
potential_authors = newline_fixed_text.replace(NON_BREAKING_SPACE, ' ').replace(' and ', ', ').split(', ')
filtered_authors = list()
my_name_pattern = re.compile("(-?\\w\\.\\ ?)+([\\w]{2,}\\ ?)+")
# the allowance of an optional hyphen preceding an initial is to satisfy a
# common pattern observed with the papers coming out of asia.
for author in potential_authors:
if my_name_pattern.match(author): # match has an implied ^ at the start
# which is ok for our purposes.
filtered_authors.append(author)
return filtered_authors
|
94b7f74ed24be8bb8bbfacca37dfc9f65f1fc99b
| 3,644,749
|
def find_matching_format_function(word_with_formatting, format_functions):
""" Finds the formatter function from a list of formatter functions which transforms a word into itself.
Returns an identity function if none exists """
for formatter in format_functions:
formatted_word = formatter(word_with_formatting)
if word_with_formatting == formatted_word:
return formatter
return lambda word: word
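
# A minimal usage sketch (hypothetical formatters, not part of the original
# module): str.lower maps "hello" to itself and is returned; nothing maps
# "MiXeD" to itself, so the identity fallback is returned instead.
if __name__ == "__main__":
    formatters = [str.lower, str.upper, str.title]
    print(find_matching_format_function("hello", formatters))            # str.lower
    print(find_matching_format_function("MiXeD", formatters)("MiXeD"))   # 'MiXeD'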
|
3d2ce0956de4c8ca0de6d0d21f8bbd718247caff
| 3,644,750
|
from typing import List
def mean(inlist: List[float]) -> float:
"""
    Returns the arithmetic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).
    Usage:   mean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist))
|
713bbbec706671043a5b76f142d4f19cfa247c6a
| 3,644,751
|
from os import environ

def create_file_download_url(file_path: str) -> str:
"""
Creates Telegram URL for downloading of file.
- contains secret information (bot token)!
:param file_path: `file_path` property of `File` object.
"""
token = environ["TELEGRAM_API_BOT_TOKEN"]
return create_url(
"https://api.telegram.org/file",
f"bot{token}",
file_path
)
|
6395d17520778d6bf4507ba69559b6ef1ba32ba9
| 3,644,752
|
import csv
from datetime import datetime
def convert_to_csv(items):
"""
Args:
items: all arns in a region from the DynamoDB query as a list
returns:
csv_body: body of the csv file to write out
"""
fieldnames = ["Package", "Package Version", "Status", "Expiry Date", "Arn"]
# sort by package, and then created date (oldest to newest)
sorted_items = sorted(items, key=lambda i: (i["pckg"].lower(), i["crtdDt"]))
with open("/tmp/packages.csv", "w", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for item in sorted_items:
# convert datetime to human readable
try:
if item["exDt"]:
item["exDt"] = datetime.utcfromtimestamp(item["exDt"]).isoformat()
except KeyError:
item["exDt"] = ""
csv_item = {
"Package": item["pckg"],
"Package Version": item["pckgVrsn"],
"Arn": item["arn"],
"Status": item["dplySts"],
"Expiry Date": item["exDt"],
}
writer.writerow(csv_item)
with open("/tmp/packages.csv", "r") as csvfile:
csv_text = csvfile.read()
return csv_text
|
6e651065f06595e9b964bee1b8dab2965e0076f6
| 3,644,753
|
import numpy as np

def manhattan(train_X, val_X):
"""
:param train_X: one record from the training set
(type series or dataframe including target (survived))
:param val_X: one record from the validation set
series or dataframe include target (survived)
:return: the Manhattan distance between train_X and val_X
"""
diff = train_X - val_X
# Remove survived column
diff = diff.iloc[:, :-1]
    # Manhattan (L1) distance: sum of absolute differences (no square root)
    dist = np.abs(diff).sum(axis=1)
return dist
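
# A minimal usage sketch (hypothetical toy frames, assuming pandas): the distance
# is computed over all columns except the trailing 'survived' target column,
# giving |1 - 4| + |2 - 0| = 5 here.
if __name__ == "__main__":
    import pandas as pd
    train_row = pd.DataFrame([[1.0, 2.0, 0]], columns=["age", "fare", "survived"])
    val_row = pd.DataFrame([[4.0, 0.0, 1]], columns=["age", "fare", "survived"])
    print(manhattan(train_row, val_row))  # 0    5.0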
|
1989466af70d38a17c2b52dd667733da46bbed0c
| 3,644,754
|
def author_idea_list(request, registrant_id):
"""
Returns author ideas
"""
registrant = get_object_or_404(Registrant, pk=registrant_id)
ideas = Idea.objects.filter(author=registrant)
serializer = IdeaSerializer(ideas, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
|
64ee16535243bfe5414326bed86f5b9efdb97941
| 3,644,755
|
import re
def match(text: str, pattern: str) -> bool:
"""
Match a text against a given regular expression.
:param text: string to examine.
:param pattern: regular expression.
:returns: ``True`` if pattern matches the string.
"""
return re.match(pattern, text) is not None
|
a59d71283766c5079e8151e8be49501246218001
| 3,644,756
|
def _compute_hash_check(input_strings: tf.Tensor, field_size: int, seed: int,
dtype: tf.dtypes.DType) -> tf.Tensor:
"""Returns the hash_check for input_strings modulo field_size."""
hash_check_salt = _get_hash_check_salt(seed)
salted_input = tf.strings.join([hash_check_salt, input_strings])
hash_check = tf.strings.to_hash_bucket_fast(
salted_input, num_buckets=field_size)
hash_check = tf.reshape(hash_check, shape=[tf.size(hash_check), 1])
hash_check = tf.cast(hash_check, dtype=dtype)
return hash_check
|
bff5d9b24f17fd32ea3a5bfbd60a8446f10471aa
| 3,644,757
|
def _trademark(request):
"""
    access to the product database is available here, making a request to save/check the data
for storage inside the database
"""
# site data from scrap program
websitename = WebsiteClassName().getProducts( WebsiteClassName().getCategoryLinks() )
# access the data structure we need to save in the db
websitename_data = DataCompiler().getPythonData( websitename )
# Show the name of items inserted in DB
items_inserted = []
# counter for each item scrapped in total
items_counter = 0
with transaction.atomic():
for item in websitename_data:
try:
                # creates the data objects and places them in the appropriate tables/rows. The website id will be assigned in Step 1.
# See the Readme.md in algorithm_scrape in github repo this id will assign the products to the correct registered website.
# To see website id all see the docs in the repo
                # get_or_create returns an (instance, created) tuple
                data_store, created = Product.objects.get_or_create( product_slug_url=item['product_slug_url'], website_id=int, defaults=item )
if data_store:
# Logging for Django purposes
logger.debug('Inserting %r into products', item )
items_inserted.append( item['product_name'] )
items_counter += 1
# Gives a count of how many items are in the database
data_count = Product.objects.filter( website_id=int ).count()
# saves the instance of all the products inside the database
data_store.save()
else:
# updates any new items of fields inside the database
data_store.update(**item)
except Exception:
# "Not inserted ==>", into database
logger.exception('Something went wrong inserting a new entry %r', item )
return HttpResponse('<h1>Products saved!</h1><br\>'
'<h2> %r Total Products Scrapped</h2><br\>'
'<h4> %r Products Currently in db</h4><br\>'
'<div><ul> <li>%s</li> </ul></div>' % (items_counter, data_count, items_inserted )
)
|
e34c77a423a23310e9fbdb6f867795b3ce16935f
| 3,644,758
|
import numpy
def calc_extinction(radius:float, mosaicity:float, model:str,
a:float, b:float, c:float, alpha:float, beta:float, gamma:float,
h:float, k:float, l:float,
f_sq:float, wavelength:float, flag_derivative_f_sq=False):
"""
Isotropical extinction coorection y:
$$
|F|^2_{\text{corrected}} = y \cdot |F|^2
$$
radius primary extinction ???
    mosaicity secondary extinction
    model= "gauss" or "lorentz"
    a,b,c,alpha,beta,gamma are unit cell parameters (in angstroms and radians)
h, k, l are Miller indices
f_sq is square of structure factor (in 10-12cm)
    wavelength is neutron wavelength in angstroms
flag_derivative_radius
flag_derivative_mosaicity
flag_derivative_a
flag_derivative_b
flag_derivative_c
flag_derivative_alpha
flag_derivative_beta
flag_derivative_gamma
flag_derivative_f_sq
flag_derivative_wavelength
"""
r = float(radius)
g = float(mosaicity)
g_sq = numpy.square(g)
kk = 1.
c_a, c_b, c_g = numpy.cos(alpha), numpy.cos(beta), numpy.cos(gamma)
volume_unit_cell = calc_volume_uc_by_abc_cosines(a, b, c, c_a, c_b, c_g)
sthovl = calc_sthovl_by_hkl_abc_cosines(h, k, l, a, b, c, c_a, c_b, c_g)
yext, dder = calc_extinction_2(radius, mosaicity, model,
f_sq, volume_unit_cell, sthovl, wavelength)
return yext, dder
|
b0922fde0246ee250033d9ff9eb3fde59c17c343
| 3,644,759
|
def smape(y_true: Yannotation, y_pred: Yannotation):
"""
Calculate the symmetric mean absolute percentage error between `y_true`and `y_pred`.
Parameters
----------
y_true : array, `dataframe`, list or `tensor`
Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred : array, `dataframe`, list or `tensor`
The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns
-------
error : `tensor`
        Symmetric mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
Examples
--------
>>> from autopycoin.losses import smape
>>> import tensorflow as tf
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [1., 0.]]
>>> smape(y_true, y_pred).numpy()
array([99.999985, 99.999985], dtype=float32)
"""
if not isinstance(y_pred, tf.RaggedTensor):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, dtype=y_pred.dtype)
error = tf.abs(y_true - y_pred) / (
tf.maximum(tf.abs(y_true), epsilon()) + tf.abs(y_pred)
)
return 200.0 * tf.reduce_mean(error, axis=-1)
|
0046481ea6b2ddc3295f9d597d6cc3488b498415
| 3,644,760
|
def acosh(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
"""
The ACosH operation
The arguments for this function are as follows:
:param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
:param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
:param cellsize_type: one of "FirstOf", "MinOf", "MaxOf, "MeanOf", "LastOf"
:param astype: output pixel type
:return: the output raster
"""
return local(rasters, 59, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype)
|
593b13639f40c347a27d4fc772d7b2ec2d062a86
| 3,644,761
|
from typing import Optional
from typing import Dict
from typing import Tuple
from typing import Any
def load_does(
filepath: PathType, defaults: Optional[Dict[str, bool]] = None
) -> Tuple[Any, Any]:
"""Load_does from file."""
does = {}
defaults = defaults or {"do_permutation": True, "settings": {}}
data = OmegaConf.load(filepath)
data = OmegaConf.to_container(data)
mask = data.pop("mask")
for doe_name, doe in data.items():
for k in defaults:
if k not in doe:
doe[k] = defaults[k]
does[doe_name] = doe
return does, mask
|
22cebd75da899bebb092c1c470eabe87e17c41f5
| 3,644,762
|
def causal_segment_mask(segment_ids: JTensor,
dtype: jnp.dtype = jnp.float32) -> JTensor:
"""Computes the masks which combines causal masking and segment masks.
Args:
segment_ids: a JTensor of shape [B, T], the segment that each token belongs
to.
dtype: data type of the input.
Returns:
A JTensor of shape [B, 1, T, T].
"""
# [B, 1, T, T]
segment_mask_t = segment_mask(segment_ids, dtype=dtype)
# [1, 1, T, T]
b, t = segment_ids.shape
causal_mask_t = causal_mask(jnp.zeros([b, t, 1], dtype=dtype))
return jnp.minimum(segment_mask_t, causal_mask_t)
|
7e16f2a943e19b232fb3f1e55f7b348aa7f56a72
| 3,644,763
|
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

def remove_outliers(peaks: np.ndarray, **kwargs):
"""
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.LocalOutlierFactor.html#sklearn.neighbors.LocalOutlierFactor
https://scikit-learn.org/stable/modules/outlier_detection.html
Parameters
----------
peaks
kwargs
Returns
-------
"""
clf = LocalOutlierFactor(**kwargs)
is_inlier = clf.fit_predict(peaks) # 1 inliers, -1 is outliers
mask = is_inlier == 1
return peaks[mask], peaks[np.invert(mask)]
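
# A minimal usage sketch (synthetic points, not part of the original module):
# a tight 2-D cluster plus one far-away point; the far point should land in the
# returned outlier array.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    cluster = rng.normal(0.0, 0.1, size=(50, 2))
    pts = np.vstack([cluster, [[10.0, 10.0]]])
    inliers, outliers = remove_outliers(pts, n_neighbors=10)
    print(inliers.shape, outliers.shape)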
|
969fe4523e8529edd49a5c0cd81c51949bbe3de5
| 3,644,764
|
from itertools import chain, combinations

def powerset(iterable):
""" Calcualtes the powerset, copied from https://docs.python.org/3/library/itertools.html#itertools-recipes """
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
|
3b645848c0810c69685c06b94fee42e5747bb6e8
| 3,644,765
|
def get_rotational_vector(skew_symmetric):
"""Get the rotational vector from a skew symmetric matrix.
Parameters
----------
skew_symmetric: numpy.ndarray
the skew symmetric matrix.
Returns
-------
rotational_vector:
the rotational vector.
"""
# make sure that the input is skew symmetric
if np.linalg.norm(skew_symmetric + skew_symmetric.T) > 1e-12:
raise ValueError("The input is not skew symmetric!")
rotational_vector = np.zeros((3, 1), dtype=float)
rotational_vector[0] = skew_symmetric[2, 1]
rotational_vector[1] = skew_symmetric[0, 2]
rotational_vector[2] = skew_symmetric[1, 0]
return rotational_vector
|
e63b771f6db93f63d7307a85689d87162208c6ff
| 3,644,766
|
def diaperchange_lifetimes(changes):
"""
Create a graph showing how long diapers last (time between changes).
:param changes: a QuerySet of Diaper Change instances.
    :returns: a tuple of the graph's html and javascript.
"""
changes = changes.order_by("time")
durations = []
last_change = changes.first()
for change in changes[1:]:
duration = change.time - last_change.time
if duration.seconds > 0:
durations.append(duration)
last_change = change
trace = go.Box(
y=[round(d.seconds / 3600, 2) for d in durations],
name=_("Changes"),
jitter=0.3,
pointpos=-1.8,
boxpoints="all",
)
layout_args = utils.default_graph_layout_options()
layout_args["height"] = 800
layout_args["title"] = _("<b>Diaper Lifetimes</b>")
layout_args["yaxis"]["title"] = _("Time between changes (hours)")
layout_args["yaxis"]["zeroline"] = False
layout_args["yaxis"]["dtick"] = 1
fig = go.Figure({"data": [trace], "layout": go.Layout(**layout_args)})
output = plotly.plot(fig, output_type="div", include_plotlyjs=False)
return utils.split_graph_output(output)
|
4937cff711b8e37e4162a4c7de8cc258c25d2979
| 3,644,767
|
def batch_norm_conv(x, n_out, phase_train, scope='bn'):
"""
Batch normalization on convolutional maps.
Args:
x: Tensor, 4D BHWD input maps
n_out: integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope):
beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
|
2a08db220f08270a8f2870671ee93278f0c7ddd2
| 3,644,768
|
def strip_variants(address):
"""Return a copy of the given address with the variants (if any) stripped from the name.
:rtype: :class:`pants.build_graph.address.Address`
"""
address, _ = parse_variants(address)
return address
|
a2cc1c68b6032304720b9cae05516535cb9ede22
| 3,644,769
|
def deleteIdentifiedStock(bot, update):
"""Deletes the user's selected stock.
If the user's selected stock is valid, proceed to delete it.
Returns:
Return MENU state with normal keyboard.
"""
if update.message.chat.username is None:
# User has no username
update.message.reply_text(
"It seems you do not have a Telegram Username.\nI'll need your username in order to function :( /start me up when you have one! (You can set your username in Settings.)")
else:
# User has username
text = update.message.text
message = bots.deleteUserStock(update.message.chat.username, text)
update.message.reply_text(message, parse_mode='HTML')
update.message.reply_text(
"What would you like to do next?", reply_markup=markup_one)
return MENU
|
7a47579f7e0b9b9388ef0f0f4650cb045cf53570
| 3,644,770
|
def z_inc_down(grid):
"""Return True if z increases downwards in the coordinate reference system used by the grid geometry
:meta common:
"""
if grid.crs is None:
assert grid.crs_uuid is not None
grid.crs = rqc.Crs(grid.model, uuid = grid.crs_uuid)
return grid.crs.z_inc_down
|
26b05defc1b75ec5a4f3aa9f61c3ba0cb5921bdc
| 3,644,771
|
def load_coord_var(prob_data_type):
"""
Loads a coordinate variable from the source data and returns it.
:param prob_data_type:
:return:
"""
fpath = "{}/source_others/a1b_tas_jja_EAW_1961-1990.dat".format(BASEDIR)
with open(fpath, 'rb') as reader:
data = cPickle.load(reader)
key = prob_data_map[prob_data_type]
if key == 'prob':
return np.array((data[key] * 100), np.float)
else:
return np.array(data[key], np.int32)
|
46969ac762393c8b7c60d08b543a2fc2f0069b74
| 3,644,772
|
import platform
def get_os():
"""Get the current operating system.
:returns: The OS platform (str).
"""
return platform.system()
|
307c6c94573733d900b2e31cfc8bcf3db8b6e5b7
| 3,644,773
|
import os
import yaml
from requests_oauthlib import OAuth1  # assumed source of the OAuth1 helper used below
def get_config():
"""This function retrieves API keys, access tokens, and other key data from the config file."""
global LOG_NAME, TARGET, URL_NUMBER, WHERE, BOT
print("Building OAuth header...")
if 'XKCD_APPNAME' in os.environ: # Running on a cloud server
key = [os.environ.get('API_KEY', None),
os.environ.get('API_SECRET_KEY', None),
os.environ.get('ACCESS_TOKEN', None),
os.environ.get('ACCESS_TOKEN_SECRET', None)]
LOG_NAME = os.environ.get('LOG_NAME', None)
TARGET = os.environ.get('TARGET', None)
URL_NUMBER = int(os.environ.get('URL_NUMBER', None))
WHERE = int(os.environ.get('WHERE', None))
BOT = os.environ.get('BOT', None)
else: # Running locally
with open('config.yaml') as config_file:
CONFIG = yaml.safe_load(config_file)
key = [CONFIG['API Key'],
CONFIG['API Secret Key'],
CONFIG['Access Token'],
CONFIG['Access Token Secret']]
LOG_NAME = CONFIG['Target name in logs']
TARGET = CONFIG['Target account handle']
URL_NUMBER = int(CONFIG['Tweet URL location'])
WHERE = int(CONFIG['Target image location on site'])
BOT = CONFIG['Your account handle']
for i in key:
if i is None: # Verify keys were loaded
print("OAuth initiation failed: API key or access token not found")
del key
return 'crash' # Enter log protection mode
auth = OAuth1(key[0], key[1], key[2], key[3])
print('OAuth initiation successful!')
del key
return auth
|
0b898df5edb85c5d826b94c8962a0d5f327e94d2
| 3,644,774
|
def count_hits(space, positions, pi_plus_4_vecs_lab, pi_null_4_vecs_lab, r):
"""returns a list of hit counts for z values in space"""
return [count_double_hits(positions, pi_plus_4_vecs_lab, pi_null_4_vecs_lab, r=r, z_detector=z) for z in space]
|
66ba0f61d8491ef6687c0fb20761375c6470cae2
| 3,644,775
|
import time
import math
def time_since(since, m_padding=2, s_padding=2):
"""Elapsed time since last record point."""
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '{}m:{}s'.format(str(int(m)).zfill(m_padding),
str(int(s)).zfill(s_padding))
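# Illustrative call: report elapsed time since a recorded start point.
start = time.time()
time.sleep(2)
print(time_since(start))  # e.g. '00m:02s'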
|
62641b723bf286f54280bb5c6fb1d54c9753907c
| 3,644,776
|
import subprocess
import collections
def run_cmd(cmd, encoding=DEFAULT_ENCODING):
"""
Run a command as a subprocess.
# Arguments
* `cmd` (list<str>): The command to run.
* `encoding` (str): The encoding to use for communicating with the
subprocess.
# Returns
A named tuple with the following fields:
- returncode: The returned value from the subproccess.
- stderr: The stderr output from the subprocess.
- stdout: The stdout output from the subprocess.
"""
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return collections.namedtuple("CmdResult",
["returncode", "stderr", "stdout"])(
p.returncode,
p.stderr.decode(encoding).strip(),
p.stdout.decode(encoding).strip())
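# Illustrative call on a POSIX system; the encoding is passed explicitly because
# DEFAULT_ENCODING is defined elsewhere in the source module.
result = run_cmd(["echo", "hello"], encoding="utf-8")
print(result.returncode, result.stdout)  # 0 hello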
|
b03ae4949d00f8a1019ea6bde739550a21585409
| 3,644,777
|
def record_check(record):
"""
    Check that a record dict matches the expected CNN JSON record structure
    --- a dictionary is required as the input ---
"""
assert isinstance(
record, dict), 'record should be dict, while the input is {}'.format(type(record))
cnn_json_struct = JsonFormatSetting.CNN_JSON_STRUCTURE
record_struct = cnn_json_struct["record"][0]
return check_dict(record, record_struct)
|
965cced685b45de8083dc1c8e161e9aa100b4cf0
| 3,644,778
|
import struct
import saq
import Crypto.Random.OSRNG.posix  # legacy PyCrypto RNG, per the call below
from Crypto.Cipher import AES
def encrypt_chunk(chunk, password=None):
"""Encrypts the given chunk of data and returns the encrypted chunk.
If password is None then saq.ENCRYPTION_PASSWORD is used instead.
password must be a byte string 32 bytes in length."""
if password is None:
password = saq.ENCRYPTION_PASSWORD
assert isinstance(password, bytes)
assert len(password) == 32
iv = Crypto.Random.OSRNG.posix.new().read(AES.block_size)
encryptor = AES.new(password, AES.MODE_CBC, iv)
original_size = len(chunk)
if len(chunk) % 16 != 0:
chunk += b' ' * (16 - len(chunk) % 16)
result = struct.pack('<Q', original_size) + iv + encryptor.encrypt(chunk)
return result
|
328484205dff850f3857f0a7d19e922ffa230c61
| 3,644,779
|
def convert_to_pj_lop_plus(lops):
"""
Converts the list of PlayerStates to an LOP+
:param lops: The PlayerStates to be converted
:type lops: [PlayerState, ...]
:return: The LOP+
:rtype: PyJSON
"""
return [convert_to_pj_player_plus(ps) for ps in lops]
|
968e0a38b5df2a1ce4bf6106632c31213d278a29
| 3,644,780
|
from typing import Tuple
import math
def euler_to_quaternion(roll: float = 0, pitch: float = 0, yaw: float = 0) -> Tuple[float, float, float, float]:
"""
Convert Euler to Quaternion
Args:
roll (float): roll angle in radian (x-axis)
pitch (float): pitch angle in radian (y-axis)
yaw (float): yaw angle in radian (z-axis)
Returns:
Tuple[float, float, float, float]: x, y, z, w
"""
# Abbreviations for the various angular functions
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
# Quaternion
w = cr * cp * cy + sr * sp * sy
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
return x, y, z, w
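# Example: a 90-degree yaw (rotation about the z-axis).
qx, qy, qz, qw = euler_to_quaternion(yaw=math.pi / 2)
# (qx, qy, qz, qw) is approximately (0.0, 0.0, 0.7071, 0.7071)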
|
e8346172f07510c377e14827842eb18f1631402e
| 3,644,781
|
import logging
def supervisorctl_url():
"""This parses supervisord.conf which contains the URL of supervisorctl."""
parsed_config = _get_parsed_configuration()
# For example 'http://localhost:9001'
control_url = _clean_config_value(parsed_config['supervisorctl']['serverurl'])
logging.debug("control_url=%s" % control_url)
return control_url
|
9ada9298f99eccfac3c02c2653ead753ea208119
| 3,644,782
|
def create_generic_constant(type_spec, scalar_value):
"""Creates constant for a combination of federated, tuple and tensor types.
Args:
type_spec: Instance of `computation_types.Type` containing only federated,
tuple or tensor types for which we wish to construct a generic constant.
May also be something convertible to a `computation_types.Type` via
`computation_types.to_type`.
scalar_value: The scalar value we wish this constant to have.
Returns:
Instance of `computation_building_blocks.ComputationBuildingBlock`
representing `scalar_value` packed into `type_spec`.
Raises:
TypeError: If types don't match their specification in the args section.
  Note that validating the consistency of `type_spec` with `scalar_value` is not
  the responsibility of this function.
"""
type_spec = computation_types.to_type(type_spec)
py_typecheck.check_type(type_spec, computation_types.Type)
inferred_scalar_value_type = type_utils.infer_type(scalar_value)
if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
or inferred_scalar_value_type.shape != tf.TensorShape(())):
raise TypeError('Must pass a scalar value to '
'`create_tensorflow_constant`; encountered a value '
'{}'.format(scalar_value))
if not type_utils.type_tree_contains_only(
type_spec,
(computation_types.FederatedType, computation_types.NamedTupleType,
computation_types.TensorType)):
raise TypeError
if type_utils.type_tree_contains_only(
type_spec,
(computation_types.NamedTupleType, computation_types.TensorType)):
return computation_constructing_utils.create_tensorflow_constant(
type_spec, scalar_value)
elif isinstance(type_spec, computation_types.FederatedType):
unplaced_zero = computation_constructing_utils.create_tensorflow_constant(
type_spec.member, scalar_value)
if type_spec.placement == placement_literals.CLIENTS:
placement_fn_type = computation_types.FunctionType(
type_spec.member,
computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True))
placement_function = computation_building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
elif type_spec.placement == placement_literals.SERVER:
placement_fn_type = computation_types.FunctionType(
type_spec.member,
computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True))
placement_function = computation_building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
return computation_building_blocks.Call(placement_function, unplaced_zero)
elif isinstance(type_spec, computation_types.NamedTupleType):
elements = []
for k in range(len(type_spec)):
elements.append(create_generic_constant(type_spec[k], scalar_value))
names = [name for name, _ in anonymous_tuple.to_elements(type_spec)]
packed_elements = computation_building_blocks.Tuple(elements)
named_tuple = computation_constructing_utils.create_named_tuple(
packed_elements, names)
return named_tuple
else:
raise ValueError(
'The type_spec {} has slipped through all our '
'generic constant cases, and failed to raise.'.format(type_spec))
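# Hedged usage sketch; assumes this module's own imports (tf, computation_types) are in scope.
# Packs the scalar 0.0 into every element of a float32 tensor of shape [3], per the docstring above.
zeros_vec = create_generic_constant(computation_types.TensorType(tf.float32, [3]), 0.0)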
|
9e1db57d93407eef385c1ac88ba83a4e578f891a
| 3,644,783
|
from django_openid.models import UserOpenidAssociation
def has_openid(request):
"""
    Given an HttpRequest, determine whether the OpenID on it is associated with
    the request's user, so the caller knows whether the OpenID can be relied on.
"""
for association in UserOpenidAssociation.objects.filter(user=request.user):
if association.openid == unicode(request.openid):
return True
return False
|
ad193ae3c299867ed4c29ee059c45fe24a07523c
| 3,644,784
|
def get_wordcloud():
"""
Generates the wordcloud and sends it to the front end as a png file.
:return: generated tag_cloud.png file
"""
update_tagcloud(path_to_save='storage/tmp', solr_service=solr)
return send_from_directory("storage/tmp", "tag_cloud.png", as_attachment=True)
|
c40cbf95676ac1b3da9c589934bb67a589a80810
| 3,644,785
|
def rtc_runner(rtc):
"""
:type rtc: pbcommand.models.ResolvedToolContract
:return:
"""
return gather_run_main(chunk_json=rtc.task.input_files[0],
chunk_key=Constants.CHUNK_KEY,
gathered_fn=rtc.task.output_files[0],
                           ln_name=Constants.DEFAULT_OUT_NAME,
gather_func=cat_txt_with_header)
|
10aa9c707284a04a0d002b95169d0a28e91213eb
| 3,644,786
|
from typing import Tuple
import numpy as np
def get_matching_axis(shape: Tuple, length: int) -> int:
"""
Infers the correct axis to use
:param shape: the shape of the input
:param length: the desired length of the axis
:return: the correct axis. If multiple axes match, then it returns the last
one.
"""
# noinspection PyUnresolvedReferences
axis_candidates = np.nonzero(np.array(shape) == length)[0]
if len(axis_candidates) == 0:
        raise ValueError('Unable to infer axis due to shape mismatch: '
'{} =/= {}.'.format(shape, length))
return axis_candidates[-1]
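# For instance:
get_matching_axis((3, 5), 5)   # -> 1
get_matching_axis((4, 4), 4)   # -> 1, the last matching axis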
|
981e2bb2487cd113ffc5dd19c2a62d581cf38304
| 3,644,787
|
def is_paths(maybe_paths, marker='*'):
"""
Does given object `maybe_paths` consist of path or path pattern strings?
"""
return ((is_path(maybe_paths) and marker in maybe_paths) or # Path str
(is_path_obj(maybe_paths) and marker in maybe_paths.as_posix()) or
(is_iterable(maybe_paths) and
all(is_path(p) or is_ioinfo(p) for p in maybe_paths)))
|
dc825d7417cb7cb52beaecc4fb6eef333db1514b
| 3,644,788
|
import logging
def output_numpy_or_asa(obj, data, *, output_type=None, labels=None):
"""This function returns a numpy ndarray or nelpy.AnalogSignalArray
Parameters
----------
obj : numpy.ndarray or a nelpy object
data : numpy.ndarray, with shape (n_samples, n_signals)
Data is either passed through as the np.ndarray
or used to form a nelpy object, depending on 'output_type'.
output_type : string, optional
Specifies the object that should be returned.
Default is a numpy np.ndarray
    labels : np.ndarray of str, optional
Labels that will be attached to the nelpy object, if
that is the desired output type. If the output type is
'numpy', the labels are ignored.
Returns
-------
Output object of the specified type. If a numpy array, it will
have shape (n_samples, n_signals)
"""
if data.size == 0:
logging.warning("Output data is empty")
if not isinstance(data, np.ndarray):
raise TypeError("data must be a numpy ndarray")
if output_type is not None:
if output_type != 'asa':
raise TypeError(("Invalid output type {} specified".
format(output_type)))
if output_type == 'asa':
try:
res = isinstance(obj, nel.RegularlySampledAnalogSignalArray)
if res is False:
raise TypeError("You specified output type {} but the input"
" object was not a nelpy object. Cannot form an"
" ASA around the input object".format(output_type))
# Transpose data since ASAs have shape (n_signals, n_samples)
out = nel.AnalogSignalArray(data.T,
abscissa_vals=obj.abscissa_vals,
fs=obj.fs,
support=obj.support,
labels=labels)
return out
except NameError:
raise ModuleNotFoundError("You must have nelpy installed for"
" output type {}".format(output_type))
return data
|
2e19de7caa58d4be606fa3c2fef623c32a08a201
| 3,644,789
|
import chex
import jax
import jax.numpy as jnp
Array = jnp.ndarray  # type alias assumed for the annotations below
def embed_oar(features: Array, action: Array, reward: Array,
num_actions: int) -> Array:
"""Embed each of the (observation, action, reward) inputs & concatenate."""
chex.assert_rank([features, action, reward], [2, 1, 1])
action = jax.nn.one_hot(action, num_classes=num_actions) # [B, A]
reward = jnp.tanh(reward)
while reward.ndim < action.ndim:
reward = jnp.expand_dims(reward, axis=-1)
embedding = jnp.concatenate([features, action, reward], axis=-1) # [B, D+A+1]
return embedding
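# Shape check with illustrative sizes: batch B=4, feature width D=8, A=3 actions.
features = jnp.zeros((4, 8))               # [B, D]
action = jnp.array([0, 1, 2, 1])           # [B]
reward = jnp.array([0.5, -1.0, 0.0, 2.0])  # [B]
out = embed_oar(features, action, reward, num_actions=3)
# out.shape == (4, 12), i.e. [B, D + A + 1]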
|
624b1ca67fa031e548411b4c9cfd9f86765cbd7e
| 3,644,790
|
import json
async def kibana(es, params):
"""
Simulates Kibana msearch dashboard queries.
It expects the parameter hash to contain the following keys:
"body" - msearch request body representing the Kibana dashboard in the form of an array of dicts.
"params" - msearch request parameters.
"meta_data" - Dictionary containing meta data information to be carried through into metrics.
"""
request = params["body"]
request_params = params["params"]
meta_data = params["meta_data"]
if meta_data["debug"]:
logger.info("Request:\n=====\n{}\n=====".format(json.dumps(request)))
visualisations = int(len(request) / 2)
response = {}
for key in meta_data.keys():
response[key] = meta_data[key]
response["request_params"] = request_params
response["weight"] = 1
response["unit"] = "ops"
response["visualisation_count"] = visualisations
result = await es.msearch(body=request, params=request_params)
sum_hits = 0
max_took = 0
error_count = 0
error_details = set()
for r in result["responses"]:
if "error" in r:
error_count += 1
extract_error_details(error_details, r)
else:
hits = r.get("hits", {}).get("total", 0)
if isinstance(hits, dict):
sum_hits += hits["value"]
else:
sum_hits += hits
max_took = max(max_took, r["took"])
# use the request's took if possible but approximate it using the maximum of all responses
response["took"] = result.get("took", max_took)
response["hits"] = sum_hits
response["success"] = error_count == 0
response["error-count"] = error_count
if error_count > 0:
response["error-type"] = "kibana"
response["error-description"] = error_description(error_details)
if meta_data["debug"]:
for r in result["responses"]:
# clear hits otherwise we'll spam the log
if "hits" in r and "hits" in r["hits"]:
r["hits"]["hits"] = []
r["aggregations"] = {}
logger.info("Response (excluding specific hits):\n=====\n{}\n=====".format(json.dumps(result)))
return response
|
d07b3278bb7fc62565cce905e575bcc21a54f79a
| 3,644,791
|
import requests
import json
def aggregations_terms(query=None):
"""Get page for aggregations."""
if query is None:
# Default query
query = "state,config.instance_type"
# Remove all white spaces from the str
query = query.replace(" ", "")
data = {"query": query}
end_point = "aggregations/terms"
url = SOUNDWAVE_API + end_point
response = requests.post(url, json=data)
if response.status_code == 200 or response.status_code == 304:
json_data = json_loads_byteified(response.text)
return render_template(
"aggregations.html",
data=json.dumps(json_data),
query=query)
elif response.status_code == 404 or response.status_code == 400:
logger.warn("Data not found in soundwave elastic search store. API returned 404")
return render_template("404.html")
elif response.status_code == 500:
logger.warn("soundwave api returned 500 status code. Internal Server error")
return render_template("500.html")
|
c69308a007ec4b366129de3c3aa277c96fda2edd
| 3,644,792
|
from tqdm import tqdm
import torch
def validate_base(model,
args,
loader,
loadername,
train=True):
"""
The validation function. Validates the ELBO + MIL, ELBO, and the accuracy
of the given [training, validation or test] loader.
Returns
-------
    loss_val: float
        Either the mean ELBO (from the base VAE) or the accuracy rate (from the base attention MIL).
"""
# Model: validate
model.eval()
# Declare loss tracker
loss_val = 0.
# Initialize the number of points
N = 0
# Loop through the data
for data, label in tqdm(loader, desc=f' Validation[{loadername}]'):
# Convert the data to cuda if available
data = data.to(device=args.device).squeeze(0)
# Update the N
N += data.shape[0]
# If args.mode is 'base_att'
if args.model == 'base_att':
# Convert the label to cuda if available
label = label[0].to(device=args.device)
# Calculate the objective for the Attention MIL
# (name kept the same not to duplicate the code blocks)
elbo_u_sum = model.calculate_classification_error(data, label)[0]
# Otherwise
else:
# Calculate ELBO for unlabeled data
elbo_u_sum = model(data)
# Track elbo results together [sum]
loss_val += elbo_u_sum.item()
if args.test_mode:
break
# If the mode is base_att
if args.model == 'base_att':
# Divide the accuracy by the length of the loader
loss_val = loss_val / len(loader)
# Trace
print(f' [Valid {loadername}]\t accuracy: {loss_val: .2f}')
# If the loader is not the training loader
if not train:
# If the validation accuracy is higher than the previous one
if loss_val >= args.prev_val:
# Save the model
torch.save(model.state_dict(),
f'{args.MODELPATH}/{args.mode}_E{args.epoch}.pt')
# Update the accuracy value
args.prev_val = loss_val
# If the mode is base
elif args.model == 'base':
# Divide the loss by the number of points
loss_val = loss_val / N
# Trace
print(f' [Valid {loadername}]\t elbo: {loss_val: .2f}')
# If the loader is not the training loader
if not train:
# If the validation loss is lower than the previous one
if loss_val <= args.prev_val:
# Save the model
torch.save(model.state_dict(),
f'{args.MODELPATH}/{args.mode}_E{args.epoch}.pt')
# Update the accuracy value
args.prev_val = loss_val
# Return validation records
return loss_val
|
85c9558bd484190eb61599509b9c9ec9f4a5cc0a
| 3,644,793
|
def parse_person(person):
"""
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
A "person" is an object with an optional "name" or "email" field.
A person can be in the form:
"author": "Isaac Z. Schlueter <i@izs.me>"
For example:
>>> p = parse_person('Barney Rubble <b@rubble.com>')
>>> assert p == ('Barney Rubble', 'b@rubble.com')
>>> p = parse_person('Barney Rubble')
>>> assert p == ('Barney Rubble', None)
>>> p = parse_person('<b@rubble.com>')
>>> assert p == (None, 'b@rubble.com')
"""
parsed = person_parser(person)
if not parsed:
name = None
parsed = person_parser_no_name(person)
else:
name = parsed.group('name')
email = parsed.group('email')
if name:
name = name.strip()
if email:
email = email.strip('<> ')
return name, email
|
3fe30bf85f3ba4877b2924c5b5778d5a5205b6ee
| 3,644,794
|
import numpy as np
def _glasstone_surface_cf(y):
"""Correction factor provided by TEoNW for contact surface bursts (p. 335)."""
    return np.interp(
        y,
        [1.0, 50.0, 100.0, 300.0, 700.0, 2000.0, 5000.0, 5000.0],
        [0.6666666666666666, 0.6666666666666666, 1.0, 1.25, 1.5, 2.0, 3.0, 3.0],
    )
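# For example:
_glasstone_surface_cf(100.0)  # -> 1.0 (exact table point)
_glasstone_surface_cf(0.5)    # -> 0.666..., clamped at the low end of the table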
|
8ac3b0273e8c8fe218d15a2b89aad994a7413d68
| 3,644,795
|
import torch
def create_eval_fn(task_id, calculate_gradient=False):
"""Creates an evaluation function for a given task. Returns an evaluation
function that takes in a model, dataloader, and device, and evaluates the
model on the data from the dataloader. Returns a dictionary with mean
"loss" and "accuracy". If calculate_gradient is True, dictionary will also
contain gradients for the model wrt the loss on the data.
Args:
task_id: Task id corresponding to the data that will be evaluated.
calculate_gradient: Whether gradient should be calculated.
"""
def eval_fn(model, dataloader, device):
model.eval()
total_loss = 0
loss_fn = torch.nn.CrossEntropyLoss(reduction="sum").to(device=device)
num_correct = 0
model.zero_grad()
torch.set_grad_enabled(calculate_gradient)
for X, y in iter(dataloader):
X = X.to(device=device)
y = y.to(device=device)
output = model(X, task_id)
preds = torch.argmax(output, dim=1)
num_correct += (preds == y).sum().item()
loss = loss_fn(output, y) / len(dataloader.dataset)
if calculate_gradient:
loss.backward()
total_loss += loss.item()
accuracy = num_correct / len(dataloader.dataset)
metrics = {"loss": total_loss, "accuracy": accuracy}
if calculate_gradient:
gradients = flatten_gradients(model)
metrics["gradients"] = gradients
return metrics
return eval_fn
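# Illustrative wiring; `model` (with a forward pass model(X, task_id)) and `val_loader`
# are stand-ins from the surrounding training code, not defined here.
eval_task0 = create_eval_fn(task_id=0)
metrics = eval_task0(model, val_loader, device=torch.device("cpu"))
print(metrics["loss"], metrics["accuracy"])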
|
ac0a7107f695170f2fa6c65dfeae63056b53452d
| 3,644,796
|
import os
def get_dashboard_oauth_client_id():
"""Gets the client ID used to authenticate with Identity-Aware Proxy
from the environment variable DASHBOARD_OAUTH_CLIENT_ID."""
return os.environ.get('DASHBOARD_OAUTH_CLIENT_ID')
|
37a0ba23dab00d2e43fc1cbd124713d2231d91a9
| 3,644,797
|
def naming_style(f):
"""Decorator for name utility functions.
Wraps a name utility function in a function that takes one or more names,
splits them into a list of words, and passes the list to the utility function.
"""
def inner(name_or_names):
names = name_or_names if isinstance(name_or_names, list) else [name_or_names]
words = []
for name in names:
words.extend(split_name(name))
return f(words)
return inner
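# Sketch of intended use; split_name lives elsewhere in the module, so a toy
# stand-in is defined here (hypothetical, splits on '-' and '_').
def split_name(name):
    return name.replace('-', '_').split('_')

@naming_style
def snake_case(words):
    return '_'.join(w.lower() for w in words)

snake_case('my-mixed_name')          # -> 'my_mixed_name'
snake_case(['first_name', 'last'])   # -> 'first_name_last'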
|
bbcb1b0b06bbb7a24abe60131a9a5ba525ed01db
| 3,644,798
|
import argparse
def parse_args(argv, app_name):
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description=app_name)
parser.add_argument(
"-c",
"--config",
dest="config",
type=str,
default="config.yaml",
help="Set config.yaml file",
)
parser.add_argument(
"-s", "--service", dest="service", action="store_true", help="Run as service"
)
parser.add_argument(
"-d", "--debug", dest="debug", action="store_true", help="Turn on DEBUG logging"
)
parser.set_defaults(service=False)
parser.set_defaults(debug=False)
return parser.parse_args(argv)
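# For example, with an illustrative app name:
args = parse_args(["-c", "prod.yaml", "--debug"], "my-app")
# args.config == 'prod.yaml', args.debug is True, args.service is False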
|
5befd81f895ee8d15b9972587a980bbdba51085e
| 3,644,799
|