content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import logging
import os
def _CalculatePerDirectoryCoverageSummary(per_file_coverage_summary):
    """Aggregates per-file coverage summaries into per-directory summaries.

    Each file's summary is added to every ancestor directory up to and
    including SRC_ROOT_DIR.

    Args:
        per_file_coverage_summary: A dictionary from file path to coverage summary.

    Returns:
        A dictionary from directory path to coverage summary.
    """
    logging.debug('Calculating per-directory coverage summary.')
    per_directory_coverage_summary = defaultdict(lambda: _CoverageSummary())
    for file_path, summary in per_file_coverage_summary.items():
        directory = os.path.dirname(file_path)
        while True:
            per_directory_coverage_summary[directory].AddSummary(summary)
            if directory == SRC_ROOT_DIR:
                break
            directory = os.path.dirname(directory)
    logging.debug('Finished calculating per-directory coverage summary.')
    return per_directory_coverage_summary
def is_aix():
    """
    Simple function to return if host is AIX or not.

    Returns:
        bool: result of salt.utils.platform.is_aix() for the current host.
    """
    return salt.utils.platform.is_aix()
def draw_support_spring(
        fig,
        support,
        orientation="up",
        color='orange',
        show_values=True,
        row=None,
        col=None,
        units="N/m"):
    """Draw an anchored spring shape on a plotly figure.

    Parameters
    ----------
    fig : plotly figure
        plotly figure to append roller shape to.
    support : Support instance
        support to be represented on figure
    orientation : 'up' or 'right', optional
        direction that the arrow faces, by default "up"
    color : str, optional
        color of spring, by default 'orange'.
    show_values: bool, optional
        If true annotates numerical force value next to arrow, by default True.
    row : int or None,
        Row of subplot to draw line on. If None specified assumes a full plot,
        by default None.
    col : int or None,
        Column of subplot to draw line on. If None specified assumes a full
        plot, by default None.
    units: str,
        The units suffix drawn with the stiffness value. Default is 'N/m'.

    Returns
    -------
    plotly figure
        Returns the plotly figure passed into function with the spring shape
        appended to it."""
    x_sup = support._position
    # x0 and y0 initialised so that when loop through each point in the coords
    # list will have two points to reference.
    x0, y0 = 0, 0
    # reduction of 0.8 used on coords specified (simple reduction modification)
    reduce = 0.8
    if orientation in ['up', 'right']:
        # coords are points between lines to be created
        # label and stiffness are defined for use as meta data to be added to
        # the hovertemplate
        if orientation == 'right':
            coords = [(5, 0), (7, 5), (12, -5), (14, 0), (19, 0)]
            # NOTE(review): assumes support._stiffness is indexable with
            # [0] = horizontal, [1] = vertical stiffness — confirm.
            stiffness = support._stiffness[0]
        else:
            coords = [(0, 5), (-5, 7), (5, 12), (0, 14), (0, 19)]
            stiffness = support._stiffness[1]
        # x1 and y1 are the ends of the line to be created
        for x1, y1 in coords:
            x1, y1 = x1 * reduce, y1 * reduce
            # Create dictionary for line shape object. Note: multiple lines
            # added but reference must always be to the same xanchor
            shape = dict(
                type="line",
                xref="x", yref="y",
                x0=x0, y0=y0, x1=x1, y1=y1,
                line_color=color,
                line_width=2,
                # pixel sizemode keeps the spring a fixed size regardless of
                # axis zoom; only the anchor is in data coordinates.
                xsizemode='pixel',
                ysizemode='pixel',
                xanchor=x_sup,
                yanchor=0
            )
            # Append line to plot or subplot
            if row and col:
                fig.add_shape(shape, row=row, col=col)
            else:
                fig.add_shape(shape)
            # set end point to be start point for the next line
            x0, y0 = x1, y1
    # NOTE(review): with an unrecognised orientation no spring is drawn, but
    # the annotation below is still added at (0, 0) offsets — confirm intended.
    if show_values:
        # Keep the label clear of the spring body.
        y0 = max(y0, 7)
        annotation = dict(
            xref="x", yref="y",
            x=x_sup,
            y=0,
            yshift=y0 * 1.5,
            xshift=x0 * 2,
            text=f"{stiffness:.3f} {units}",
            font_color=color,
            showarrow=False,
        )
        # Append shape to plot or subplot
        if row and col:
            fig.add_annotation(annotation, row=row, col=col)
        else:
            fig.add_annotation(annotation)
    return fig
def _collins_crt(r, R, P, p, K):
    """Wrapper of CRT for Collins's resultant algorithm.

    Combines residue ``r`` (mod ``P``) with residue ``R`` (mod ``p``) via the
    Chinese Remainder Theorem over ground domain ``K``, then maps the result
    into the symmetric representation modulo ``P*p``.
    """
    return gf_int(gf_crt([r, R], [P, p], K), P*p)
def quantity_remover(my_thing):
    """Recursively strip pint quantities so a structure is JSON-serializable.

    Anything carrying a ``magnitude`` attribute (a pint quantity) becomes the
    tuple ``('QUANTITY', magnitude, unit_string)``.  Dicts and non-string
    iterables are rebuilt with their contents converted; every other value is
    returned unchanged.

    Parameters
    ----------
    my_thing
        Arbitrary, possibly nested, value to convert.

    Returns
    -------
    The converted structure.
    """
    if hasattr(my_thing, 'magnitude'):
        # pint quantity: flatten into a plain tuple of serializable parts
        return 'QUANTITY', my_thing.magnitude, my_thing.units.format_babel()
    if isinstance(my_thing, dict):
        return {key: quantity_remover(value) for key, value in my_thing.items()}
    if hasattr(my_thing, '__iter__') and not isinstance(my_thing, str):
        # rebuild lists/tuples/sets preserving the original container type
        return type(my_thing)([quantity_remover(element) for element in my_thing])
    return my_thing
def sliced_wasserstein(PD1, PD2, M=50):
    """Approximate the Sliced Wasserstein distance between persistence diagrams.

    Implementation of the approximation described in "Sliced Wasserstein
    Kernel for Persistence Diagrams" by Mathieu Carriere, Marco Cuturi and
    Steve Oudot (https://arxiv.org/abs/1706.03358).

    Parameters
    -----------
    PD1: np.array size (m,2)
        Persistence diagram
    PD2: np.array size (n,2)
        Persistence diagram
    M: int, default is 50
        Iterations to run approximation.

    Returns
    --------
    sw: float
        Sliced Wasserstein distance between PD1 and PD2
    """
    # Project every diagram point onto the diagonal direction (angle pi/4).
    diag_dir = np.array(
        [np.cos(0.25 * np.pi), np.sin(0.25 * np.pi)], dtype=np.float32
    )
    proj1 = [np.dot(diag_dir, pt) for pt in PD1]
    proj2 = [np.dot(diag_dir, pt) for pt in PD2]
    if (len(proj1) != PD1.shape[0]) or (len(proj2) != PD2.shape[0]):
        raise ValueError("The projected points and origin do not match")
    # Diagonal "shadow" of each point: its projection mapped back to y = x.
    delta1 = [[np.sqrt(p ** 2 / 2.0)] * 2 for p in proj1]
    delta2 = [[np.sqrt(p ** 2 / 2.0)] * 2 for p in proj2]
    # Sweep M directions over the half-circle and accumulate 1-D transports.
    sw = 0
    theta = 0.5
    step = 1.0 / M
    for _ in range(M):
        direction = np.array(
            [np.cos(theta * np.pi), np.sin(theta * np.pi)], dtype=np.float32
        )
        V1 = [np.dot(direction, pt) for pt in PD1] + [np.dot(direction, pt) for pt in delta2]
        V2 = [np.dot(direction, pt) for pt in PD2] + [np.dot(direction, pt) for pt in delta1]
        # 1-D optimal transport cost = L1 distance between sorted projections.
        sw += step * cityblock(sorted(V1), sorted(V2))
        theta += step
    return sw
def update_handler(request):
    """Responds to any HTTP request.

    Extracts a ``name`` from the request body according to its content type
    and returns a greeting.

    Args:
        request (flask.Request): HTTP request object.

    Returns:
        The response text or any set of values that can be turned into a
        Response object using
        `make_response <https://flask.palletsprojects.com/en/1.1.x/api/#flask.Flask.make_response>`.

    Raises:
        ValueError: if the content type is unsupported, or JSON input is
            invalid or lacks a 'name' property.
    """
    # Debug trace of the incoming request object.
    print(request)
    # Bug fix: the original called request.get_json() unconditionally before
    # inspecting the content type; flask's non-silent get_json() aborts with
    # 400 for non-JSON bodies, breaking every other branch below.
    content_type = request.headers['content-type']
    if content_type == 'application/json':
        # silent=True yields None on malformed JSON instead of aborting.
        request_json = request.get_json(silent=True)
        if request_json and 'name' in request_json:
            name = request_json['name']
        else:
            raise ValueError("JSON is invalid, or missing a 'name' property")
    elif content_type == 'application/octet-stream':
        name = request.data
    elif content_type == 'text/plain':
        name = request.data
    elif content_type == 'application/x-www-form-urlencoded':
        name = request.form.get('name')
    else:
        raise ValueError("Unknown content type: {}".format(content_type))
    # escape() guards against HTML injection via the user-supplied name.
    return 'Hello {}!'.format(escape(name))
def usercourse(request, course_code):
    """
    The function is used for course content: renders the online-class page
    for a course and, for faculty users submitting a POST, adds or deletes
    online class URL entries.

    Args:
        request: Django HttpRequest.
        course_code: code identifying the course.

    Returns:
        Rendered 'online_cms/course_new.html' response.
    """
    user = request.user
    extrainfo = ExtraInfo.objects.select_related().get(user=user) # get the type of user
    courseid = Courses.objects.select_related().get(code=course_code)
    classes = OnlineClasses.objects.select_related().filter(course_id=courseid.id)
    if extrainfo.user_type == 'faculty':
        if request.method == 'POST':
            if 'submiturl' in request.POST:
                # Create a new online class entry from the submitted form.
                topic = request.POST.get('topicName')
                class_date = request.POST.get('date')
                start_time = request.POST.get('StartTime')
                end_time = request.POST.get('EndTime')
                upload_url = request.POST.get('ClassURL')
                OnlineClasses.objects.create(course_id = courseid,
                                             class_date=class_date,
                                             start_time=start_time,
                                             end_time=end_time,
                                             description=topic,
                                             upload_url=upload_url
                                             )
            if 'deleteurl' in request.POST:
                # Remove an existing class entry by its id.
                classid = request.POST.get('delete-id')
                OnlineClasses.objects.get(id=classid).delete()
    # NOTE(review): 'classes' was queried before the POST mutations; Django
    # querysets evaluate lazily at render time so changes should still show —
    # confirm this is intended.
    return render(request, "online_cms/course_new.html", {'classes': classes, 'extrainfo': extrainfo})
def add_device(config_id, name, device_type_id, device_subtype_id, ip4address, ip6address, properties):
    """Add device to BAM.

    Calls the BAM API ``addDevice`` service method and resolves the returned
    id to an entity object.

    Args:
        config_id: id of the configuration the device belongs to.
        name: device name.
        device_type_id: BAM device type id.
        device_subtype_id: BAM device subtype id.
        ip4address: IPv4 address for the device.
        ip6address: IPv6 address for the device.
        properties: additional BAM properties string.

    Returns:
        The entity for the newly created device.
    """
    response = get_api()._api_client.service.addDevice(config_id, name, device_type_id, device_subtype_id, ip4address,
                                                       ip6address, properties)
    return get_api().get_entity_by_id(response)
def cdf_approx(X): #, smoothness_factor=1):
    """
    Generates a ppoly spline to approximate the cdf of a random variable,
    from a 1-D array of i.i.d. samples thereof.

    Args:
        X: a collection of i.i.d. samples from a random variable.

    Returns:
        scipy.interpolate.PPoly object, estimating the cdf of the random variable.

    Raises:
        AssertionError: if X contains duplicate values (ties are not supported).
    """
    # Pre-format input as ordered numpy array
    X = np.asarray(X)
    diff_X = np.diff(X)
    if not (diff_X > 0).all():
        X.sort()
        diff_X = np.diff(X)
    assert(diff_X.all()) # avoids case of duplicate X-values
    n = len(X)
    # Build the quadratic program: objective scales, then P/q (objective) and
    # G/h (inequality constraints) matrices for the cvxopt solver.
    scale_axi, scale_ei = make_obj_scale(X)#, smoothness_factor)
    P, q = make_P_q(X, scale_a=scale_axi, scale_e=scale_ei)
    G, h = make_G_h(X)
    #A, b = make_A_b(X) # simply unnecessary
    bmid_c_init = bmid_c_init_state(X)
    qp_res = cvxopt.solvers.qp(
        cvxopt.matrix(P),
        cvxopt.matrix(q),
        cvxopt.matrix(G),
        cvxopt.matrix(h),
        #cvxopt.matrix(A),
        #cvxopt.matrix(b),
        #*args, **kwargs
    )
    # Convert the optimizer solution into piecewise-polynomial coefficients
    # (values, first and second derivatives at the knots X).
    X, P_X, dP_X, d2P_X = clean_optimizer_results(np.array(qp_res['x']), X)
    return PPoly.construct_fast(np.stack((d2P_X, dP_X, P_X)), X, extrapolate=True)
def cleanse_param_name(name):
    """Convert a Chainer parameter name into a valid ONNX identifier.

    ONNX identifiers must be valid C identifiers, so the slash-separated
    Chainer name is prefixed with ``param`` and slashes become underscores.

    Args:
        name (str): A Chainer parameter name (e.g., /l/W).

    Returns
        A valid ONNX name (e.g., param_l_W).
    """
    sanitized = name.replace('/', '_')
    return 'param{}'.format(sanitized)
def calc_checksum_for_ip_change(old_ip_packet, new_ip_packet, old_checksum, is_ipv6=False):
    """Incrementally recompute a checksum after an IP address change.

    :param old_ip_packet: byte sequence holding the old address
    :param new_ip_packet: byte sequence holding the new address
    :param old_checksum: checksum computed over the old address
    :param is_ipv6: True for IPv6 (8 16-bit words), False for IPv4 (2 words)
    :return: the updated checksum
    """
    # IPv6 addresses span 8 16-bit words, IPv4 addresses span 2.
    word_count = 8 if is_ipv6 else 2
    checksum = old_checksum
    for word_idx in range(word_count):
        hi = 2 * word_idx
        lo = hi + 1
        # Fold each pair of bytes into a big-endian 16-bit word.
        old_word = (old_ip_packet[hi] << 8) | old_ip_packet[lo]
        new_word = (new_ip_packet[hi] << 8) | new_ip_packet[lo]
        # Incremental update per RFC 1624-style arithmetic (delegated).
        checksum = fn_utils.calc_incre_csum(checksum, old_word, new_word)
    return checksum
def get_sender_password():
    """Fetch the stored sender-password setting.

    Returns:
        The Setting whose slug is KEY_SENDER_PASSWORD, or None when it has
        not been configured.
    """
    try:
        setting = Setting.objects.get(slug=KEY_SENDER_PASSWORD)
    except Setting.DoesNotExist:
        setting = None
    return setting
def standardize_data(df):
    """Standardizes the data by cleaning string values and standardizing column
    names.

    df: Pandas dataframe to standardize.

    Returns the standardized dataframe (a new object; race metadata columns
    are added in place on that copy).
    """
    # Clean string values in the dataframe: strip stray double quotes and
    # surrounding whitespace from every string cell.
    df = df.applymap(
        lambda x: x.replace('"', '').strip() if isinstance(x, str) else x)
    # Standardize column names. COL_NAME_MAPPING is defined elsewhere in the module.
    df = df.rename(columns=COL_NAME_MAPPING)
    # Add race metadata columns when a race category id column is present.
    if std_col.RACE_CATEGORY_ID_COL in df.columns:
        std_col.add_race_columns_from_category_id(df)
    return df
def MatrixExp6(se3mat):
    """Computes the matrix exponential of an se3 representation of
    exponential coordinates

    :param se3mat: A matrix in se3
    :return: The matrix exponential of se3mat

    Example Input:
        se3mat = np.array([[0, 0, 0, 0],
                           [0, 0, -1.57079632, 2.35619449],
                           [0, 1.57079632, 0, 2.35619449],
                           [0, 0, 0, 0]])
    Output:
        np.array([[1.0, 0.0, 0.0, 0.0],
                  [0.0, 0.0, -1.0, 0.0],
                  [0.0, 1.0, 0.0, 3.0],
                  [ 0, 0, 0, 1]])

    NOTE(review): .norm()/eye()/Matrix() suggest a sympy matrix backend —
    confirm against the module imports.
    """
    # Extract the rotation axis-angle vector from the top-left 3x3 block.
    omgtheta = so3ToVec(se3mat[0: 3, 0: 3])
    if omgtheta.norm() == 0:
        # Pure translation: identity rotation joined with the translation column.
        return (eye(3).row_join(se3mat[0: 3, 3])).col_join(Matrix([[0, 0, 0, 1]]))
    else:
        theta = AxisAng3(omgtheta)[1]
        omgmat = se3mat[0: 3, 0: 3] / theta
        R = MatrixExp3(se3mat[0: 3, 0: 3])
        # G(theta) term of the se(3) exponential applied to the translation.
        p = (eye(3) * theta + (1 - cos(theta)) * omgmat + (theta - sin(theta)) \
            * omgmat*omgmat) * se3mat[0: 3, 3] / theta
        T = (R.row_join(p)).col_join(Matrix(1,4,[0,0,0,1]))
        return T
import re
def parse_log(content, arg_parser=json_arg_parser):
    """ Parse important information from log files.

    These log files are small so we are making the logic a little simpler by
    loading all the content into memory at once rather than using an iostream.

    Args:
        content (string): the string content of the file
        arg_parser: callable that parses one header line into argument pairs;
            raises NotArgLineException for non-argument lines.

    Returns:
        args (dict<string, value>): a dictionary of function arguments of the program that
            created the log and the value those arguments were set to.
        history (list<(int, float)>): a list of tuples of time (in epoch index) and corresponding
            classification loss
        runtime (float): runtime of program in seconds

    Raises:
        BadLogFileException: when args, history or runtime cannot be parsed.

    NOTE(review): imap/compress/tee/chain are presumably from itertools;
    itertools.imap is Python-2-only — confirm the targeted interpreter.
    """
    lines = content.split('\n')
    # Part 1: parse arguments from the first 20 lines only.
    arg_pair_lists = exception_safe_map(arg_parser, lines[:20], exception=NotArgLineException)
    args = dict(chain.from_iterable(arg_pair_lists))
    # parse CV: record which subjects were held out, if stated in the header.
    for l in lines[:10]:
        m = re.match(r'subjects (\d+) are held out', l)
        if m:
            args['held_out'] = m.group(1)
    # Part 2: parse history of per-epoch validation accuracy lines.
    history_matches = imap(
        lambda l: re.match(r'epoch (\d+), validation accuracy (.*)%', l),
        lines)
    # compress(x, x) keeps only truthy entries, i.e. filters out the 'Nones'.
    history_matches = compress(*tee(history_matches, 2)) # filter out the 'Nones'
    history = [(int(h.group(1)), float(h.group(2))) for h in history_matches]
    # Part 3: parse run time from the last three lines.
    runtime = None
    for l in lines[-3:]:
        m = re.match(r'Code ran for ran for (.+)m', l)
        if m:
            runtime = float(m.group(1))
            break
    if runtime is None or len(history) == 0 or len(args) == 0:
        raise BadLogFileException('file was not formatted properly')
    return args, history, runtime
def _ohlc_dict(df_or_figure, open='', high='', low='', close='', volume='',
               validate='', **kwargs):
    """
    Returns a dictionary with the actual column names that
    correspond to each of the OHLCV values.

    df_or_figure : DataFrame or Figure
    open : string
        Column name to be used for OPEN values
    high : string
        Column name to be used for HIGH values
    low : string
        Column name to be used for LOW values
    close : string
        Column name to be used for CLOSE values
    volume : string
        Column name to be used for VOLUME values
    validate : string
        Validates that the stated column exists
        Example:
            validate='ohv' | Will ensure Open, High
            and close values exist.

    Raises StudyError for unknown column names or, with validate, for
    missing OHLCV columns.
    """
    c_dir = {}
    ohlcv = ['open', 'high', 'low', 'close', 'volume']
    # Collect candidate column names from whichever container was passed.
    # NOTE(review): any other type leaves cnames undefined and raises
    # NameError below — confirm callers only pass these types.
    if type(df_or_figure) == pd.DataFrame:
        cnames = df_or_figure.columns
    elif type(df_or_figure) == Figure or type(df_or_figure) == dict:
        cnames = df_or_figure.axis['ref'].keys()
    elif type(df_or_figure) == pd.Series:
        cnames = [df_or_figure.name]
    # First pass: case-insensitive exact match; second pass: substring match.
    c_min = dict([(v.lower(), v) for v in cnames])
    for _ in ohlcv:
        if _ in c_min.keys():
            c_dir[_] = c_min[_]
        else:
            for c in cnames:
                if _ in c.lower():
                    c_dir[_] = c
    # Explicit keyword arguments override any auto-detected mapping.
    if open:
        c_dir['open'] = open
    if high:
        c_dir['high'] = high
    if low:
        c_dir['low'] = low
    if close:
        c_dir['close'] = close
    if volume:
        c_dir['volume'] = volume
    # Every mapped column must actually exist in the source.
    for v in list(c_dir.values()):
        if v not in cnames:
            raise StudyError('{0} is not a valid column name'.format(v))
    if validate:
        # validate is a string of OHLCV initials, e.g. 'ohv'.
        errs = []
        val = validate.lower()
        s_names = dict([(_[0], _) for _ in ohlcv])
        cols = [_[0] for _ in c_dir.keys()]
        for _ in val:
            if _ not in cols:
                errs.append(s_names[_])
        if errs:
            raise StudyError('Missing Columns: {0}'.format(', '.join(errs)))
    return c_dir
def _s_to_b(value):
"""[string to binary single value]"""
try:
return bytes(value, 'utf-8')
except:
return value | bbabffa2fbd2ec62778a19c8ab3e1fe410b4640f | 3,637,717 |
def get_or_create(
    *, db_session, email: str, incident: Incident = None, **kwargs
) -> IndividualContact:
    """Gets or creates an individual.

    Looks up the contact by email within the incident's project, enriches the
    kwargs from the project's contact plugin when one is active, then either
    creates a new contact or updates the existing one.

    NOTE(review): the signature defaults incident to None, but
    incident.project is dereferenced unconditionally — a None incident would
    raise AttributeError; confirm callers always pass it.
    """
    # we fetch the individual contact from the database
    individual_contact = get_by_email_and_project(
        db_session=db_session, email=email, project_id=incident.project.id
    )
    # we try to fetch the individual's contact information using the contact plugin
    contact_plugin = plugin_service.get_active_instance(
        db_session=db_session, project_id=incident.project.id, plugin_type="contact"
    )
    individual_info = {}
    if contact_plugin:
        individual_info = contact_plugin.instance.get(email, db_session=db_session)
    # Plugin data (when present) overrides the provided kwargs.
    kwargs["email"] = individual_info.get("email", email)
    kwargs["name"] = individual_info.get("fullname", "Unknown")
    kwargs["weblink"] = individual_info.get("weblink", "")
    if not individual_contact:
        # we create a new contact
        individual_contact_in = IndividualContactCreate(**kwargs, project=incident.project)
        individual_contact = create(
            db_session=db_session, individual_contact_in=individual_contact_in
        )
    else:
        # we update the existing contact
        individual_contact_in = IndividualContactUpdate(**kwargs, project=incident.project)
        individual_contact = update(
            db_session=db_session,
            individual_contact=individual_contact,
            individual_contact_in=individual_contact_in,
        )
    return individual_contact
def stats(request):
    """Return stats as JSON, driven by GET query parameters.

    Supported parameters: ``offset`` (default '0'), ``limit`` (default '10')
    and ``order_by`` (default 'public_backlinks').
    """
    params = request.GET
    offset = params.get('offset', '0')
    limit = params.get('limit', '10')
    order_by = params.get('order_by', 'public_backlinks')
    return build_stats(offset, limit, order_by)
def news():
    """
    Return the latest version of the news json.

    The 'local' config flag (string 'True'/'False') controls whether the news
    is fetched locally; the payload is wrapped in a 200 response.
    """
    # TODO: add options to request like request.args.get('from', default='')
    latest_news = get_latest_news(local=CONFIG['PARAMS']['local'] == 'True')
    response = app.response_class(response=latest_news, status=200)
    return response
def zero_crossing(arr, rank=1):
    """Calculates the zero crossing rate.

    Counts sign changes (via tf_diff_axis of the sign tensor) and divides by
    the length of the axis being scanned.

    Args:
        arr: input tensor.
        rank: tensor rank; rank 1 yields a scalar rate, higher ranks count
            per-slice along axis rank-1.

    Returns:
        A float32 tensor named 'zcr'.

    NOTE(review): tf.count_nonzero is a TF1-era API (tf.math.count_nonzero
    in TF2) — confirm the targeted TensorFlow version.
    """
    if rank == 1:
        nzc = tf.cast(tf.count_nonzero(tf_diff_axis(tf.sign(arr))), tf.float32)
    else:
        nzc = tf.cast(tf.count_nonzero(tf_diff_axis(tf.sign(arr)), axis=rank - 1), tf.float32)
    arrlen = tf.cast(arr.shape[rank - 1], tf.float32)
    return tf.divide(nzc, arrlen, name='zcr')
def normalize_command(command):
    """Convert `command` to the string representation.

    A single-element list is unwrapped (it is either a quoted compound shell
    command or a simple one-item command); longer lists are shell-quoted and
    joined with spaces; non-list values pass through untouched.
    """
    if not isinstance(command, list):
        return command
    if len(command) == 1:
        return command[0]
    return " ".join(shlex_quote(part) for part in command)
def make_struct(*args, **kwargs):
    """Create a Struct class according to the given format.

    The original used the Python 2 ``exec`` statement, which is a syntax
    error under Python 3. Executing the generated source into an explicit
    namespace works on both and avoids injecting names into local scope.
    """
    namespace = {}
    exec(_structdef(*args, **kwargs), globals(), namespace)
    return namespace['Struct']
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
                            internal=False, relink=False):
    """Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.

    The database to be linked must have uniqueness for each object for the given ``fields``.
    If ``kind``, limit objects in ``unlinked`` of type ``kind``.
    If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
    If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.

    Raises:
        StrategyError: when a candidate key is not unique, when candidates
            lack ``database``/``code``, or (via format_nonunique_key_error)
            when an exchange matches duplicated candidates.
    """
    # Build a predicate selecting which exchanges are eligible for linking.
    if kind:
        kind = {kind} if isinstance(kind, str) else kind
        if relink:
            filter_func = lambda x: x.get('type') in kind
        else:
            filter_func = lambda x: x.get('type') in kind and not x.get('input')
    else:
        if relink:
            filter_func = lambda x: True
        else:
            filter_func = lambda x: not x.get('input')
    if internal:
        other = unlinked
    # Index candidates by their activity hash; track hash collisions so a
    # non-unique match can be reported instead of silently mislinked.
    duplicates, candidates = {}, {}
    try:
        # Other can be a generator, so a bit convoluted
        for ds in other:
            key = activity_hash(ds, fields)
            if key in candidates:
                duplicates.setdefault(key, []).append(ds)
            else:
                candidates[key] = (ds['database'], ds['code'])
    except KeyError:
        raise StrategyError("Not all datasets in database to be linked have "
                            "``database`` or ``code`` attributes")
    # Link every eligible exchange to its unique candidate.
    for container in unlinked:
        for obj in filter(filter_func, container.get('exchanges', [])):
            key = activity_hash(obj, fields)
            if key in duplicates:
                raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
            elif key in candidates:
                obj['input'] = candidates[key]
    return unlinked
def is_conn() -> bool:
    """Return True when connected to the core network.

    Connected means the parent's interface address is valid (not
    utz.IA_INVALID) and its is_conn flag is set.
    """
    return param.parent.ia != utz.IA_INVALID and param.parent.is_conn
def GetConstants():
    """Returns a list of all available constant values used by some Nexpose Criteria.

    Delegates to _get_filtered_classes over NexposeCriteriaConstant subclasses.
    """
    return _get_filtered_classes(NexposeCriteriaConstant)
import json
def getEmpiresForUser(user_email):
    """Fetches empires for the given user.

    Even though the empires should be in the data store already, we force fetch them from the server. This is
    because it could be a new user and it hasn't synced yet, but also this provides a way for the user to force
    their empire to update after changing names or shield (otherwise, they'd have to wait for ~3 hours when the
    cron job runs).

    Args:
        user_email: email address to search each realm for.

    Returns:
        dict mapping realm name to the first matching empire dict; cached in
        memcache for one hour.
    """
    keyname = 'profile:empires-for-user:'+user_email
    empires = memcache.get(keyname)
    if not empires:
        # we fire off an HTTP request to each of the realms to get empire details about this email address
        urls = {}
        for realm_name,base_url in REALMS.items():
            urls[realm_name] = base_url+'empires/search?email=' + user_email
        # make simultaneous calls to all the URLs
        rpcs = {}
        for realm_name,url in urls.items():
            rpc = urlfetch.create_rpc()
            urlfetch.make_fetch_call(rpc, url, headers = {'Accept': 'text/json'})
            rpcs[realm_name] = rpc
        empires = {}
        for realm_name, rpc in rpcs.items():
            result = rpc.get_result()
            if result.status_code == 200:
                empire = json.loads(result.content)
                if empire:
                    # API wraps results in an "empires" list; take the first.
                    empire = empire["empires"][0]
                    empires[realm_name] = empire
                    # while we're here, save it to the data store
                    model.profile.Empire.Save(realm_name, empire)
        memcache.set(keyname, empires, time=3600)
    return empires
def netflix(es, ps, e0, l=0.0001):
    """Blend predictions with the ridge-optimal weights minimizing RMSE.

    Ref: Töscher, A., Jahrer, M., & Bell, R. M. (2009). The bigchaos solution to the netflix grand prize.

    Args:
        es (list of float): RMSEs of predictions
        ps (list of np.array): predictions
        e0 (float): RMSE of all zero prediction
        l (float): lambda as in the ridge regression

    Returns:
        (tuple):

            - (np.array): ensemble predictions
            - (np.array): weights for input predictions
    """
    n_models = len(es)
    n_samples = len(ps[0])
    X = np.stack(ps).T
    # Recover X^T y from the RMSEs alone:
    # ||p - y||^2 = ||p||^2 + ||y||^2 - 2 p^T y, with ||y||^2 = n * e0^2.
    pTy = 0.5 * (n_samples * e0 ** 2 + (X ** 2).sum(axis=0) - n_samples * np.array(es) ** 2)
    # Ridge solution: (X^T X + l*n*I)^-1 X^T y, via pseudo-inverse.
    w = np.linalg.pinv(X.T.dot(X) + l * n_samples * np.eye(n_models)).dot(pTy)
    return X.dot(w), w
def create_list(list_data, user_id, status=200):
    """Create a new todo list through the API.

    Args:
        list_data: JSON-serializable payload describing the list.
        user_id: id of the owning user, interpolated into the URL.
        status: expected HTTP status; non-200 values also make the test
            client tolerate error responses.

    Returns:
        The response object from app.post_json.
    """
    res = app.post_json('/v1/users/{user_id}/lists'.format(user_id=user_id),
                        list_data,
                        status=status,
                        expect_errors=status != 200)
    return res
import fastapi
async def create_movie(
    *,
    session: aio_session.AsyncSession = fastapi.Depends(
        dependencies.get_session),
    movie_in: movie_model.MovieCreate,
    current_patron: patron_model.Patron = fastapi.Depends(  # pylint: disable=unused-argument
        dependencies.get_current_active_patron),
) -> movie_model.Movie:
    """Creates a new movie.

    Rejects duplicate English titles (409) and proposals made on behalf of
    another patron (401).
    """
    # Reject duplicates by English title.
    movie_db = await movie_crud.MovieCRUD.get_by_title(session,
                                                       movie_in.title_en)
    if movie_db:
        raise fastapi.HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="An movie with this title already exists in the system.",
        )
    # A patron may only propose movies as themselves.
    if current_patron.id != movie_in.proposed_by:
        raise fastapi.HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="https://www.youtube.com/watch?v=Z4oDZCJMDeY")
    movie = await movie_crud.MovieCRUD.create(session, model_in=movie_in)
    return movie
import time
import math
def project_gdf(gdf, to_crs=None, to_latlong=False, verbose=False):
    """
    https://github.com/gboeing/osmnx/blob/v0.9/osmnx/projection.py#L58

    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid.

    The simple calculation in this function works well for most latitudes, but
    won't work for some far northern locations like Svalbard and parts of far
    northern Norway.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected
    to_crs : dict
        if not None, just project to this CRS instead of to UTM
    to_latlong : bool
        if True, projects to latlong instead of to UTM
    verbose : bool
        if True, print timing information for the projection

    Returns
    -------
    GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    # if gdf has no gdf_name attribute, create one now
    if not hasattr(gdf, 'gdf_name'):
        gdf.gdf_name = 'unnamed'
    # if to_crs was passed-in, use this value to project the gdf
    if to_crs is not None:
        projected_gdf = gdf.to_crs(to_crs)
    # if to_crs was not passed-in, calculate the centroid of the geometry to
    # determine UTM zone
    else:
        if to_latlong:
            # if to_latlong is True, project the gdf to latlong
            # NOTE(review): default_crs is a module-level constant — confirm.
            latlong_crs = default_crs
            projected_gdf = gdf.to_crs(latlong_crs)
            if verbose:
                print('Projected the GeoDataFrame "{}" to default_crs in {:,.2f} seconds'.format(gdf.gdf_name, time.time()-start_time))
        else:
            # else, project the gdf to UTM
            # if GeoDataFrame is already in UTM, just return it
            # if (gdf.crs is not None) and ('proj' in gdf.crs) and (gdf.crs['proj'] == 'utm'):
            if gdf.crs.is_projected and gdf.crs.coordinate_operation.name.upper().startswith('UTM'):
                return gdf
            # calculate the centroid of the union of all the geometries in the
            # GeoDataFrame
            avg_longitude = gdf['geometry'].unary_union.centroid.x
            # calculate the UTM zone from this avg longitude and define the UTM
            # CRS to project
            utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
            utm_crs = {'datum': 'WGS84',
                       'ellps': 'WGS84',
                       'proj' : 'utm',
                       'zone' : utm_zone,
                       'units': 'm'}
            # project the GeoDataFrame to the UTM CRS
            projected_gdf = gdf.to_crs(utm_crs)
            if verbose:
                print('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} seconds'.format(gdf.gdf_name, utm_zone, time.time()-start_time))
    # preserve the name on the projected copy (to_crs drops custom attributes)
    projected_gdf.gdf_name = gdf.gdf_name
    return projected_gdf
from .. import __version__
from ..importer import IMPORTED
from .driver import schema_all_drivers
from .executor import schema_all_executors
from .flow import schema_flow
from .meta import schema_metas
from .request import schema_requests
from .pod import schema_pod
def get_full_schema() -> dict:
    """
    Return the full schema for Jina core as a dict.

    Merges the static driver/executor/flow/meta/request/pod schema fragments
    with the dynamically imported executor and driver schemas into a single
    JSON-schema (draft-07) document keyed by the current Jina version.
    """
    definitions = {}
    for s in [
        schema_all_drivers,
        schema_all_executors,
        schema_flow,
        schema_metas,
        schema_requests,
        schema_pod,
        IMPORTED.schema_executors,
        IMPORTED.schema_drivers
    ]:
        definitions.update(s)

    # fix CompoundExecutor: its 'components' field must accept any executor
    definitions['Jina::Executors::CompoundExecutor']['properties']['components'] = {
        '$ref': '#/definitions/Jina::Executors::All'
    }
    return {
        '$id': f'https://api.jina.ai/schemas/{__version__}.json',
        '$schema': 'http://json-schema.org/draft-07/schema#',
        'description': 'The YAML schema of Jina objects (Flow, Executor, Drivers).',
        'type': 'object',
        'oneOf':
            [{'$ref': '#/definitions/Jina::Flow'}] +
            [{"$ref": f"#/definitions/{k}"} for k in IMPORTED.schema_executors.keys()],
        'definitions': definitions
    }
def power_method(A, x0, n_iter=1):
    """Estimate the leading singular triplet of A by power iteration.

    Args:
        A: matrix whose first singular components are sought.
        x0: starting vector for the iteration.
        n_iter: number of power iterations to run (default 1).

    Returns:
        (u, s, v): estimates of the leading left singular vector, singular
        value and right singular vector.
    """
    vec = x0
    # Iterate x <- A^T A x to amplify the dominant right singular direction.
    for _ in range(n_iter):
        vec = A.T @ A @ vec
    v = vec / norm(vec)
    s = norm(A @ v)
    u = A @ v / s
    return u, s, v
def convert_coevalcube_to_sphere_surface_inpdict(inpdict):
    """
    -----------------------------------------------------------------------------
    Convert a cosmological coeval cube at a given resolution (in physical
    comoving distance) to HEALPIX coordinates of a specified nside covering the
    whole sky or coordinates covering a spherical patch. Wrapper for
    convert_coevalcube_to_sphere_surface()

    Inputs:

    inpdict     [dictionary] Parameters for the conversion, with keys:
                inpcube   [numpy array] Cosmological cube in three dimensions
                          of comoving distance (required)
                inpres    [scalar/tuple/list/numpy array] Input cube pixel
                          resolution in comoving Mpc; a scalar applies to all
                          three dimensions (required)
                nside     [scalar] HEALPIX nside for the output map. If set,
                          theta_phi is ignored.
                theta_phi [numpy array] nsrc x 2 array of theta and phi (deg)
                          at which the lightcone surface is evaluated. Exactly
                          one of nside or theta_phi must be specified.
                freq      [scalar] Frequency (Hz) to process. Exactly one of
                          freq or redshift must be specified.
                redshift  [scalar] Redshift to process (alternative to freq).
                method    [string] Interpolation method: 'nearest_rounded'
                          (fastest, least accurate) or any method accepted by
                          scipy.interpolate.interpn() ('linear', 'nearest',
                          'splinef2d'). Default='linear'
                rest_freq [scalar] Rest frame frequency (Hz) used to derive
                          redshift from freq. Default=CNST.rest_freq_HI
                cosmo     [astropy.cosmology instance] Cosmology used for
                          comoving distances. Default (None) means WMAP9.

    Output:

    Stacked lightcone surfaces covering a spherical patch (whole sky via
    HEALPIX when nside is given) or the given theta/phi coordinates, of
    shape npix.
    -----------------------------------------------------------------------------
    """
    # Rewrite note: the original unpacked inpdict via
    # ``for key,val in inpdict.iteritems(): exec(key + '=val')`` — Python-2-only
    # and an unsafe exec-injection idiom. Explicit lookups below preserve the
    # original defaults and error behavior.
    if not isinstance(inpdict, dict):
        raise TypeError('Input inpdict must be a dictionary')
    try:
        inpcube = inpdict['inpcube']
        inpres = inpdict['inpres']
    except KeyError:
        raise NameError('Inputs inpcube and inpres must be specified in inpdict')
    nside = inpdict.get('nside', None)
    theta_phi = inpdict.get('theta_phi', None)
    freq = inpdict.get('freq', None)
    redshift = inpdict.get('redshift', None)
    cosmo = inpdict.get('cosmo', None)
    method = inpdict.get('method', 'linear')
    rest_freq = inpdict.get('rest_freq', CNST.rest_freq_HI)
    return convert_coevalcube_to_sphere_surface(inpcube, inpres, nside=nside, theta_phi=theta_phi, freq=freq, redshift=redshift, method=method, rest_freq=rest_freq, cosmo=cosmo)
def postmsg(message):
    """!Sends the message to the jlogfile logging stream at level INFO.

    This is identical to:
    @code
        jlogger.info(message)
    @endcode
    @param message the message to log.
    @return whatever jlogger.info returns."""
    return jlogger.info(message)
def dec_lap_pyr(x, levs):
    """Build a batch of 'levs'-level Laplacian pyramids from x.

    Inputs:
        x    -- BxCxHxW pytorch tensor
        levs -- integer number of pyramid levels to construct
    Outputs:
        pyr  -- list of pytorch tensors, one per pyramid level;
                pyr[0] holds the finest detail level, pyr[-1] the
                coarsest (the residual low-pass approximation)
    """
    pyr = []
    current = x  # running approximation, starts at the original image
    for _ in range(levs):
        height, width = current.size(2), current.size(3)
        down = F.interpolate(current, (height // 2, width // 2), mode='bilinear')
        up = F.interpolate(down, (height, width), mode='bilinear')
        # Detail coefficients: whatever the down/up round trip loses.
        pyr.append(current - up)
        current = down
    # The final low-pass approximation terminates the pyramid.
    pyr.append(current)
    return pyr
import os
import logging
def subinit2_readPPdb_checkkeys(PATHS, config, metadata):
    """
    Reads the power plant database and determines the required input files (fundamentals and parameters):
    1) Read the power plant database from disk
    2) Read the database and check the required input files for:
        - fuels
        - efficiency curves
        (could add more in the future)
    This is done by:
        2.1) Collect key requirements from PPdb (e.g. fuels, params, etc.)
        2.2) Check if these keys are in metadata.ini
        2.3) Use the corresponding instructions in metadata.ini to check if the required input files are in
        ./Inputs and resources
        If 2.2 or 2.3 fails, raise appropriate exception.
    """
    # ----------------------------------------------------------------------- 1) Read PPdb from disk
    PPdb = pp.GenUnit.set_PPdb(PATHS['PP database'], readprms={key: config['power plant database'][key]
                                                               for key in config['power plant database']})
    # ----------------------------------------------------------------------- 2.1) Collect the required keys
    prmssh = PPdb['params']
    # NOTE - When you have more metadata sections, this is where you add them
    # note - dropna() here to allow WtEs to have no fuel key
    # {metadata section : df[['in metadata', 'file found']]
    checkKeys = {
        'fuels': pd.DataFrame(index=pd.Index(prmssh['Fuel ID*'].dropna().unique())),
        'efficiency curves': pd.DataFrame(index=pd.Index(prmssh['Efficiency Curve*'].dropna().unique())),
    }
    # df.index = required keys as in sheet (apply key.lower() to check in metadata)
    # Prep for 2.3
    extract_fname = lambda **args: args['filename'] # Use Python's kwargs parser :)
    # Maps a metadata section name to its (tuple) PATHS key, for sections whose
    # project directory is keyed by a tuple rather than the section name itself.
    PATHS_tupkey = {key[1]: key for key in PATHS.keys() if isinstance(key, tuple)}
    for mdsection, df in checkKeys.items():
        # ------------------------------------------------------------ 2.2) Check if the keys are in metadata.ini
        # logical series for filtering items that have to be checked further
        df['in metadata'] = pd.Series(index=df.index,
                                      data=(key.lower() in metadata[mdsection] for key in df.index))
        sub_idx = df['in metadata'].loc[df['in metadata'] ].index
        # ------------------------------------------------------ 2.3) Check if input files are in the project directory
        # (only for keys found in metadata)
        # 2.3.1) Build the check df's
        df['file found'] = pd.Series(index=df.index)
        for key in sub_idx:
            mdkey = key.lower()
            # a) Extract the filename
            # NOTE(review): the metadata value is evaluated as Python kwargs via
            # eval(); safe only if metadata.ini is trusted project-local config.
            try:
                fname = eval("extract_fname({})".format(metadata[mdsection][mdkey]))
            except SyntaxError:
                print("SyntaxError encountered while evaluating the metadata['{mdsection}']['{mdkey}'] instructions. "
                      "Pls. check that the following encoded argument in the metadata file is a valid expression to "
                      "pass to DataHandler.Metadata(): \n\n '{arg}'\n\n".format(
                    mdsection=mdsection, mdkey=mdkey, arg=metadata[mdsection][mdkey]))
                raise
            if fname is None:
                raise NotImplementedError("This implies that dh.Metadata() will be called with values passed. Current "
                                          "implementation only expects file reads.")
            # b) Get the path
            fp = os.path.join(PATHS[PATHS_tupkey.get(mdsection, mdsection)], fname)
            # c) Check if exists and assign to series
            df.loc[key, 'file found'] = os.path.exists(fp)
    # ------------------------------------------------------ 2.3.2) Summarize the results
    # Do this by looking for the failed keys
    err_msg = "Error in checking the parameter and input keys in the power plant database: \n\n"
    # a, b) Not in metadata, In metadata but file not found
    Failed_metadata, Failed_file = {}, {}
    for mdsection, df in checkKeys.items():
        _md = tuple(key for key in df.index if not df.loc[key, 'in metadata'])
        _file = tuple(key for key in df.index if not df.loc[key, 'file found'])
        if _md: Failed_metadata[mdsection] = _md
        if _file: Failed_file[mdsection] = _file
    # c) Report
    if Failed_metadata:
        err_msg += "The ff. keys were not found in the metadata file: \n\n{}\n\n".format(
            "\n".join("\t{}: {}".format(mdsection, ", ".join(keys)) for mdsection, keys in Failed_metadata.items()))
    if Failed_file:
        err_msg += "The ff. keys were not found in the appropriate project input directories: \n\n{}\n\n".format(
            "\n".join("\t{}: {}".format(mdsection, ", ".join(keys)) for mdsection, keys in Failed_file.items()))
    if Failed_metadata or Failed_file:
        logging.debug("\n\n".join("\n{}\n{}".format(key.upper(), val) for key, val in checkKeys.items()))
        raise RuntimeError(err_msg)
    return PPdb | 67f16f567c04c6765207e452477227e2599ae062 | 3,637,737 |
def mif2amps(sh_mif_file, working_dir, dsi_studio_odf="odf8"):
    """Convert a MRTrix SH mif file to a NiBabel amplitudes image.

    Parameters
    ==========
    sh_mif_file : str
        Path to the mif file with SH coefficients.
    working_dir : str
        Directory where intermediate files (directions.txt, amplitudes.nii)
        are written.
    dsi_studio_odf : str
        Name of the DSI Studio ODF tessellation to sample (default "odf8").

    Returns
    =======
    (amplitudes_img, directions)
        The loaded amplitudes NIfTI image and the hemisphere of sampling
        directions (num_dirs//2 x 3 array).

    Raises
    ======
    FileNotFoundError
        If ``sh2amp`` did not produce the amplitudes file.
    """
    verts, _ = get_dsi_studio_ODF_geometry(dsi_studio_odf)
    num_dirs, _ = verts.shape
    # Only one hemisphere is sampled: the full tessellation lists antipodal pairs.
    hemisphere = num_dirs // 2
    directions = verts[:hemisphere]
    x, y, z = directions.T
    # NOTE(review): z is negated before the spherical conversion -- presumably
    # to match MRTrix's angle convention; confirm against the sh2amp docs.
    _, theta, phi = cart2sphere(x, y, -z)
    dirs_txt = op.join(working_dir, "directions.txt")
    np.savetxt(dirs_txt, np.column_stack([phi, theta]))
    odf_amplitudes_nii = op.join(working_dir, "amplitudes.nii")
    popen_run(["sh2amp", "-quiet", "-nonnegative", sh_mif_file, dirs_txt, odf_amplitudes_nii])
    if not op.exists(odf_amplitudes_nii):
        # Bug fix: FileNotFoundError does not apply %-style formatting to
        # extra positional args (that is a logging idiom); interpolate here.
        raise FileNotFoundError("Unable to create %s" % odf_amplitudes_nii)
    amplitudes_img = nb.load(odf_amplitudes_nii)
    return amplitudes_img, directions
import struct
import array
def write_nifti_header(hdrname, hdr, newfile=True):
#*************************************************
    """
    filename is the name of the nifti header file.
    hdr is a header dictionary. Contents of the native header
    will be used if it is a nifti header.
    Returns: 0 if no error, otherwise 1.
    """
    # NOTE(review): this is Python 2 code (dict.has_key, fromstring, text-mode
    # writes of packed binary data); it will not run unmodified on Python 3.
    if hdr.has_key('native_header'):
        whdr = hdr['native_header']
        if whdr.has_key('filetype'):
            ftype = whdr['filetype']
        else:
            ftype = 'unknown'
    else:
        ftype = 'unknown'
    # NOTE(review): 'ftype' computed above is never used again in this function.
    Rout = hdr['R']
    # Fix broken headers.
    if hdr['mdim'] == 0:
        hdr['mdim'] = 1
    if hdr['tdim'] == 0:
        hdr['tdim'] = 1
    if hdr['zdim'] == 0:
        hdr['zdim'] = 1
    # Insert info for fieldmap correction if available.
    modify_nifti_auxfile(hdr)
    # Convert to quaternions.
    if abs(Rout[:3,:3]).sum() > 0 and Rout[3,3] == 1.:
        # This looks like a valid R matrix.
        x = rot44_to_quatern(Rout)
    else:
        x = None
    if isinstance(x, tuple):
        qa, qb, qc, qd, qfac, qoffx, qoffy, qoffz = x
        # NOTE(review): the first qform_code assignment is immediately
        # overwritten by the unconditional one below -- confirm which was meant.
        qform_code = whdr.get('qform_code',c.NIFTI_XFORM_SCANNER_ANAT)
        qform_code = c.NIFTI_XFORM_SCANNER_ANAT
    else:
        # Conversion failed, use defaults.
        qa, qb, qc, qd, qfac, qoffx, qoffy, qoffz = \
                            (0., 0., 0., 0., 1., 0., 0., 0.)
        qform_code = c.NIFTI_XFORM_UNKNOWN
    # Struct layout of the 348-byte NIfTI-1 header.
    fmt = 'i10s18sihsB8hfffhhhh8ffffhcbffffii80s24shh6f4f4f4f16s4s'
    lgth = struct.calcsize(fmt)
    # Byte order prefix: '>' big-endian when the header says swap, else '<'.
    if hdr['swap']:
        fmt = ">" + fmt
    else:
        fmt = "<" + fmt
    if hdr['native_header'].has_key('ScanningSequence'):
        if whdr['ScanningSequence'][0].strip() == 'EP':
            slice_dim = NIFTI_SLICE_ALT_INC
        else:
            slice_dim = 0
        if whdr['PhaseEncDir'] == 'ROW':
#            dim_info = (slice_dim << 4) | (0x1 << 2) | 0x2
            freq_dim = 2
            phase_dim = 1
        else:
#            dim_info = (slice_dim << 4) | (0x2 << 2) | 0x1
            freq_dim = 1
            phase_dim = 2
    else:
        freq_dim = whdr.get('freq_dim', 0)
        phase_dim = whdr.get('phase_dim', 0)
        slice_dim = whdr.get('slice_dim', 0)
    if not whdr.has_key('quatern_b'):
        # Existing header not for a nifti file. Rewrite defaults.
        whdr = {'sizeof_hdr':348, 'data_type':"", 'db_name':"", \
                'extents':16384, \
                'session_error':0, 'regular':"r", 'dim_info':"0", \
                'dim':[1, 1, 1, 1, 1, 1, 1, 1], \
                'intent_p1':0., 'intent_p2':0., 'intent_p3':0., 'intent_code':0, \
                'bitpix':0, 'slice_start':0, \
                'pixdim':[1., 0., 0., 0., 0., 0., 0., 0.], \
                'vox_offset':0., 'scl_slope':0., 'scl_inter':0., 'slice_code':"", \
                'xyzt_units':"", 'cal_max':0., 'cal_min':0., 'slice_duration':0., \
                'toffset':0., 'glmax':0, 'glmin':0, 'descrip':"", \
                'qform_code':qform_code, 'time_units':'msec', 'space_units':'mm', \
                'misc_units':'', 'sform_code':'unknown', 'intent_name':"", \
                'magic':"ni1"}
    # Set orientation information.
    whdr['quatern_b'] = qb
    whdr['quatern_c'] = qc
    whdr['quatern_d'] = qd
    whdr['qoffset_x'] = qoffx
    whdr['qoffset_y'] = qoffy
    whdr['qoffset_z'] = qoffz
    Rlpi = convert_R_to_lpi(hdr['R'], hdr['dims'], hdr['sizes'])
#    Rlpi = hdr['R']
    # sform rows: rotation scaled by the voxel sizes.
    Rtmp = dot(Rlpi, diag([hdr['xsize'], hdr['ysize'], hdr['zsize'], 1.]))
    whdr['srow_x'] = zeros(4, float)
    whdr['srow_x'][:] = Rtmp[0, :]
    whdr['srow_y'] = zeros(4, float)
    whdr['srow_y'][:] = Rtmp[1, :]
    whdr['srow_z'] = zeros(4, float)
    whdr['srow_z'][:] = Rtmp[2, :]
#    whdr['srow_x'][:3] *= hdr['xsize']
#    whdr['srow_y'][:3] *= hdr['ysize']
#    whdr['srow_z'][:3] *= hdr['zsize']
    whdr['qfac'] = qfac
    # Set undefined fields to zero. Spm puts garbage here.
    whdr['glmin'] = 0
    whdr['glmax'] = 0
    whdr['sizeof_hdr'] = 348
    whdr['descrip'] = hdr['native_header'].get('descrip','')
    whdr['aux_file'] = hdr['native_header'].get('aux_file','')
    # descrip field is 80 bytes in the header; truncate to fit.
    if len(whdr['descrip']) > 79:
        whdr['descrip'] = whdr['descrip'][:79]
    whdr['dim'] = [hdr['ndim'], hdr['xdim'], hdr['ydim'], hdr['zdim'], \
                   hdr['tdim'], hdr['mdim'], 0, 0]
    whdr['slice_end'] = hdr['zdim']-1
    # TR: prefer the stored pixel size in the time dimension, then hdr, then subhdr.
    if hdr['sizes'][3] > 0.:
        TR = hdr['sizes'][3]
    else:
        TR = hdr.get('TR',0.)
        if TR == 0.:
            TR = hdr['subhdr'].get('TR',0.)
    whdr['pixdim'] = [hdr['ndim'], hdr['xsize'], hdr['ysize'], hdr['zsize'], \
                      TR, hdr['msize'], 0., 0.]
    whdr['qoffset_x'] = qoffx
    whdr['qoffset_y'] = qoffy
    whdr['qoffset_z'] = qoffz
    whdr['quatern_b'] = qb
    whdr['quatern_c'] = qc
    whdr['quatern_d'] = qd
    whdr['qfac'] = float(qfac)
    whdr['bitpix'] = datatype_to_lgth[hdr['datatype']]
    whdr['datatype'] = nifti_type_to_datacode[hdr['datatype']]
    # dim_info packs freq/phase/slice dims into one byte (2 bits each).
    whdr['dim_info'] = freq_dim | (phase_dim << 2) | (slice_dim << 4)
    whdr['slice_code'] = nifti_slice_order_encode[ \
                hdr['native_header'].get('SliceOrder', 'unknown')]
    whdr['intent_code'] = nifti_intent_encode[whdr.get('intent_class', \
                'unknown')]
    whdr['qform_code'] = nifti_sqform_encode.get(qform_code, c.NIFTI_XFORM_UNKNOWN)
    whdr['sform_code'] = nifti_sqform_encode[whdr.get('sform_code', 0)]
    whdr['xyzt_units'] = nifti_units_encode[whdr.get('space_units', 'mm')] | \
                nifti_units_encode[whdr.get('time_units', 'msec')] | \
                nifti_units_encode[whdr.get('misc_units', '')]
    if hdr['filetype'] == 'nii':
        hdr['filetype'] = 'n+1'
    whdr['magic'] = hdr['filetype']
    # Single-file ('n+1') images store voxels after the 348-byte header
    # plus the 4-byte extension flag (and any extension payload).
    if hdr['filetype'] == 'n+1':
        vox_offset = 348
        vox_offset = vox_offset + 4
    else:
        vox_offset = 0
    extcode = whdr.get('extcode', '0000')
    if extcode[0] != '0':
        vox_offset = int(vox_offset) + 6 + len(whdr.get('edata',''))
    whdr['vox_offset'] = vox_offset
    binary_hdr = struct.pack(fmt, whdr['sizeof_hdr'], whdr['data_type'], \
        whdr['db_name'], whdr['extents'], whdr['session_error'], whdr['regular'], \
        whdr['dim_info'], whdr['dim'][0], whdr['dim'][1], whdr['dim'][2], \
        whdr['dim'][3], whdr['dim'][4], whdr['dim'][5], whdr['dim'][6], \
        whdr['dim'][7], whdr['intent_p1'], whdr['intent_p2'], whdr['intent_p3'], \
        whdr['intent_code'], whdr['datatype'], whdr['bitpix'], \
        whdr['slice_start'], whdr['qfac'], whdr['pixdim'][1], whdr['pixdim'][2], \
        whdr['pixdim'][3], whdr['pixdim'][4], whdr['pixdim'][5], \
        whdr['pixdim'][6], whdr['pixdim'][7], whdr['vox_offset'], \
        hdr['scale_factor'], hdr['scale_offset'], whdr['slice_end'], \
        whdr['slice_code'], whdr['xyzt_units'], whdr['cal_max'], whdr['cal_min'], \
        whdr['slice_duration'], whdr['toffset'], whdr['glmax'], whdr['glmin'], \
        whdr['descrip'], whdr['aux_file'], whdr['qform_code'], whdr['sform_code'], \
        whdr['quatern_b'], whdr['quatern_c'], whdr['quatern_d'], \
        whdr['qoffset_x'], whdr['qoffset_y'], whdr['qoffset_z'], \
        whdr['srow_x'][0], whdr['srow_x'][1], whdr['srow_x'][2], whdr['srow_x'][3], \
        whdr['srow_y'][0], whdr['srow_y'][1], whdr['srow_y'][2], whdr['srow_y'][3], \
        whdr['srow_z'][0], whdr['srow_z'][1], whdr['srow_z'][2], whdr['srow_z'][3], \
        whdr['intent_name'], whdr['magic'])
#    try:
    if True:
        if newfile:
            f = open(hdrname, 'w')
        else:
            f = open(hdrname, 'r+')
            f.seek(0)
#    except IOError:
#        raise IOError(\
#        "\nfile_io::write_nifti: Could not open %s\n\n"%hdrname)
    try:
        f.write(binary_hdr)
    except IOError:
        raise IOError(\
        "\nfile_io::write_nifti: Could not write to %s\n\n"%hdrname)
    if hdr['filetype'] == 'n+1':
        ecodes = whdr.get('extcode', zeros(4,byte))
        if isinstance(ecodes, list):
            ecodes = array(ecodes)
        if ecodes[0]:
            # Extension is present.
            exthdr = struct.pack('ccccii', ecodes[0], ecodes[1], \
                      ecodes[2], ecodes[3], whdr['esize'], \
                      nifti_ecode_encode[whdr['ecode']]) + whdr['edata']
        else:
            exthdr = fromstring(ecodes,byte)
        # Write the extension header.
        f.write(exthdr)
    f.close()
    return 0 | 8b9239ff96d453f8bcb7a667e62434fa9f1bfbc6 | 3,637,739 |
import struct
def get_array_of_float(num, data):
    """Read an array of 32-bit floats from the front of a binary buffer.

    Parameters
    ----------
    num : int
        Number of values to be read (length of array)
    data : bytes
        4C binary data file

    Returns
    -------
    bytes
        Truncated 4C binary data file (consumed bytes removed)
    list
        List of floats
    """
    nbytes = 4 * num  # each IEEE-754 single-precision float is 4 bytes
    values = struct.unpack('f' * num, data[:nbytes])
    return data[nbytes:], list(values)
def AUcat(disk=None, first=1, last=1000, Aname=None, Aclass=None, Aseq=0,
          giveList=False):
    """
    Catalog listing of AIPS UV data files on disk disk

    Strings use AIPS wild cards:
        blank => any
        '?'   => one of any character
        "*"   => arbitrary string
    If giveList then return list of CNOs
    * disk     = AIPS disk number to list; None => use the module default
                 Adisk. An explicit disk becomes the new default.
    * first    = lowest slot number to list
    * last     = highest slot number to list
    * Aname    = desired AIPS name, using AIPS wildcards, None -> don't check
    * Aclass   = desired AIPS class, using AIPS wildcards, None -> don't check
    * Aseq     = desired AIPS sequence, 0=> any
    * giveList = If true, return list of CNOs matching
    """
    ################################################################
    global Adisk
    # Idiom fix: compare against None with 'is', not '=='.
    if disk is None:
        disk = Adisk
    else:
        Adisk = disk
    # Get catalog
    cat = AIPSData.AIPSCat(disk)
    olist = AIPSDir.PListCat(cat.catalog, disk, type="UV", first=first, last=last,
                             Aname=Aname, Aclass=Aclass, Aseq=Aseq,
                             giveList=giveList)
    OErr.printErrMsg(err, "Error with AIPS catalog")
    return olist
    # end AUcat
def process_threat_results(matching_threats, context):
    """Build the HTTP response for a set of matched threats.

    Serializes each threat, tags the payload with the search context id,
    and -- when searches are still pending -- asks the client to retry.
    """
    serialized_hits = [ThreatSerializer(threat).data for threat in matching_threats]
    payload = {
        "id": context.id,
        "hits": serialized_hits,
    }
    if context.pending_searches:
        # Some searches are still running: tell the client to poll again.
        payload["retry_secs"] = 60
        return Response(payload, status.HTTP_303_SEE_OTHER)
    return Response(payload, status.HTTP_200_OK)
def entropy_logits(logits):
    """
    Computes the entropy of an unnormalized probability distribution.

    Softmax over the last axis turns the logits into probabilities,
    which are then handed to the entropy helper.
    """
    return entropy(F.softmax(logits, dim=-1))
def convert_int_to_str(number: int, char: str = "'") -> str:
    """Converts an ugly int into a beautiful and sweet str.

    Parameters:
        number: The number which is gonna be converted.
        char: The characters which are gonna be inserted between every 3 digits.

    Example: 2364735247 --> 2'364'735'247

    Bug fix: the previous index-based loop counted the '-' sign as a digit,
    so negative numbers came out like -'123'456; grouping is now done with
    the format mini-language, which handles the sign correctly
    (-123456 --> -123'456).
    """
    # ',' grouping from the format spec, then swap in the requested separator.
    return f"{number:,}".replace(",", char)
def exists(index, doc_type, id, **kwargs):
    """
    Returns a boolean indicating whether or not given document exists in Elasticsearch.

    http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
    """
    result = request("exists", None, index, doc_type, id, **kwargs)
    # Pretty-print the raw result before handing it back to the caller.
    jsonprint(result)
    return result
def deserialize_cookie(string):
    """Deserialize a '#'-separated cookie string.

    Expected layout is name#value#expiry (extra fields are ignored).
    Returns None for malformed input.
    """
    fields = string.split("#")
    # Need at least name, value and a numeric expiry field.
    if len(fields) < 3 or not is_int(fields[2]):
        return None
    name, value, expiry = fields[0], fields[1], fields[2]
    return create_internal_cookie(unquote(name), unquote(value), parse_int(expiry))
def choose(n, k):
    """Binomial coefficient nCk, computed from factorials.

    Usage: choose(n, k)
    args:
        n = total number
        k = total number of sub-groups

    On invalid input (negative/non-numeric arguments) the usage text is
    printed and None is returned.
    """
    try:
        numerator = factorial(n)
        denominator = factorial(k) * factorial(n - k)
        return numerator / denominator
    except(ValueError, ZeroDivisionError, TypeError):
        print("""This is a binomial coeficient nCk used in binomial probablilty
            this funtion uses factorial()
            Usage: choose(n, k)
            args:
            n = total number
            k = total number of sub-groups """)
def BiRNN(x, seq_lens):
    """Bidirectional LSTM over padded batches, projected to class logits.

    TODO: full docstring; seq_lens (a tf.placeholder) carries the true
    length of each input sequence.
    """
    # Expected data input shape: (batch_size, seq_lens, n_input).
    # One LSTM cell per direction.
    fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Run the bidirectional RNN, respecting each sequence's real length.
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=fw_cell,
        cell_bw=bw_cell,
        inputs=x,
        sequence_length=seq_lens,
        dtype=tf.float32)
    # Concatenate the two directions along the feature axis, then project
    # to per-timestep class logits.
    concatenated = tf.concat([out_fw, out_bw], 2)
    return tf.layers.dense(inputs=concatenated, units=n_classes)
import grp
from typing import cast
def get_os_group(name: _STR_OR_INT_OR_NONE = None) -> grp.struct_group:
    """Get an operating system group object.

    Args:
        name (:obj:`str` or :obj:`int`, optional): The "group name" or ``gid``.
            Defaults to the current user's group.

    Raises:
        OSError: If the given ``name`` does not exist as a "group
            name" for this operating system.
        OSError: If the given ``name`` is a ``gid`` and it does not
            exist.

    :rtype:
        :obj:`struct_group <grp>`

        * A tuple like object.

    Example:
        >>> from flutils.pathutils import get_os_group
        >>> get_os_group('bar')
        grp.struct_group(gr_name='bar', gr_passwd='*', gr_gid=2001,
        gr_mem=['foo'])
    """
    if name is None:
        # Default to the current user's primary group id.
        name = cast(int, get_os_user().pw_gid)
    if isinstance(name, int):
        try:
            return grp.getgrgid(name)
        except KeyError:
            raise OSError(
                'The given gid: %r, is not a valid gid for this operating '
                'system.' % name
            )
    try:
        return grp.getgrnam(name)
    except KeyError:
        raise OSError(
            'The given name: %r, is not a valid "group name" '
            'for this operating system.' % name
        )
def _process_output(response, context):
    """Post-process TensorFlow Serving output before it is returned to the client.

    Args:
        response (obj): the TensorFlow serving response
        context (Context): an object containing request and configuration details

    Returns:
        (bytes, string): data to return to client, response content type
    """
    if response.status_code != 200:
        _return_error(response.status_code, response.content.decode('utf-8'))
    response_content_type = context.accept_header
    # Bug fix: removed the leftover debug `print("response.json():", ...)`,
    # which parsed the body a second time and raised on non-JSON responses.
    # Remove whitespace from the output JSON string.
    # NOTE(review): translate() also strips whitespace inside string values;
    # acceptable only if model outputs contain no free-text fields -- confirm.
    prediction = response.content.decode('utf-8').translate(
        dict.fromkeys(map(ord, whitespace)))
    return prediction, response_content_type
import sys
import plistlib
def execute_dscl(option="-plist", datasource=".", command="-read", parameters=""):
    """Execute dscl and return the values

    Args:
        option (str, optional): The option to use. Defaults to "-plist".
        datasource (str, optional): The node to query. Defaults to ".".
        command (str, optional): The dscl command to run. Defaults to "-read".
        parameters (str, optional): Parameters that will be passed to the command option. Defaults to "".

    Returns:
        dict: A dict of the results from dscl

    Exits the process with status 2 when dscl reports a failure.
    """
    results = execute_process(f"/usr/bin/dscl {option} {datasource} {command} {parameters}")

    # Verify command result
    if not results['success']:
        # Bug fix: the old message ("Failed to admin group membership!") was
        # copied from an unrelated helper and misdescribed the failure.
        print(f"Failed to execute dscl {command} against {datasource}!")
        print(results['stderr'])
        sys.exit(2)

    return plistlib.loads(results['stdout'].encode())
from bs4 import BeautifulSoup
import requests
def make_soup(text: str, mode: str = "url", parser: str = PARSER) -> BeautifulSoup:
    """Returns a soup.

    mode:
        "url"  -- treat *text* as a URL and fetch it; passing a dict as
                  *mode* uses it as the request's query params.
        "file" -- treat *text* as a path and parse the file's contents.
        other  -- parse *text* itself as markup.
    """
    if mode == "url" or isinstance(mode, dict):
        params = mode if isinstance(mode, dict) else {}
        text = requests.get(text, params=params).text
    elif mode == "file":
        # Bug fix: the file handle was previously opened and never closed
        # (left for the GC); read it inside a context manager instead.
        with open(text) as fh:
            text = fh.read()
    return BeautifulSoup(text, parser)
import ast
def _merge_inner_function(
    class_def, infer_type, intermediate_repr, merge_inner_function
):
    """
    Merge the inner function if found within the class, with the class IR
    :param class_def: Class AST
    :type class_def: ```ClassDef```
    :param infer_type: Whether to try inferring the typ (from the default)
    :type infer_type: ```bool```
    :param intermediate_repr: a dictionary of form
        { "name": Optional[str],
          "type": Optional[str],
          "doc": Optional[str],
          "params": OrderedDict[str, {'typ': str, 'doc': Optional[str], 'default': Any}]
          "returns": Optional[OrderedDict[Literal['return_type'],
                                          {'typ': str, 'doc': Optional[str], 'default': Any}),)]] }
    :type intermediate_repr: ```dict```
    :param merge_inner_function: Name of inner function to merge. If None, merge nothing.
    :type merge_inner_function: ```Optional[str]```
    :returns: a dictionary of form
        { "name": Optional[str],
          "type": Optional[str],
          "doc": Optional[str],
          "params": OrderedDict[str, {'typ': str, 'doc': Optional[str], 'default': Any}]
          "returns": Optional[OrderedDict[Literal['return_type'],
                                          {'typ': str, 'doc': Optional[str], 'default': Any}),)]] }
    :rtype: ```dict```
    """
    # First FunctionDef anywhere under the class whose name matches; None if absent.
    function_def = next(
        filter(
            lambda func: func.name == merge_inner_function,
            filter(rpartial(isinstance, FunctionDef), ast.walk(class_def)),
        ),
        None,
    )
    if function_def is not None:
        # A first positional arg (e.g. 'self'/'cls') marks the function type;
        # no args means it is treated as static.
        function_type = (
            "static" if not function_def.args.args else function_def.args.args[0].arg
        )
        inner_ir = function(
            function_def,
            function_name=merge_inner_function,
            function_type=function_type,
            infer_type=infer_type,
        )
        # Merge the inner function's IR into the class IR in place.
        ir_merge(other=inner_ir, target=intermediate_repr)
    return intermediate_repr | 5c891ba82cb5b41a5b5d311611f5d318d249a31e | 3,637,753 |
def pb_set_defaults():
    """Restore the board's default settings.

    Must be called before any other board function is used.
    """
    return spinapi.pb_set_defaults()
def count_routes_graph(graph, source_node, dest_node):
    """Count distinct routes from source_node to dest_node in a DAG.

    Classic tree-like graph traversal: reaching dest_node, or the node one
    step below it (dest_node - source_node == 1), counts as one completed
    route — matching the original base cases exactly.

    Improvement: per-node memoization turns the previously exponential
    pure-recursive walk into O(V + E) on DAGs with shared sub-paths, while
    returning identical counts.
    """
    memo = {}

    def _count(node):
        # Base cases preserved verbatim from the original traversal.
        if dest_node == node or dest_node - node == 1:
            return 1
        if node not in memo:
            memo[node] = sum(_count(child) for child in graph[node])
        return memo[node]

    return _count(source_node)
from typing import Dict
def pluck_state(obj: Dict) -> str:
    """A wrapper to illustrate composing
    the above two functions.

    Args:
        obj: The dictionary created from the json string.
    """
    # Compose: first pull the metadata, then extract the state from it.
    extract = pipe(get_metadata, get_state_from_meta)
    return extract(obj)
from typing import Callable
from typing import Mapping
from typing import Any
from typing import Optional
def value(
    parser: Callable[[str, Mapping[str, str]], Any] = nop,
    tag_: Optional[str] = None,
    var: Optional[str] = None,
) -> Parser:
    """Return a parser to parse a simple value assignment XML tag.
    :param parser:
        The text parser to use for the contents of the given `tag_`. It will
        also be given the attributes mapping.
    :param tag_:
        The name of the tag to parse. The default is to consume any tag.
    :param var:
        Override the name the value is to be assigned to. The default is the
        tag name.
        .. note::
            Use of this will break the AST's ability to make suggestions when
            attempting to assign to an invalid variable as that feature
            requires the tag and variable to have the same name.
    :return:
        A parser that consumes the given XML `tag_` and produces a
        :class:`rads.config.ast.Assignment` AST node.
    :raises rads.config.xml_parsers.TerminalXMLParseError:
        Raised by the returned parser if the consumed tag is empty or the given
        text `parser` produces a :class:`rads.config.text_parsers.TextParseError`.
    """
    # Closure applied to each matched element; builds the Assignment node.
    def process(element: Element) -> Assignment:
        # Fall back to the tag name when no explicit variable was given.
        var_ = var if var else element.tag
        condition = parse_condition(element.attributes)
        action = parse_action(element)
        text = element.text if element.text else ""
        source = source_from_element(element)
        try:
            value = parser(text, element.attributes)
        except TextParseError as err:
            # Re-raise as a terminal parse error anchored at this element.
            raise error_at(element)(str(err)) from err
        return Assignment(
            name=var_, value=value, condition=condition, action=action, source=source
        )
    # Constrain to a specific tag when requested; otherwise accept any tag.
    if tag_:
        return tag(tag_) ^ process
    return any() ^ process | dcb2ad9b9e83015f1fd86323a156bbe92d505211 | 3,637,757 |
def compute_Rnorm(image, mask_field, cen, R=12, wid=1, mask_cross=True, display=False):
    """ Compute (sigma-clipped) normalization using an annulus.
    Note the output values of normalization contain background.

    Parameters
    ----------
    image : input image for measurement
    mask_field : mask map with nearby sources masked as 1.
    cen : center of target
    R : radius of annulus
    wid : half-width of annulus
    mask_cross : also mask the +/- 4-pixel cross through the center
    display : show the mask/image and the clipped histogram

    Returns
    -------
    I_mean: mean value in the annulus
    I_med : median value in the annulus
    I_std : std value in the annulus
    I_flag : 0 good / 1 bad (available pixels < 5)
    """
    annulus_ma = CircularAnnulus([cen], R-wid, R+wid).to_mask()[0]
    mask_ring = annulus_ma.to_image(image.shape) > 0.5   # sky ring (R-wid, R+wid)
    mask_clean = mask_ring & (~mask_field)               # sky ring with other sources masked
    # Whether to mask the cross regions, important if R is small
    if mask_cross:
        yy, xx = np.indices(image.shape)
        rr = np.sqrt((xx-cen[0])**2+(yy-cen[1])**2)
        cross = ((abs(xx-cen[0])<4)|(abs(yy-cen[1])<4))
        mask_clean = mask_clean * (~cross)
    # Too few usable pixels: flag the measurement as bad.
    if len(image[mask_clean]) < 5:
        return [np.nan] * 3 + [1]
    # NOTE(review): docstring historically said "3 sigma-clipped" but the clip
    # below uses sigma=2 -- confirm the intended threshold.
    # Clipping is done in log10 space; statistics are converted back below.
    z = sigma_clip(np.log10(image[mask_clean]), sigma=2, maxiters=5)
    I_mean, I_med, I_std = 10**np.mean(z), 10**np.median(z.compressed()), np.std(10**z)
    if display:
        z = 10**z
        fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9,4))
        ax1.imshow(mask_clean, cmap="gray", alpha=0.7)
        ax1.imshow(image, vmin=image.min(), vmax=I_med+50*I_std,
                   cmap='viridis', norm=AsinhNorm(), alpha=0.7)
        ax1.plot(cen[0], cen[1], 'r*', ms=10)
        ax2.hist(sigma_clip(z),alpha=0.7)
        # Label mean value
        plt.axvline(I_mean, color='k')
        plt.text(0.5, 0.9, "%.1f"%I_mean, color='darkorange', ha='center', transform=ax2.transAxes)
        # Label 20% / 80% quantiles
        I_20 = np.quantile(z.compressed(), 0.2)
        I_80 = np.quantile(z.compressed(), 0.8)
        for I, x_txt in zip([I_20, I_80], [0.2, 0.8]):
            plt.axvline(I, color='k', ls="--")
            plt.text(x_txt, 0.9, "%.1f"%I, color='orange',
                     ha='center', transform=ax2.transAxes)
    return I_mean, I_med, I_std, 0 | 7c0b2aebf009b81c19de30e3a0d9f91fcfcebd52 | 3,637,758 |
import six
def inject_timeout(func):
    """Decorator which injects ``timeout`` parameter into request.

    On client initiation, default timeout is set. This timeout will be
    injected into any request if no explicit parameter is set.

    :return: Value of decorated function.
    """
    @six.wraps(func)
    def wrapper(self, *args, **kwargs):
        # Only fill in the client-wide default when the caller did not
        # pass an explicit timeout.
        kwargs.setdefault("timeout", self._timeout)
        return func(self, *args, **kwargs)

    return wrapper
def test_match_partial(values):
    """@match_partial allows not covering all the cases."""
    # NOTE(review): the fixture yields a pair; v2 is intentionally left
    # unused -- only the covered constructor's value is exercised here.
    v, v2 = values
    # The decorated class acts as a dispatcher: matching MyConstructor
    # returns its payload unchanged.
    @match_partial(MyType)
    class get_partial_value(object):
        def MyConstructor(x):
            return x
    assert get_partial_value(v) == 3 | 826a08066822e701c2077c2b71be48152c401b3f | 3,637,760 |
def assert_sim_of_model_with_itself_is_approx_one(mdl: nn.Module, X: Tensor,
                                                  layer_name: str,
                                                  metric_comparison_type: str = 'pwcca',
                                                  metric_as_sim_or_dist: str = 'dist') -> bool:
    """
    Sanity check: the chosen metric, computed between a model and itself on
    the same data and expressed as a distance, must be approximately 0.0
    (equivalently, similarity approximately 1.0).

    Returns True if the model passes. If not it asserts against you
    (never returns False).
    """
    dist: float = get_metric(mdl, mdl, X, X, layer_name, metric_comparison_type=metric_comparison_type,
                             metric_as_sim_or_dist=metric_as_sim_or_dist)
    print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
    # Bug fix: the assertion message previously said "close to 1.0",
    # contradicting the actual check (a distance close to 0.0).
    assert approx_equal(dist, 0.0), f'Distance should be close to 0.0 but got: {dist=}'
    return True
def cver(verstr):
    """Convert a version string into a number.

    Beta versions ("b<x>") are offset by -100000 so they always sort
    below every release version.
    """
    return float(verstr[1:]) - 100000 if verstr.startswith("b") else float(verstr)
import urllib
def _GetGaeCookie(host, service, auth_token, secure):
  """This function creates a login cookie using the authentication token
  obtained after logging in successfully in the Google account.
  Args:
    host: Host where the user wants to login.
    service: Service code where the user wants to login.
    auth_token: Authentication token obtained from ClientLogin.
    secure: True if we want a secure cookie, false if not.
  Returns:
    A cookie for the specifed service.
  Raises:
    urllib2.HTTPError: This exception is raised when the cookie cannot be
    obtained and the user is redirected to another place.
  """
  # NOTE(review): Python 2 code (urllib.urlencode / urllib2); porting to
  # Python 3 would require urllib.parse / urllib.request.
  # Create a request for Google's service with the authentication token.
  continue_location = 'http://localhost/'
  cookie_request_data_map = {
      'continue' : continue_location,
      'auth' : auth_token,
  }
  cookie_request_data = urllib.urlencode(cookie_request_data_map)
  cookie_url = '{protocol}://{host}/_{service}/login?{data}'.format(
      protocol=('https' if secure else 'http'), host=host, service=service,
      data=cookie_request_data)
  cookie_request = urllib2.Request(cookie_url)
  try:
    # Create a custom opener, make the request and extract the body.
    http_opener = _GetHTTPOpener()
    cookie_response = http_opener.open(cookie_request)
  except urllib2.HTTPError as e:
    # Keep the error as the cookie response.
    cookie_response = e
  # Check that a redirection was made to the required continue location.
  # Otherwise, return an HTTP error.
  response_code = cookie_response.code
  if (response_code != 302 or
      cookie_response.info()['location'] != continue_location):
    raise urllib2.HTTPError(cookie_request.get_full_url(), response_code,
                            cookie_response.msg, cookie_response.headers,
                            cookie_response.fp)
  # Extract the cookie from the headers and remove 'HttpOnly' from it.
  cookie = cookie_response.headers.get('Set-Cookie')
  return cookie.replace('; HttpOnly', '') | 9bef7516f6b43c2b744e6bb0a75a488e8aee3934 | 3,637,763 |
async def ping_handler() -> data.PingResponse:
    """Health-check endpoint: report that the server is up."""
    response = data.PingResponse(status="ok")
    return response
import subprocess
def main():
    """Start a child process, output status, and monitor exit.

    Runs the command from the CLI arguments, kills it when the --timeout
    budget is exhausted, and returns (and logs) the child's exit code.
    """
    args = docopt.docopt(__doc__, options_first=True, version=__version__)
    command = " ".join(args["<command>"])
    timeout = parse_time(args["--timeout"])
    # Calculate the time at which we will kill the child process.
    now = now_no_us()
    killtime = now + timeout
    # Log some startup information for the user.
    cprint(f"Running: {command}")
    cprint(f"Max runtime {timeout}")
    cprint(f"Will kill at {killtime} UTC")
    # Start the child process.
    # NOTE(review): shell=True executes the joined command string through the
    # shell; acceptable here only because the command comes from the local CLI.
    child = subprocess.Popen(command, shell=True)  # nosec
    # Loop until it is time to kill the child process.
    while now < killtime:
        # Log how much time is remaining.
        remaining_delta = killtime - now
        cprint(f"{remaining_delta} remaining", severity=Severity.WARNING)
        try:
            sleep_time = calculate_sleep_time(remaining_delta)
            # Sleep while waiting for the child to exit.
            child.wait(sleep_time)
            # The child has exited before the timeout
            break
        except subprocess.TimeoutExpired:
            # The child did not exit.  Not a problem.
            pass
        now = now_no_us()
    else:
        # while/else: this branch runs only when the loop was NOT broken,
        # i.e. we've reached the killtime without the child exiting.
        cprint("Timeout reached... killing child.", severity=Severity.FAIL)
        child.kill()
    # Wait for the child to exit if it hasn't already.
    return_code = child.wait()
    # Log the return code of the child.
    if return_code == 0:
        cprint(f"Child has exited with: {return_code}", severity=Severity.GOOD)
    else:
        cprint(f"Child has exited with: {return_code}", severity=Severity.FAIL)
    # Return the child's return code as our own so that it can be acted upon.
    return return_code | 2dbf4e514999f4805fe1cb8d36febd80cfb21458 | 3,637,765 |
import os
import csv
def get_columns_sql(table):
    """Build the comma-separated ``<name> <type>`` column clause for *table*.

    Reads ``<table>.csv`` from the directory named by the
    ``MYSQL_TABLE_TEMPLATES_DIR`` environment variable; the first two
    fields of each row are the column name and its SQL data type.
    """
    csv_path = os.path.join(os.environ['MYSQL_TABLE_TEMPLATES_DIR'], f'{table}.csv')
    with open(csv_path, newline='') as handle:
        # One "<name> <type>" fragment per template row; extra fields ignored.
        fragments = [' '.join(record[:2]) for record in csv.reader(handle, delimiter=',')]
    return ', '.join(fragments)
def create_session_cookie():
    """
    Build a cookie dict holding an authenticated Django session for a
    freshly created test user.

    Adapted from https://stackoverflow.com/questions/22494583/login-with-code-when-using-liveservertestcase-with-django

    :return: dict mapping the session cookie name to the session key
    """
    # A brand-new test user to authenticate as.
    user = AuthUserFactory()
    # Populate a server-side session with the standard auth keys.
    session = SessionStore()
    session[SESSION_KEY] = user.pk
    session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
    session[HASH_SESSION_KEY] = user.get_session_auth_hash()
    session.save()
    return {settings.SESSION_COOKIE_NAME: session.session_key}
from pathlib import Path
from typing import Optional
from typing import List
import os
import fnmatch
def is_excluded(src_path: Path, globs: Optional[List[str]] = None) -> bool:
    """
    Return whether *src_path* matches any of the exclusion *globs*.

    A path is excluded when it matches a glob case-sensitively
    (e.g. ``folder/*`` or ``*.md``) or when its basename equals a glob
    verbatim. An empty or missing glob list excludes nothing.

    Credits: code inspired by / adapted from
    https://github.com/apenwarr/mkdocs-exclude/blob/master/mkdocs_exclude/plugin.py

    Args:
        src_path (Path): Path of file
        globs (list): list of globs
    Returns:
        (bool): whether src_path should be excluded
    """
    if not globs:
        return False
    assert isinstance(src_path, Path)
    assert hasattr(globs, "__iter__")  # list or tuple
    # Normalize Windows separators so the same globs match on every OS;
    # only convert when os.sep is unusual, since Unix filenames may
    # legitimately contain backslashes.
    candidate = str(src_path)
    if os.sep != "/":
        candidate = candidate.replace(os.sep, "/")
    for pattern in globs:
        if fnmatch.fnmatchcase(candidate, pattern) or src_path.name == pattern:
            return True
    return False
from pyquickhelper.loghelper import BufferedPrint
import os
def get_seattle_streets(filename=None, folder="."):
    """
    Retrieve processed data from
    `Seattle Streets <https://data.seattle.gov/dataset/Street-Network-Database/
    afip-2mzr/data)>`_.

    @param filename local filename; when given it is validated and returned
    @param folder temporary folder where to download files
    @return path of the downloaded ``.shp`` file (or *filename* itself)
    """
    if filename is not None:
        # Caller supplied a file: just check it exists.
        if not os.path.exists(filename):
            raise FileNotFoundError(filename)
        return filename
    names = download_data("WGS84_seattle_street.zip", whereTo=folder)
    shp = [n for n in names if n.endswith('.shp')]
    if len(shp) != 1:
        # Retry with logging captured so the error message carries details.
        buf = BufferedPrint()
        names = download_data("WGS84_seattle_street.zip",
                              whereTo=folder, fLOG=buf.fprint)
        raise FileNotFoundError(
            "Unable to download data 'WGS84_seattle_street.zip' to '{0}', log={1}\nnames={2}.".format(
                filename, str(buf), "\n".join(names)))
    return shp[0]
def _replace_oov(original_vocab, line):
"""Replace out-of-vocab words with "UNK".
This maintains compatibility with published results.
Args:
original_vocab: a set of strings (The standard vocabulary for the dataset)
line: a unicode string - a space-delimited sequence of words.
Returns:
a unicode string - a space-delimited sequence of words.
"""
return u" ".join(
[word if word in original_vocab else u"UNK" for word in line.split()]) | 2e2cb1464484806b79263a14fd32ed4d40d0c9ba | 3,637,770 |
def linear_CMD_fit(x, y, xerr, yerr):
    """
    Fit a straight line to CMD data (x = color, y = magnitude) using
    orthogonal distance regression, accounting for errors on both axes.

    Parameters
    ----------
    x : array-like
        color
    y : array-like
        magnitude
    xerr : array-like
        color errors
    yerr : array-like
        magnitude errors

    Returns
    -------
    slope : float
        slope of best-fit line
    r_squared : float
        Correlation coefficient (R^2)
    """
    observations = RealData(x, y, sx=xerr, sy=yerr)
    fit = ODR(observations, Model(line), beta0=[-0.1, np.mean(y)]).run()
    best_slope = fit.beta[0]
    return best_slope, r2_score(y, line(fit.beta, x))
def _verify_path_value(value, is_str, is_kind=False):
    """Verify a key path value: one of a kind, string ID or integer ID.

    Args:
        value (Union[str, int]): The value to verify
        is_str (bool): Flag indicating if the ``value`` is a string. If
            :data:`False`, then the ``value`` is assumed to be an integer.
        is_kind (Optional[bool]): Flag indicating if the value is meant to
            be a kind. Defaults to :data:`False`.

    Returns:
        Union[str, int]: The ``value`` passed in, if it passed verification
        checks.

    Raises:
        ValueError: If a string kind/name's UTF-8 byte length falls outside
            ``[1, 1500]``, or an integer ID falls outside ``[1, 2^63 - 1]``.
    """
    if is_str:
        # String IDs and kinds are limited by UTF-8 byte length, not
        # character count.
        if 1 <= len(value.encode("utf-8")) <= _MAX_KEYPART_BYTES:
            return value
        template = _BAD_KIND if is_kind else _BAD_STRING_ID
        raise ValueError(template.format(_MAX_KEYPART_BYTES, value))
    if not 1 <= value <= _MAX_INTEGER_ID:
        raise ValueError(_BAD_INTEGER_ID.format(value))
    return value
def _is_tipologia_header(row):
    """Return whether the current row is the header of a new item
    category ("Personale", "Noli", etc.) rather than a regular entry.

    A header row has a string in column 1 and either the units header
    string or NaN in column 2.
    """
    label = row.iloc[1]
    units = row.iloc[2]
    if type(label) is not str:
        return False
    if type(units) is str:
        return units == HEADERS["units"]
    return bool(np.isnan(units))
def validateFloat(
    value,
    blank=False,
    strip=None,
    allowRegexes=None,
    blockRegexes=None,
    min=None,
    max=None,
    lessThan=None,
    greaterThan=None,
    excMsg=None,
):
    # type: (str, bool, Union[None, str, bool], Union[None, Sequence[Union[Pattern, str]]], Union[None, Sequence[Union[Pattern, str, Sequence[Union[Pattern, str]]]]], Optional[int], Optional[int], Optional[int], Optional[int], Optional[str]) -> Union[float, str]
    """Raise ValidationException unless *value* parses as a float.

    Returns the parsed value, so it can be used inline in an expression:

        print(2 + validateFloat(your_number))

    Like float() itself, leading/trailing whitespace around the number is
    tolerated.

    * value (str): The value being validated as a float.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blockRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * min (int, float): The (inclusive) minimum value for the value to pass validation.
    * max (int, float): The (inclusive) maximum value for the value to pass validation.
    * lessThan (int, float): The (exclusive) minimum value for the value to pass validation.
    * greaterThan (int, float): The (exclusive) maximum value for the value to pass validation.
    * excMsg (str): A custom message to use in the raised ValidationException.

    Specifying min together with lessThan (or max with greaterThan) raises
    PySimpleValidateException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateFloat('3.14')
    3.14
    >>> pysv.validateFloat('pi')
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: 'pi' is not a float.
    >>> pysv.validateFloat('3')
    3.0
    >>> pysv.validateFloat('3', min=3)
    3.0
    >>> pysv.validateFloat('3', greaterThan=3)
    Traceback (most recent call last):
    ...
    pysimplevalidate.ValidationException: Number must be greater than 3.
    """
    # Delegate to the shared numeric validator; _numType='float' guarantees
    # the float (never int) return despite validateNum's broader signature.
    kwargs = dict(
        value=value,
        blank=blank,
        strip=strip,
        allowRegexes=allowRegexes,
        blockRegexes=blockRegexes,
        _numType="float",
        min=min,
        max=max,
        lessThan=lessThan,
        greaterThan=greaterThan,
    )
    return validateNum(**kwargs)
from typing import Dict
from typing import Any
def send_sms(mobile: str, sms_code: str) -> Dict[str, Any]:
    """Send an SMS verification code to *mobile* via the Ronglian Cloud SDK.

    On success ("000000" status) the code and a resend-cooldown marker are
    cached in redis in one transaction. Always returns the provider's
    result dict; on failure the error is logged and a fallback dict is
    returned instead of raising.
    """
    sdk: SmsSDK = SmsSDK(
        celery.app.config.get("SMS_ACCOUNT_ID"),
        celery.app.config.get("SMS_ACCOUNT_TOKEN"),
        celery.app.config.get("SMS_APP_ID")
    )
    # Pre-initialize so the except-branch `return result` cannot raise
    # UnboundLocalError when sendMessage/orjson.loads fails before the
    # provider response was assigned (the original bug).
    result: Dict[str, Any] = {"statusCode": "error"}
    try:
        ret: str = sdk.sendMessage(
            celery.app.config.get("SMS_TEMPLATE_ID"),  # template ID
            mobile,  # recipient phone number
            (sms_code, celery.app.config.get("SMS_EXPIRE_TIME") // 60)  # template variables
        )
        # The provider returns a JSON string; decode it into a dict.
        result = orjson.loads(ret)
        # "000000" means the SMS was sent; cache the code in redis.
        if result["statusCode"] == "000000":
            pipe: Pipeline = redis.pipeline()
            pipe.multi()  # start transaction
            # Store the code for later verification.
            pipe.setex("sms_%s" % mobile, celery.app.config.get("SMS_EXPIRE_TIME"), sms_code)
            # Start the resend-cooldown countdown.
            pipe.setex("int_%s" % mobile, celery.app.config.get("SMS_INTERVAL_TIME"), "_")
            pipe.execute()  # commit transaction
            return result
        else:
            raise Exception
    except Exception as exc:
        celery.app.logger.error("短信发送失败!\r\n%s" % exc)
        return result
def from_dataframe(df, name='df', client=None):
    """
    Convenience function to construct an ibis table from a DataFrame.

    EXPERIMENTAL API

    Parameters
    ----------
    df : DataFrame
    name : str, default 'df'
    client : Client, default new PandasClient
        client dictionary will be mutated with the
        name of the DataFrame

    Returns
    -------
    Table
    """
    if client is not None:
        # Register the frame on the existing client and expose it as a table.
        client.dictionary[name] = df
        return client.table(name)
    # No client supplied: spin up a fresh pandas backend holding only df.
    return connect({name: df}).table(name)
import argparse
def make_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the site-data tool."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("site_name",
        help="name of the site you want to get data for")
    add("--no-expand-meta", action="store_true",
        help="don't include links that use the old domain name structure")
    add("-d", "--download", action="store_true",
        help="redownload data, even if it exists in the cache")
    add("--min", type=int, default=0,
        help="minimum sized networks to include in output")
    add("--max", type=int, default=float("inf"),
        help="maximum sized networks to include in output")
    add("-o", "--output", default="{site_name}",
        help="output file name")
    add("--cache-dir", default=".cache/",
        help="cache directory")
    return parser
import os
def get_files_to_parse(relative_path):
    """Walks through given directory and returns all files ending
    with an accepted file extension.

    Arguments:
        relative_path {string} -- path to pull files from recursively

    Returns:
        List<String> -- list of filenames with fullpath
    """
    filepath = os.path.realpath(relative_path)
    if os.path.isfile(filepath):
        return [filepath]
    files = []
    for dirpath, _dirnames, filenames in os.walk(filepath):
        for name in filenames:
            # Extension = text after the last dot; a dotless filename
            # yields the whole name, matching the original split logic.
            if name.split(".")[-1] not in ACCEPTED_FILE_EXTENSIONS:
                continue
            # Fixed: the original joined dirpath twice
            # (os.path.join(r, os.path.join(r, file))), which only worked
            # because join() discards the first arg when the second is
            # absolute. Join once.
            files.append(os.path.join(dirpath, name))
    return files
import os
async def stat_data(full_path: str, isFolder=False) -> dict:
    """
    Return basic filesystem metadata for *full_path*.

    Only call this on a validated full path.
    """
    info = os.stat(full_path)
    return {
        'name': os.path.basename(full_path),
        'path': full_path,
        # st_mtime is in seconds; clients expect milliseconds.
        'mtime': int(info.st_mtime * 1000),
        'size': info.st_size,
        'isFolder': isFolder,
    }
import os
def start_browser(cfg):
    """
    Launch Firefox configured to save PDFs straight into the local
    ``data`` folder instead of showing the "Save PDF" dialog.
    """
    options = Options()
    if cfg.headless:
        options.headless = True
        options.add_argument('--window-size=1920,1200')
    profile = webdriver.FirefoxProfile()
    mime_types = ('application/octet-stream, application/pdf, '
                  'application/x-www-form-urlencoded')
    prefs = {
        'general.useragent.override': cfg.user_agent,
        # folderList=2 -> use the custom browser.download.dir below.
        'browser.download.folderList': 2,
        'browser.download.manager.showWhenStarting': False,
        'browser.download.manager.useWindow': False,
        # Disable the built-in PDF viewer so PDFs download instead.
        'pdfjs.disabled': True,
        'browser.download.dir': os.path.join(os.getcwd(), 'data'),
        'browser.helperApps.neverAsk.openFile': mime_types,
        'browser.helperApps.neverAsk.saveToDisk': mime_types,
    }
    for key, value in prefs.items():
        profile.set_preference(key, value)
    return webdriver.Firefox(executable_path=gecko_path(), options=options,
                             firefox_profile=profile)
def filter_list(prev_list, current_list, zeta):
    """
    Apply the moving-average filter element-wise, pairing each current
    value with the previous value at the same index.
    """
    return [
        moving_average_filter(current, prev_list[idx], zeta)
        for idx, current in enumerate(current_list)
    ]
def has_prefix(sub_s):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :return: (bool) True if any dictionary word starts with sub_s
    """
    return any(word.startswith(sub_s) for word in dict_list)
import os


def get_dir_size_recursive(directoryPath):
    """
    Return the size of a directory's contents (recursive) in bytes.

    :param directoryPath: string, path of directory to be analyzed
    :return: int, size of sum of files in directory in bytes
    """
    # Fixed: the original did `from sys import path` (sys.path is a list
    # with no join/getsize) and used an unimported `walk`, so it raised
    # at runtime. os.walk/os.path are the intended APIs.
    total_size = 0
    for dirpath, _dirnames, filenames in os.walk(directoryPath):
        for name in filenames:
            total_size += os.path.getsize(os.path.join(dirpath, name))
    return total_size
import yaml
def main():
    """Load the extra-parameters YAML file and validate its contents."""
    try:
        # Read the parameters configuration file (YAML).
        with open(setupcfg.extraParam, "r") as stream:
            try:
                loaded = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        # Validate whatever was loaded; a YAML failure above leaves
        # `loaded` unbound, which the outer handler logs and re-raises.
        return _check_param(loaded)
    except Exception:
        _logger.exception(
            f"Something goes wrong when loading extra parameters file -{setupcfg.extraParam}-."
        )
        raise
def primary_key(field_type):
    """
    Return the field treated as the "primary key" for this type.

    Priority order: non-null ID field, then any ID field, then the first
    non-null field, then simply the first field of the type.

    @param {object_type_definition} field_type
    @returns {FieldDefinition} primary key field
    """
    # `or` short-circuits exactly like the original cascade of
    # `if not pk:` checks — each fallback runs only when the previous
    # lookup returned a falsy result.
    return (first_non_null_and_id_field(field_type)
            or first_id_field(field_type)
            or first_non_null_field(field_type)
            or first_field(field_type))
def get_discussion_data_list_with_percentage(session: Session, doi, limit: int = 20, min_percentage: float = 1,
                                             dd_type="lang"):
    """Get discussion-data values of one type with counts and percentages.

    Runs one of two SQL variants against PostgreSQL: a global aggregation
    over pre-counted data when no ``doi`` is given, or a per-publication
    aggregation over raw data points when a ``doi`` filters the query.
    Each result row is ``(value, count, p)`` where ``p`` is the value's
    share of the total in percent (rounded to one decimal); a synthetic
    ``'total'`` row with ``p = 100`` is appended via UNION.

    Args:
        session: SQLAlchemy session to execute against.
        doi: publication DOI to filter on, or a falsy value for global stats.
        limit: maximum number of distinct values to return.
        min_percentage: drop rows whose share is below this percentage.
        dd_type: discussion-data type to aggregate (e.g. "lang").

    Returns:
        List of ``(value, count, p)`` rows, ordered by count descending.
    """
    # Global variant: uses the pre-aggregated counted_discussion_data table.
    # 'und'/'unknown' placeholder values are excluded from both the rows
    # and the total.
    query = """
        WITH result AS
             (
                 (
                     SELECT "value",
                            count as c,
                            ROUND(count / CAST(SUM(count) OVER () AS FLOAT) * 1000) / 10 as p
                     FROM counted_discussion_data
                     JOIN discussion_data as dd ON (discussion_data_point_id = dd.id)
                     WHERE type = :type and value != 'und' and value != 'unknown'
                     ORDER BY c DESC
                     LIMIT :limit
                 )
                 UNION
                 (
                     SELECT 'total' as "value", SUM(count) as c, 100 as p
                     FROM counted_discussion_data
                     JOIN discussion_data as dd ON (discussion_data_point_id = dd.id)
                     WHERE type = :type and value != 'und' and value != 'unknown'
                 )
             )
        SELECT "value", c as count, p
        FROM result
        WHERE result.p >= :mp
        ORDER BY count DESC; 
    """
    params = {
        'type': dd_type,
        'limit': limit,
        'mp': min_percentage
    }
    if doi:
        # Per-publication variant: aggregates raw discussion_data_point rows
        # restricted to the given DOI instead of the pre-counted table.
        query = """
            WITH result AS
                 (
                     (
                         SELECT "value",
                                SUM(count) as c,
                                ROUND(SUM(count) / CAST(SUM(SUM(count)) OVER () AS FLOAT) * 1000) / 10 as p
                         FROM (SELECT "value", "count"
                               FROM discussion_data_point as ddp
                                        JOIN discussion_data as dd ON (ddp.discussion_data_point_id = dd.id)
                               WHERE type = :type and value != 'und' and value != 'unknown'
                                 AND publication_doi=:doi
                              ) temp
                         GROUP BY "value"
                         ORDER BY c DESC
                         LIMIT :limit
                     )
                     UNION
                     (
                         SELECT 'total' as "value", SUM(count) as c, 100 as p
                         FROM discussion_data_point as ddp
                                  JOIN discussion_data as dd ON (ddp.discussion_data_point_id = dd.id)
                         WHERE type = :type and value != 'und' and value != 'unknown'
                           AND publication_doi=:doi
                     )
                 )
            SELECT "value", c as count, p
            FROM result
            WHERE result.p >= :mp
            ORDER BY count DESC; 
        """
        params['doi'] = doi
    s = text(query)
    # print(query)
    # print(params)
    # Bind only the parameters the chosen query actually references.
    if 'doi' in params:
        s = s.bindparams(bindparam('type'), bindparam('limit'), bindparam('mp'), bindparam('doi'))
    else:
        s = s.bindparams(bindparam('type'), bindparam('limit'), bindparam('mp'))
    return session.execute(s, params).fetchall()
import collections.abc
import torch
import time


def validate(config, model, val_iterator, criterion, scheduler=None):
    """Runs one standard validation pass over the val_iterator.

    This function automatically measures timing for various operations such
    as host to device transfer and processing time for the batch.

    It also automatically detects and places the data on the given GPU device
    if available.

    Raises:
        ValueError if multiple models/schedulers are provided. You
            are expected to have a custom validation function if you wish
            to use multiple models/schedulers.

    Args:
        config: (dict): A user configuration provided into the Trainer
            constructor.
        model: The model as created by the model_creator.
        val_iterator: An iterator created from the DataLoader which
            wraps the provided Dataset.
        criterion: The loss object created by the loss_creator.
        scheduler (optional): The torch.optim.lr_scheduler object
            as created by the scheduler_creator. By default,
            this is not used in this function.

    Returns:
        A dict of metrics from the evaluation.
    """
    # collections.Iterable was removed in Python 3.10 (deprecated since
    # 3.3); collections.abc.Iterable is the supported spelling.
    if isinstance(model, collections.abc.Iterable) or isinstance(
            scheduler, collections.abc.Iterable):
        raise ValueError(
            "Need to provide custom validation function if using multi-model "
            "or multi-scheduler training.")
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()
    correct = 0
    total = 0
    batch_idx = 0
    with torch.no_grad():
        end = time.time()
        for batch_idx, (features, target) in enumerate(val_iterator):
            if torch.cuda.is_available():
                features = features.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)

            # compute output
            output = model(features)
            loss = criterion(output, target)
            _, predicted = torch.max(output.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()

            # measure accuracy and record loss
            losses.update(loss.item(), features.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            # In test mode, one batch is enough to exercise the code path.
            if config.get(TEST_MODE) and batch_idx == 0:
                break

    stats = {
        BATCH_COUNT: batch_idx + 1,
        "batch_time": batch_time.avg,
        "validation_loss": losses.avg,
        "mean_accuracy": correct / total,
        "mean_loss": losses.sum / total,
    }
    return stats
import os
def fix_path(file_path):
    """Resolve *file_path* relative to this script's own directory so
    project files can be located regardless of the working directory."""
    return os.path.normpath(os.path.join(os.path.dirname(__file__), file_path))
import json
import subprocess
import sys


def cmd(cmd_name, source, args: list = None, version=None, params=None):
    """Wrap command interaction for easier use with python objects.

    Runs ``/opt/resource/<cmd_name>`` with *args*, feeding it a JSON
    payload of ``source``/``version``/``params`` on stdin, and returns
    the command's JSON output decoded into Python objects.
    """
    # Avoid the mutable-default-argument pitfall: substitute fresh
    # empties per call instead of sharing [] / {} across invocations.
    if args is None:
        args = []
    in_json = json.dumps({
        "source": source,
        "version": version if version is not None else {},
        "params": params if params is not None else {},
    })
    command = ['/opt/resource/' + cmd_name] + args
    output = subprocess.check_output(command,
                                     stderr=sys.stderr, input=bytes(in_json, 'utf-8'))
    return json.loads(output.decode())
def geq_indicate(var, indicator, var_max, thr):
    """Generate LP constraints making *indicator* 1 iff ``var >= thr``, else 0.

    Parameters
    ----------
    var : str
        Variable on which thresholding is performed.
    indicator : str
        Identifier of the indicator variable.
    var_max : int
        An upper bound on var.
    thr : int
        Comparison threshold.

    Returns
    -------
    List[str]
        A list holding the two constraints (lower then upper bound).
    """
    lower = f"- {var} + {thr} {indicator} <= 0"
    upper = f"- {var} + {var_max - thr + 1} {indicator} >= -{thr - 1}"
    return [lower, upper]
import re


def parse_manpage_number(path):
    """
    Parse the man-section number out of a man-page path.

    Returns the digit as a string (e.g. "3" for ".../man3/foo.3.gz"),
    or None when the path contains no ``man<digit>`` directory — the
    original's dead ``only_number = ""`` accumulator was never returned,
    so None has always been the no-match result.
    """
    match = re.search(r".*/man(\d).*", path)
    return match.group(1) if match else None
def sample_coordinates_from_coupling(c, row_points, column_points, num_samples=None, return_all=False, thr=10**(-6)):
    """
    Draw [x, y] coordinate pairs from the coupling matrix *c*.

    When *return_all* is True, every pair whose coupling value exceeds
    *thr* is returned instead of random samples.
    """
    index_pairs = sample_indices_from_coupling(
        c, num_samples=num_samples, return_all=return_all, thr=thr)
    coordinates = [[row_points[pair[0], :], column_points[pair[1], :]]
                   for pair in index_pairs]
    return np.array(coordinates)
import ast
def is_suppress_importerror(node: ast.With):
    """
    Return whether the given ``with`` block contains a
    :func:`contextlib.suppress(ImportError) <contextlib.suppress>` (or
    ``contextlib2.suppress`` / ``ModuleNotFoundError``) contextmanager.

    .. versionadded:: 0.5.0  (private)

    :param node:
    """  # noqa: D400
    suppress_names = {"suppress", "contextlib.suppress", "contextlib2.suppress"}
    for item in node.items:
        call = item.context_expr
        if not isinstance(call, ast.Call):
            continue
        try:
            func_name = '.'.join(get_attribute_name(call.func))
        except NotImplementedError:  # pragma: no cover
            continue
        if func_name not in suppress_names:
            continue
        for arg in call.args:
            try:
                arg_name = '.'.join(get_attribute_name(arg))
            except NotImplementedError:  # pragma: no cover
                continue
            if arg_name in {"ImportError", "ModuleNotFoundError"}:
                return True
    return False
def random_flip_left_right(data):
    """Randomly flip an image or batch of images left/right uniformly.

    Args:
        data: tensor of shape (H, W, C) or (N, H, W, C)
    Returns:
        Randomly flipped data with the original shape restored.
    """
    flattened, channels, batch = _concat_batch(data)
    flipped = tf.image.random_flip_left_right(flattened)
    return _unconcat_batch(flipped, channels, batch)
from sys import path
import multiprocessing
import shutil
def run_cnfs(fets, args, sims):
    """ Trains a model for each provided configuration.

    Args:
        fets: iterable of feature lists; one training configuration is
            built per feature list.
        args: parsed CLI namespace whose attributes seed every config
            (must include ``out_dir``).
        sims: simulations passed through to each configuration.

    Returns:
        dict mapping tuple(features) -> accuracy (1 - loss) per config.
    """
    # Assemble configurations.
    cnfs = [
        {**vars(args), "features": fets_, "sims": sims, "sync": True,
         "out_dir": path.join(args.out_dir, subdir),
         "tmp_dir": path.join("/tmp", subdir)}
        for fets_, subdir in zip(
            fets,
            # Create a subdirectory name for each list of features.
            # Spaces and slashes are sanitized so names are filesystem-safe.
            [",".join([
                str(fet).replace(" ", "_").replace("/", "p")
                for fet in fets_])
             for fets_ in fets])]
    # Train configurations — serially when defaults.SYNC, otherwise in a
    # 4-worker process pool.
    if defaults.SYNC:
        res = [train.run_trials(cnf) for cnf in cnfs]
    else:
        with multiprocessing.Pool(processes=4) as pol:
            res = pol.map(train.run_trials, cnfs)
    # Remove temporary subdirs. Missing dirs are fine — a trial may not
    # have created its tmp_dir.
    for cnf in cnfs:
        try:
            shutil.rmtree(cnf["tmp_dir"])
        except FileNotFoundError:
            pass
    # Note that accuracy = 1 - loss.
    # NOTE(review): assumes run_trials returns a scalar loss per config —
    # confirm against train.run_trials.
    return dict(zip(
        [tuple(cnf["features"]) for cnf in cnfs], 1 - np.array(res)))
import argparse
def ParseArgs(argv):
    """Parse *argv* and return the resulting argparse namespace."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-b', '--bundle-identifier', required=True,
                        help='bundle identifier for the application')
    parser.add_argument('-o', '--output', default='-',
                        help='path to the result; - means stdout')
    return parser.parse_args(argv)
import os
def GetCurrentBaselinePath():
    """Return the baseline folder path matching the currently running test.

    Derives the test's directory from the PYTEST_CURRENT_TEST environment
    variable (format ``path::testname``) and joins it under baselinePath.
    """
    current_test = os.getenv('PYTEST_CURRENT_TEST')
    test_dir = os.path.dirname(current_test.split(":")[0])
    return baselinePath + "/" + test_dir + "/"
def get_all_lobbyists(official_id, cycle=None, api_key=None):
    """
    Query the OpenSecrets candContrib endpoint for the given candidate.

    https://www.opensecrets.org/api/?method=candContrib&cid=N00007360&cycle=2020&apikey=__apikey__
    """
    if cycle is None:
        # NOTE(review): cycle semantics are unclear upstream; 2020 is the
        # hard-coded fallback the original author chose.
        cycle = 2020
    # if API key none, get it from some sort of appwide config defined above
    client = Wrapper(api_key)
    return client.get({'method': 'candContrib', 'cid': official_id, 'cycle': cycle})
def get_sale(this_line):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.

    ``this_line`` is consumed left to right: consignor name, consignor
    city, cattle description (optionally prefixed with a head count),
    average weight, then price per cwt (commas stripped).
    """
    sale = {}
    sale['consignor_name'] = this_line.pop(0)
    sale['consignor_city'] = this_line.pop(0).title()
    try:
        maybe_head = this_line[0].split()
        int(maybe_head[0])
        sale['cattle_head'] = maybe_head[0]
        sale['cattle_cattle'] = ' '.join(maybe_head[1:])
        this_line.pop(0)
    except (ValueError, IndexError):
        # Narrowed from a bare except: only "first token is not an int"
        # (ValueError) or "field is empty" (IndexError) mean there is no
        # head count; anything else should surface as a real error.
        sale['cattle_cattle'] = this_line.pop(0)
    sale['cattle_avg_weight'] = this_line.pop(0)
    price_string = this_line.pop(0)
    sale['cattle_price_cwt'] = price_string.replace(',', '')
    return sale
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.