content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import gdata.gauth
def credentials_to_token(credentials):
    """Convert an OAuth2 ``credentials`` object into a legacy gdata
    ``OAuth2Token`` instance.

    The credentials are refreshed first so the returned token carries a
    currently valid access token.
    """
    # Refresh before copying fields so access_token is not stale.
    credentials.refresh(httplib2.Http())
    return gdata.gauth.OAuth2Token(
        client_id=credentials.client_id,
        client_secret=credentials.client_secret,
        scope=credentials.scope,
        user_agent='Google App Engine / Ferris Framework',
        access_token=credentials.access_token,
        refresh_token=credentials.refresh_token,
    )
def solve2x2(lhs, rhs):
    """Solve a square 2 x 2 system via LU factorization with partial pivoting.

    Stand-in for LAPACK's ``dgesv`` (``dgetrf`` + ``dgetrs``), wrapped to
    avoid both exception-driven control flow (as in ``numpy.linalg.solve``)
    and redundant type/size checks for this known-shape special case.

    Args:
        lhs (numpy.ndarray): A ``2 x 2`` array of real numbers.
        rhs (numpy.ndarray): A 1D array of 2 real numbers.

    Returns:
        Tuple[bool, float, float]: ``(singular, x, y)`` where ``singular``
        flags a singular ``lhs`` (in which case ``x`` and ``y`` are None).
    """
    a, b = lhs[0, 0], lhs[0, 1]
    c, d = lhs[1, 0], lhs[1, 1]
    e, f = rhs[0], rhs[1]
    if np.abs(c) > np.abs(a):
        # Pivot on the second row; division by c is safe because |c| is
        # strictly larger than some magnitude.
        #   r = a / c
        #   (b - r*d) y = e - r*f,  then back-substitute into c x + d y = f
        r = a / c
        denom = b - r * d
        if denom == 0.0:
            return True, None, None
        y = (e - r * f) / denom
        x = (f - d * y) / c
        return False, x, y
    # First row is the pivot; a == 0 here means the whole first column is 0.
    if a == 0.0:
        return True, None, None
    #   r = c / a
    #   (d - r*b) y = f - r*e,  then back-substitute into a x + b y = e
    r = c / a
    denom = d - r * b
    if denom == 0.0:
        return True, None, None
    y = (f - r * e) / denom
    x = (e - b * y) / a
    return False, x, y
def fetch_global_notifications(count=0) -> dict:
    """
    Always returns notifications in user view.

    A ``count`` of 0 means "use the configured default maximum".
    """
    cfg = get_config()
    effective_count = count or cfg.default_max_notes
    feed = get_global_feed()
    return feed.get_notifications(count=effective_count, user_view=True)
def getClusterPositionsRedshift(hd_clu, cluster_params, redshift_limit):
    """
    Get positions and redshifts of the clusters passing the required criterion.
    @hd_clu :: list of cluster headers (each header has info on 1000 clusters alone)
    @cluster_params :: contains halo_mass_500c and central_Mvir
    @redshift_limit :: upper limit on redshift
    @Returns :: ([ra, dec, z] of central clusters, [ra, dec, z] of satellites)
    """
    cen_parts, sat_parts = [], []
    for header in hd_clu:
        # Select clusters from this header that pass the redshift cut.
        cen, sat = getClusterData(header, cluster_params, redshift_limit)
        cen_parts.append(cen)
        sat_parts.append(sat)
    # Concatenate the per-file [ra, dec, z] arrays into single arrays.
    n_files = len(hd_clu)
    pos_z_cen_clu = concatMultipleArrays(cen_parts, n_files)
    pos_z_sat_clu = concatMultipleArrays(sat_parts, n_files)
    return pos_z_cen_clu, pos_z_sat_clu
def lark_to_field_definition_node(tree: "Tree") -> "FieldDefinitionNode":
    """
    Creates and returns a FieldDefinitionNode instance extracted from the
    parsing of the tree instance.
    :param tree: the Tree to parse in order to extract the proper node
    :type tree: Tree
    :return: a FieldDefinitionNode instance extracted from the parsing of the
    tree
    :rtype: FieldDefinitionNode
    """
    info = _extract_node_info(
        tree.children,
        types_to_value=[
            "description",
            "name",
            "arguments_definition",
            "type",
            "directives",
        ],
    )
    # Optional parts fall back to None / empty lists; name and type are
    # mandatory, so an absent key should raise here.
    return FieldDefinitionNode(
        description=info.get("description"),
        name=info["name"],
        arguments=info.get("arguments_definition") or [],
        type=info["type"],
        directives=info.get("directives") or [],
        location=lark_to_location_node(tree.meta),
    )
def variant_wraps(vfunc, wrapped_attributes=VARIANT_WRAPPED_ATTRIBUTES):
    """Update the variant function wrapper a la ``functools.wraps``.

    Copies each attribute named in ``wrapped_attributes`` from the variant's
    main form onto ``vfunc``, skipping attributes the main form lacks.
    """
    f = vfunc.__main_form__
    # Unique sentinel distinct from None: None is a legitimate attribute
    # value that must still be copied.
    sentinel = object()
    for attr in wrapped_attributes:
        attr_val = getattr(f, attr, sentinel)
        # BUG FIX: the original tested ``attr is not sentinel`` — ``attr``
        # is the attribute *name* (a string), so the test was always true
        # and the sentinel itself got copied for missing attributes.
        if attr_val is not sentinel:
            setattr(vfunc, attr, attr_val)
    return vfunc
from typing import Iterable
def gradient_activity(activity, periods=1, append=True, columns=None):
    """Compute the gradient for all given columns.
    Read more in the :ref:`User Guide <gradient>`.
    Parameters
    ----------
    activity : DataFrame
        The activity to use to compute the gradient.
    periods : int or array-like, default=1
        Periods to shift to compute the gradient. If an array-like is given,
        several gradients will be computed.
    append : bool, optional
        Whether to append the gradients to the original activity.
    columns : list, optional
        The name of the columns to use to compute the gradient. By default, all
        the columns are used.
    Returns
    -------
    gradient : DataFrame
        The computed gradient from the activity.
    Examples
    --------
    >>> from skcycling.datasets import load_fit
    >>> from skcycling.io import bikeread
    >>> from skcycling.extraction import gradient_activity
    >>> ride = bikeread(load_fit()[0], drop_nan='columns')
    >>> new_ride = gradient_activity(ride)
    """
    # DOC FIX: the original example called ``acceleration(ride)``, not this
    # function.
    data = activity if columns is None else activity[columns]
    if isinstance(periods, Iterable):
        gradient = [data.diff(periods=p) for p in periods]
        gradient_name = ['gradient_{}'.format(p) for p in periods]
    else:
        gradient = [data.diff(periods=periods)]
        gradient_name = ['gradient_{}'.format(periods)]
    if append:
        # Prepend the untouched activity so callers keep the raw columns.
        gradient = [activity] + gradient
        gradient_name = ['original'] + gradient_name
    return pd.concat(gradient, axis=1, keys=gradient_name)
def password_validators_help_texts(password_validators=None):
    """
    Return a list of all help texts of all configured validators.

    Falls back to the default validator set when none is supplied.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    return [validator.get_help_text() for validator in password_validators]
import torch
def spherical_schwarzchild_metric(x, M=1):
    """Compute the Schwarzschild metric in spherical-like coordinates.

    Args:
        x: ``(batch, 4)`` tensor of coordinates ordered ``(t, r, theta, phi)``.
        M: black-hole mass; the Schwarzschild radius is ``2 * M`` (G = c = 1).

    Returns:
        ``(batch, 4, 4)`` tensor of diagonal metric matrices
        ``diag(-a, 1/a, r^2, r^2 sin^2(theta))`` with ``a = 1 - 2M/r``.
    """
    # BUG FIX: removed the stray debug ``print(g.shape)`` and the unused
    # ``bs, d = x.shape`` unpacking.
    t, r, theta, phi = x.T
    rs = 2 * M  # Schwarzschild radius
    a = 1 - rs / r
    gdiag = torch.stack([-a, 1 / a, r**2, r**2 * theta.sin()**2], dim=-1)
    return torch.diag_embed(gdiag)
def _get_pathless_grib_file_names(
        init_time_unix_sec, model_name, grid_id=None, lead_time_hours=None):
    """Returns possible pathless file names for the given model/grid.
    :param init_time_unix_sec: Model-initialization time.
    :param model_name: See doc for `nwp_model_utils.check_grid_name`.
    :param grid_id: Same.
    :param lead_time_hours: Lead time (valid time minus init time).
    :return: pathless_file_names: 1-D list of possible pathless file names.
    """
    prefixes = _get_pathless_file_name_prefixes(
        model_name=model_name, grid_id=grid_id)
    file_types = nwp_model_utils.model_to_grib_types(model_name)
    if model_name == nwp_model_utils.NARR_MODEL_NAME:
        # NARR files carry no forecast lead; force it to zero.
        lead_time_hours = 0
    # Time and lead strings are the same for every prefix/type combination,
    # so format them once.
    init_time_string = time_conversion.unix_sec_to_string(
        init_time_unix_sec, TIME_FORMAT_HOUR)
    lead_time_string = _lead_time_to_string(lead_time_hours)
    return [
        '{0:s}_{1:s}_{2:s}{3:s}'.format(
            this_prefix, init_time_string, lead_time_string,
            grib_io.file_type_to_extension(this_file_type))
        for this_prefix in prefixes
        for this_file_type in file_types
    ]
def get_picard_mrkdup(config):
    """
    input: sample config file output from BALSAMIC
    output: "rmdup" when config["QC"]["picard_rmdup"] is True, else "mrkdup"
    """
    # ``.get`` returns None for a missing key, and ``None == True`` is
    # False, so this reproduces the original membership check exactly.
    if config["QC"].get("picard_rmdup") == True:  # noqa: E712 — legacy truth test kept
        return "rmdup"
    return "mrkdup"
import random
def ChoiceColor():
    """Pick a random Bootstrap contextual colour name for use in templates."""
    palette = ["default", "primary", "success", "info", "warning", "danger"]
    return random.choice(palette)
import platform
def filter_command_line(line):
    """Returns and updates the command line starting with the flag.

    Returns None for lines whose flag is neither EXEC_FLAG nor ERRCHECK_FLAG.
    """
    stripped = line.strip()
    flag = stripped.split(":")[0].strip()
    command = stripped[len(flag) + 1:].strip().lstrip(":").strip()
    if flag == EXEC_FLAG:
        return command
    if flag == ERRCHECK_FLAG:
        # Pipe stderr into a comparison tool; the null-device name differs
        # per platform.
        if platform.system() == "Windows":
            suffix = " 2>&1 1>$NUL | compare %f"
        else:
            suffix = " 2>&1 1>/dev/null | compare %f"
        return command + suffix
    return None
def load_image(path: str):
    """
    Return a loaded image from a given path.
    :param path: (str) relative path of the image.
    :return: PIL image.
    """
    return Image.open(path)
from operator import concat
def mergeSeries(sdata, resetIdx=False):
    """
    Merge Series
    Inputs:
      > sdata: Either a list or dictionary of Series data
      > resetIdx (False by default): should we reset the indices?
    Output:
      > The merged Series, or None for unsupported input types
    """
    # BUG FIX: the original imported ``operator.concat`` (a two-argument
    # sequence helper) where pandas' ``concat`` was clearly intended, called
    # ``sdata.values`` without parentheses, and left unreachable code after
    # the early returns.  pandas is imported locally so this fix is
    # self-contained.
    from pandas import concat as _pd_concat
    if isinstance(sdata, list):
        parts = sdata
    elif isinstance(sdata, dict):
        parts = list(sdata.values())
    else:
        return None
    merged = _pd_concat(parts)
    if resetIdx:
        # Honour the previously ignored resetIdx flag.
        merged = merged.reset_index(drop=True)
    return merged
def ranking_overview(request):
    """
    Show history of rankings for top N teams in current ranking.

    Renders a PNG line chart (one line per top team) of each team's rank
    after every completed round, returned directly as the HTTP response.
    """
    # Check which rounds are complete
    rnd_complete = get_completed_rounds()
    # Calculate ranking after each of these rounds: ranking_matrix[j] is the
    # cumulative ranking using rounds 0..j.
    increment_rnds = []
    ranking_matrix = []
    round_names = []
    for comprnd in rnd_complete:
        increment_rnds.append(comprnd)
        ranking = get_ranked_results(increment_rnds)
        ranking_matrix.append(ranking)
        round_names.append(comprnd.round_name)
    # Team leading after most recent round: compose line with rankings over all other rounds
    # next team idem until all N lines composed
    N = 5 # Number of top teams to track
    if QTeam.objects.count() < N:
        # Limit N to the amount of teams
        N = QTeam.objects.count()
    top_positions = []
    team_names = []
    if ranking_matrix:
        for i in range(N):
            position_list = []
            # Find the name of the team that finished N + 1
            tn = ranking_matrix[-1][i][1]
            # Find the positions of this team over all rounds
            # NOTE(review): this matches by substring/membership (``tn in x``)
            # and takes the first hit — confirm team names cannot collide.
            for ranking in ranking_matrix:
                ti = [x for x in ranking if tn in x][0]
                position_list.append(ranking.index(ti) + 1)
            top_positions.append(position_list)
            team_names.append(tn)
    # matrix transpose for easier plotting (rows become per-round snapshots)
    position_sequence = [list(i) for i in zip(*top_positions)]
    # The image
    fig, ax1 = plt.subplots(1,1,figsize=(7,7))
    fig.set_tight_layout(True)
    # Invert y axis so that leading team is on top
    ax1.invert_yaxis()
    ax1.plot(position_sequence, linewidth=2)
    ind = np.arange(len(round_names))
    ax1.set_xticks(ind)
    ax1.set_xticklabels(round_names)
    ax1.tick_params(axis='both', which='both', labelbottom=True, labeltop=False, labelleft=True, labelright=True)
    ax1.set_xlabel(_("Round"))
    ax1.set_ylabel(_("Position"))
    ax1.legend(team_names, loc='best')
    ax1.grid(True)
    # Stream the rendered figure straight into the HTTP response.
    canvas = FigureCanvas(fig)
    response = HttpResponse(content_type='image/png')
    canvas.print_png(response)
    return response
def release_lock(lock_name, identifier):
    """Release a distributed Redis lock if ``identifier`` still owns it.

    :param lock_name: lock name
    :param identifier: uid of the presumed owner
    :return: True when the lock is absent or was released, False otherwise
    """
    key = "string:lock:" + lock_name
    pipe = redis_client.pipeline(True)
    while True:
        try:
            # WATCH makes the delete conditional on the key not changing.
            pipe.watch(key)
            current = redis_client.get(key)
            if not current:
                # Nothing to release — treat as success.
                return True
            if current.decode() == identifier:
                pipe.multi()
                pipe.delete(key)
                pipe.execute()
                return True
            # Someone else owns the lock; stop trying.
            pipe.unwatch()
            break
        except redis.exceptions.WatchError:
            # The key changed under us; retry the watch cycle.
            pass
    return False
def add_user():
    """ Add a user"""
    payload = request.json
    # Reject the request on the first schema key missing from the payload.
    for required_key in users_schema:
        if required_key not in payload.keys():
            return jsonify({"message": f"Missing {required_key} parameter"}), 400
    existing = db.users.find_one({"email": payload["email"]})
    if existing is not None:
        body = {
            "success": False,
            "message": f'Duplicate email detected. User {payload["email"]} already exists.',
        }
        return jsonify(body), 400
    db.users.insert_one(payload)
    return jsonify({"success": True, "user": clean_dict_helper(payload)}), 201
import hashlib
def __get_str_md5(string):
    """
    Return the hexadecimal MD5 digest of ``string`` (UTF-8 encoded).
    """
    digest = hashlib.md5(string.encode('utf-8'))
    return digest.hexdigest()
from typing import List
def create_optimizers() -> List[OptimizationProcedure]:
    """Creates a list of all optimization procedures.

    For every direct subclass of TaskOrderingRule, both a station-oriented
    and a task-oriented strategy are created, followed by GRASP variants.
    """
    optimizers: List[OptimizationProcedure] = []
    for rule_cls in TaskOrderingRule.__subclasses__():
        optimizers.append(StationOrientedStrategy(rule_cls()))
        optimizers.append(TaskOrientedStrategy(rule_cls()))
    optimizers.extend(GRASP(num_iter) for num_iter in (5, 10))
    return optimizers
import urllib
def command_get_file_list(ip_addr, directory):
    """command.cgi?op=100: Get list of files in a directory. Not recursive.
    :raise FlashAirBadResponse: When API returns unexpected/malformed data.
    :raise FlashAirDirNotFoundError: When the queried directory does not exist on the card.
    :raise FlashAirHTTPError: When API returns non-200 HTTP status code.
    :raise FlashAirNetworkError: When there is trouble reaching the API.
    :raise FlashAirURLTooLong: When the queried directory path is too long.
    :param str ip_addr: IP address of FlashAir to connect to.
    :param str directory: Remote directory to get file list from.
    :return: Unprocessed text response.
    :rtype: str
    """
    # BUG FIX: the module only does ``import urllib``, which does not
    # guarantee the ``urllib.parse`` submodule is bound; import it
    # explicitly so quote()/urlsplit() below cannot AttributeError.
    import urllib.parse
    url = 'http://{}/command.cgi?op=100&DIR={}'.format(ip_addr, urllib.parse.quote(directory))
    # Hit API.
    status_code, text = http_get_post(url)
    if status_code == 404:
        raise exceptions.FlashAirDirNotFoundError(directory, status_code, text)
    if status_code != 200:
        # Distinguish an over-long query string (FlashAir limit) from a
        # generic HTTP failure.
        split = urllib.parse.urlsplit(url)
        if len('{}?{}'.format(split.path, split.query)) > 280:
            raise exceptions.FlashAirURLTooLong(url, status_code, text)
        raise exceptions.FlashAirHTTPError(status_code, status_code, text)
    if not text.startswith('WLANSD_FILELIST'):
        raise exceptions.FlashAirBadResponse(text, status_code, text)
    return text
def axml_content(d):
    """
    OwcContent dict to Atom XML.

    Builds ``<owc:content type=".." href=".." title="..">text</owc:content>``
    from the optional keys of ``d``; returns None for empty input or on
    encoding failure.

    :param d: OwcContent dict
    :return: etree element, or None
    """
    if is_empty(d):
        return None
    try:
        content_elem = etree.Element(ns_elem("owc", "content"), nsmap=ns)
        mimetype = extract_p('type', d, None)
        if mimetype is not None: content_elem.set("type", mimetype)
        url = extract_p('url', d, None)
        if url is not None: content_elem.set("href", url)
        title = extract_p('title', d, None)
        if title is not None: content_elem.set("title", title)
        content = extract_p('content', d, None)
        # BUG FIX: the original wrote ``if content is None``, which assigned
        # None as the element text and dropped any real content.
        if content is not None: content_elem.text = content
        return content_elem
    except Exception as ex:
        log.warn('could not encode content', ex)
        return None
def average_water_consumed(wn):
    """
    Compute average water consumed at each node, qbar, computed as follows:
    .. math:: qbar=\dfrac{\sum_{k=1}^{K}\sum_{t=1}^{lcm_n}qbase_n m_n(k,t mod (L(k)))}{lcm_n}
    where
    :math:`K` is the number of demand patterns at node :math:`n`,
    :math:`L(k)` is the number of time steps in pattern :math:`k`,
    :math:`lcm_n` is the least common multiple of the demand patterns time steps for node :math:`n`,
    :math:`qbase_n` is the base demand at node :math:`n` and
    :math:`m_n(k,t mod L(k))` is the demand multiplier specified in pattern :math:`k` for node :math:`n` at time :math:`t mod L(k)`.
    For example, if a node has two demand patterns specified in the EPANET input (INP) file, and
    one pattern repeats every 6 hours and the other repeats every 12 hours, the first
    pattern will be repeated once, making its total duration effectively 12 hours.
    If any :math:`m_n(k,t mod L(k))` value is less than 0, then that node's population is 0.
    Parameters
    -----------
    wn : WaterNetworkModel
    Returns
    -------
    qbar : pd.Series
        A pandas Series that contains average water consumed per node, in m3/s
    """
    # NOTE(review): ``pd.Series()`` without dtype warns on modern pandas —
    # confirm the targeted pandas version.
    qbar = pd.Series()
    # Only junctions carry consumer demand.
    for name, node in wn.nodes(Junction):
        # Future release should support mutliple base demand and demand patterns per node
        numdemands = 1
        L = {}
        pattern = {}
        for i in range(numdemands):
            # Fall back to the network-wide default pattern when the node
            # has none of its own.
            pattern_name = node.demand_pattern_name
            if not pattern_name:
                pattern_name = wn.options.pattern
            pattern[i] = wn.get_pattern(pattern_name)
            L[i] = len(pattern[i])
        # lcm of all pattern lengths = the horizon over which every pattern
        # completes a whole number of cycles.
        lcm_n = _lcml(L.values())
        qbar_n = 0
        for i in range(numdemands):
            base_demand = node.base_demand
            for t in range(lcm_n):
                # Multiplier at time t, wrapping around the pattern length.
                m = pattern[i][np.mod(t,len(pattern[i]))]
                qbar_n = qbar_n + base_demand*m/lcm_n
        qbar[name] = qbar_n
    return qbar
def _shuffle(arr1, arr2):
"""
Shuffles arr1 and arr2 in the same order
"""
random_idxs = np.arange(len(arr1))
np.random.shuffle(random_idxs)
return arr1[random_idxs], arr2[random_idxs] | 785ecd0b9e92d5695cd2466fec6649d463f55feb | 3,632,523 |
from datetime import datetime
import pytz
def assign_output(request):
    """Assigns the given files as version outputs for the given entity.

    Uploads each posted file as an output Link of the Version (``entity``),
    tags it, optionally attaches it to a Daily, and returns JSON metadata
    for the created links.
    """
    logger.debug('assign_output')
    logged_in_user = get_logged_in_user(request)
    full_paths = request.POST.getall('full_paths[]')
    original_filenames = request.POST.getall('original_filenames[]')
    entity_id = request.params.get('entity_id', -1)
    entity = Entity.query.filter_by(id=entity_id).first()
    daily_id = request.params.get('daily_id', -1)
    daily = Daily.query.filter_by(id=daily_id).first()
    # Tags
    tags = get_tags(request)
    logger.debug('daily_id : %s' % daily_id)
    logger.debug('full_paths : %s' % full_paths)
    logger.debug('original_filenames : %s' % original_filenames)
    logger.debug('entity_id : %s' % entity_id)
    logger.debug('entity : %s' % entity)
    logger.debug('tags : %s' % tags)
    version_id = entity.id
    version_number = entity.version_number
    version_take_name = entity.take_name
    version_published = entity.is_published
    links = []
    if entity and full_paths:
        mm = MediaManager()
        for full_path, original_filename in zip(full_paths, original_filenames):
            with open(full_path, 'rb') as f:
                link = mm.upload_version_output(entity, f, original_filename)
                link.created_by = logged_in_user
                # BUG FIX: the module does ``from datetime import datetime``,
                # so the original ``datetime.datetime.now(pytz.utc)`` raised
                # AttributeError; call ``datetime.now`` directly.
                date_created = datetime.now(pytz.utc)
                link.date_created = date_created
                link.date_updated = link.date_created
                for tag in tags:
                    if tag not in link.tags:
                        link.tags.extend(tags)
                DBSession.add(link)
                links.append(link)
                if daily:
                    if link not in daily.links:
                        daily.links.append(link)
    # to generate ids for links
    transaction.commit()
    DBSession.add_all(links)
    # return new links as json data
    # in response text
    return [
        {
            'id': link.id,
            'original_filename': link.original_filename,
            'hires_full_path': link.full_path,
            'webres_full_path': link.thumbnail.full_path,
            'thumbnail_full_path': link.thumbnail.thumbnail.full_path
            if link.thumbnail.thumbnail else link.thumbnail.full_path,
            'tags': [tag.name for tag in link.tags],
            'version_id': version_id,
            'version_number': version_number,
            'version_take_name': version_take_name,
            'version_published':version_published
        } for link in links
    ]
def make_pipeline(tfidf_vectorizer, model):
    """ Creates sklearn NLP pipeline
    :param tfidf_vectorizer: TF-IDF vectorizer object
    :param model: decomposition model object (e.g. NMF)
    :return: Pipeline object chaining the vectorizer and the model
    """
    # FIX: removed the original no-op self-assignments
    # (``tfidf_vectorizer = tfidf_vectorizer``; ``model = model``) and the
    # docstring's wrong parameter name (``vectorizer``).
    return Pipeline([("tfidf", tfidf_vectorizer),
                     ("nmf", model)])
def pnf_peeling_mechanism(item_counts, k, epsilon):
    """Computes epsilon-DP top-k counts by the permute-and-flip peeling mechanism.
    The peeling mechanism (https://arxiv.org/pdf/1905.04273.pdf) adaptively uses
    the counts as a utility function for the exponential mechanism. Once an item
    is selected, the item is removed from the item set and we repeat this
    procedure with the remaining items until k items are selected. Here we use
    permute-and-flip as a replacement for the exponential mechanism as
    permute-and-flip dominates it for pure DP (https://arxiv.org/abs/2010.12603).
    We further use the exponential noise implementation of permute-and-flip for
    speed (https://arxiv.org/abs/2105.07260). Contribution bound c and
    NeighborType are not input parameters because the peeling mechanism has the
    same definition regardless of c or the neighboring relation used.
    Args:
      item_counts: Array of integers defining item counts.
      k: An integer indicating the number of desired items.
      epsilon: The overall mechanism will be epsilon-DP.
    Returns:
      An array containing the indices of the items with the top-k noisy counts.
    """
    already_chosen = np.zeros(item_counts.size, dtype=bool)
    winners = np.empty(k, dtype=int)
    for round_idx in range(k):
        # Fresh exponential noise each round; previously chosen items are
        # masked out of the argmax.
        noisy_counts = item_counts + np.random.exponential(
            scale=k / epsilon, size=item_counts.size)
        candidates = np.ma.array(noisy_counts, mask=already_chosen)
        winners[round_idx] = np.argmax(candidates)
        already_chosen[int(winners[round_idx])] = True
    return winners
from datetime import datetime
def format_date(timestamp):
    """Reusable timestamp -> ISO date string (local calendar date).

    BUG FIX: the module does ``from datetime import datetime``, so the
    original ``datetime.date.fromtimestamp(...)`` accessed the ``date``
    *method* of the datetime class (AttributeError); go through
    ``datetime.fromtimestamp(...).date()`` instead.
    """
    return datetime.fromtimestamp(timestamp).date().isoformat()
import sys
def getpwuid(uid):
    """
    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
                      pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given numeric user ID.
    See pwd.__doc__ for more on password database entries.
    """
    # NOTE(review): Python 2 / Jython code — ``sys.maxint`` does not exist
    # on Python 3, and ``_posix_impl`` is presumably the platform-provided
    # POSIX bridge module; confirm the runtime target.
    if uid > sys.maxint or uid < 0:
        # Out-of-range uids cannot exist; mirror pwd's KeyError contract.
        raise KeyError(uid)
    entry = _posix_impl.getpwuid(uid)
    if not entry:
        # No such user.
        raise KeyError(uid)
    return struct_passwd(entry)
import time
def train(model, optimizer, loader, epoch):
    """
    Train the models on the dataset.

    Runs one full epoch of supervised training with cross-entropy loss and
    returns ``(epoch, avg_loss, avg_top1, avg_top5)``.  Logging happens only
    on rank 0, every 50 iterations.
    """
    # running statistics
    batch_time = AverageMeter("time", ":.2f")
    data_time = AverageMeter("data time", ":.2f")
    # training statistics
    top1 = AverageMeter("top1", ":.3f")
    top5 = AverageMeter("top5", ":.3f")
    losses = AverageMeter("loss", ":.3e")
    end = time.perf_counter()
    model.train()
    criterion = nn.CrossEntropyLoss().cuda()
    for iter_epoch, (inp, target) in enumerate(loader):
        # measure data loading time
        data_time.update(time.perf_counter() - end)
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # forward
        output = model(inp)
        # compute cross entropy loss
        loss = criterion(output, target)
        # compute the gradients
        optimizer.zero_grad()
        loss.backward()
        # step
        optimizer.step()
        # update stats (weighted by batch size via inp.size(0))
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), inp.size(0))
        top1.update(acc1[0], inp.size(0))
        top5.update(acc5[0], inp.size(0))
        batch_time.update(time.perf_counter() - end)
        end = time.perf_counter()
        # verbose
        if args.rank == 0 and iter_epoch % 50 == 0:
            # param_groups[0] is the trunk, param_groups[1] the head —
            # matching how the optimizer was constructed elsewhere.
            logger.info(
                "Epoch[{0}] - Iter: [{1}/{2}]\t"
                "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                "Prec {top1.val:.3f} ({top1.avg:.3f})\t"
                "LR trunk {lr}\t"
                "LR head {lr_W}".format(
                    epoch,
                    iter_epoch,
                    len(loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    top1=top1,
                    lr=optimizer.param_groups[0]["lr"],
                    lr_W=optimizer.param_groups[1]["lr"],
                )
            )
    return epoch, losses.avg, top1.avg.item(), top5.avg.item()
import functools
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if resample == 'down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim // 2)
conv_1b = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=input_dim // 2, output_dim=output_dim // 2, stride=2)
conv_2 = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=output_dim // 2, output_dim=output_dim)
elif resample == 'up':
conv_shortcut = SubpixelConv2D
conv_1 = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim // 2)
conv_1b = functools.partial(
lib.ops.deconv2d.Deconv2D, input_dim=input_dim // 2, output_dim=output_dim // 2)
conv_2 = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=output_dim // 2, output_dim=output_dim)
elif resample == None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim // 2)
conv_1b = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=input_dim // 2, output_dim=output_dim // 2)
conv_2 = functools.partial(
lib.ops.conv2d.Conv2D, input_dim=input_dim // 2, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim == input_dim and resample == None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name + '.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
output = tf.nn.relu(output)
output = conv_1(name + '.Conv1', filter_size=1,
inputs=output, he_init=he_init, weightnorm=False)
output = tf.nn.relu(output)
output = conv_1b(name + '.Conv1B', filter_size=filter_size,
inputs=output, he_init=he_init, weightnorm=False)
output = tf.nn.relu(output)
output = conv_2(name + '.Conv2', filter_size=1, inputs=output,
he_init=he_init, weightnorm=False, biases=False)
output = Batchnorm(name + '.BN', [0, 2, 3], output)
return shortcut + (0.3 * output) | 1b78b86ea42dd225f5a2bde6bcb6ed47b055ec00 | 3,632,530 |
def run_uGLAD_direct(
    Xb,
    trueTheta=None,
    eval_offset=0.1,
    EPOCHS=250,
    lr=0.002,
    INIT_DIAG=0,
    L=15,
    VERBOSE=True
    ):
    """Running the uGLAD algorithm in direct mode
    Args:
        Xb (np.array 1xMxD): The input sample matrix
        trueTheta (np.array 1xDxD): The corresponding
            true graphs for reporting metrics or None
        eval_offset (float): eigenvalue offset for
            covariance matrix adjustment
        lr (float): Learning rate of glad for the adam optimizer
        INIT_DIAG (int): 0/1 for initilization strategy of GLAD
        L (int): Num of unrolled iterations of GLAD
        EPOCHS (int): The number of training epochs
        VERBOSE (bool): if True, prints to sys.out
    Returns:
        predTheta (torch.Tensor 1xDxD): Predicted graphs
        compare_theta (dict): returns comparison metrics if
            true precision matrix is provided
        model_glad (class object): Returns the learned glad model
    """
    # Calculating the batch covariance
    Sb = prepare_data.getCovariance(Xb, offset=eval_offset) # BxDxD
    # Converting the data to torch
    Xb = prepare_data.convertToTorch(Xb, req_grad=False)
    Sb = prepare_data.convertToTorch(Sb, req_grad=False)
    if trueTheta is not None:
        trueTheta = prepare_data.convertToTorch(
            trueTheta,
            req_grad=False
            )
    B, M, D = Xb.shape
    # NOTE: We fix the batch size B=1 for `uGLAD`
    # model and optimizer for uGLAD
    model_glad, optimizer_glad = init_uGLAD(
        lr=lr,
        theta_init_offset=1.0,
        nF=3,
        H=3
        )
    PRINT_EVERY = int(EPOCHS/10)
    # print max 10 times per training
    # NOTE(review): EPOCHS < 10 makes PRINT_EVERY == 0 and the modulo below
    # would raise ZeroDivisionError — confirm minimum EPOCHS.
    # Optimizing for the glasso loss
    for e in range(EPOCHS):
        # reset the grads to zero
        optimizer_glad.zero_grad()
        # calculate the loss
        predTheta, loss = forward_uGLAD(
            Sb,
            model_glad,
            L=L,
            INIT_DIAG=INIT_DIAG
            )
        # calculate the backward gradients
        loss.backward()
        if not e%PRINT_EVERY and VERBOSE: print(f'epoch:{e}/{EPOCHS} loss:{loss.detach().numpy()}')
        # updating the optimizer params with the grads
        optimizer_glad.step()
    # reporting the metrics if true thetas provided
    compare_theta = None
    if trueTheta is not None:
        # NOTE(review): only the last batch element's metrics are kept,
        # which is consistent with the fixed B=1 assumption above.
        for b in range(B):
            compare_theta = reportMetrics(
                trueTheta[b].detach().numpy(),
                predTheta[b].detach().numpy()
                )
            print(f'Compare - {compare_theta}')
    return predTheta, compare_theta, model_glad
def twitter_api():
    """Returns an authenticated tweepy.API instance.
    Returns None on failure.
    """
    # FIX: the original used ``finally: return api`` — a ``return`` inside
    # ``finally`` swallows any exception raised in the handlers themselves
    # (flake8 B012) and can NameError when auth fails before ``api`` is
    # bound.  Return from each path explicitly instead.
    try:
        auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)
        auth.set_access_token(TWITTER_ACCESS_TOKEN,
                              TWITTER_ACCESS_TOKEN_SECRET)
        return tweepy.API(auth)
    except tweepy.error.TweepError as ex:
        print(ex)
        # Guard: the error may not carry an HTTP response body.
        if getattr(ex, 'response', None) is not None:
            print(ex.response.text)
        return None
    except Exception as ex:
        print(ex)
        return None
def as_general_categories(cats, name="cats"):
"""Return a tuple of Unicode categories in a normalised order.
This function expands one-letter designations of a major class to include
all subclasses:
>>> as_general_categories(['N'])
('Nd', 'Nl', 'No')
See section 4.5 of the Unicode standard for more on classes:
https://www.unicode.org/versions/Unicode10.0.0/ch04.pdf
If the collection ``cats`` includes any elements that do not represent a
major class or a class with subclass, a deprecation warning is raised.
"""
if cats is None:
return None
major_classes = ("L", "M", "N", "P", "S", "Z", "C")
cs = categories()
out = set(cats)
for c in cats:
if c in major_classes:
out.discard(c)
out.update(x for x in cs if x.startswith(c))
elif c not in cs:
raise InvalidArgument(
"In %s=%r, %r is not a valid Unicode category." % (name, cats, c)
)
return tuple(c for c in cs if c in out) | 391185d75dce63df7deb724f8dea035389122b94 | 3,632,533 |
from pathlib import Path
import glob
def upload_directory(
        directory: str = './',
        upsert=False,
        ignore_duplicate_error=False,
        recursive=False,
        pattern='*'):
    """
    Upload files in a directory to the database.
    :param directory: [Optional] The root directory to upload. Defaults to the current working directory.
    :param upsert: [Optional] Replace existing documents in the database if they have the same id.
    :param ignore_duplicate_error: [Optional] If True, then the duplicate error that is raised when attempting to
        overwrite a document already in the database will be ignored.
    :param recursive: [Optional] Recurse through the subdirectories when uploading.
    :param pattern: [Optional] Glob pattern of file path.
    :return: The list of documents which were successfully uploaded to the database.
    """
    glob_pattern = (Path(directory) / pattern).as_posix()
    # BUG FIX: the module does ``import glob``, so the original ``glob(...)``
    # call invoked the module object itself (TypeError); call ``glob.glob``.
    files = [f for f in glob.glob(glob_pattern, recursive=recursive)
             if Path(f).is_file()]
    return upload_bulk(files, upsert=upsert, ignore_duplicate_error=ignore_duplicate_error)
def tExtract(rft, T_INDEX):
    """T_INDEX is either of T_WL, T_SPEC, T_COM.

    Return the T_INDEX column for each transmission sample whose wavelength
    falls inside the [R_WL, R_WH] window configured in rft.
    """
    low = rft[RFT_R][R_WL]
    high = rft[RFT_R][R_WH]
    return [row[T_INDEX] for row in rft[RFT_T] if low <= row[T_WL] <= high]
from typing import Union
from pathlib import Path
from typing import Optional
def open_txt(path: Union[str, Path], cf_table: Optional[dict] = cmor) -> xr.Dataset:
    """Extract daily HQ meteorological data and convert to an xarray Dataset
    with CF-Convention attributes."""
    metadata, records = extract_daily(path)
    return to_cf(metadata, records, cf_table)
def dc_coordinates():
    """Return (longitude, latitude, zoom level) for a DC-wide map."""
    return (
        -77.016243706276569,  # longitude
        38.894858329321485,   # latitude
        10.3,                 # zoom level
    )
import argparse
def get_arguments():
    """
    Obtains command-line arguments.
    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # shorthand: every option below is registered the same way
    add('--clusters',
        type=argparse.FileType('rU'),
        required=True,
        metavar='CLUSTERS',
        help='read cluster assignments from CSV file %(metavar)s')
    add('--diagnoses',
        type=argparse.FileType('rU'),
        required=True,
        metavar='DIAGNOSES',
        help='read diagnoses from CSV file %(metavar)s')
    add('--scores',
        type=argparse.FileType('rU'),
        required=True,
        metavar='SCORES',
        help='read scores from CSV file %(metavar)s')
    add('--base-dir',
        required=True,
        metavar='BASE-DIR',
        help='output all files under directory %(metavar)s')
    add('--diagnosis-order',
        type=argparse.FileType('rU'),
        metavar='DIAGNOSIS-ORDER',
        help='read the order of diagnoses from text file %(metavar)s')
    add('--diagnosis-map',
        type=argparse.FileType('rU'),
        metavar='DIAGNOSIS-MAP',
        help='load diagnosis mappings from %(metavar)s')
    add('--intraclassification-spacing',
        type=float,
        default=10.,
        metavar='INTRACLASSIFICATION-SPACING',
        help=('set the intraclassification spacing to %(metavar)s (default '
              '%(default)s)'))
    add('--interclassification-spacing',
        type=float,
        default=50.,
        metavar='INTERCLASSIFICATION-SPACING',
        help=('set the interclassification spacing to %(metavar)s (default '
              '%(default)s)'))
    add('--log',
        metavar='LOG',
        help='output logging information to %(metavar)s')
    return parser.parse_args()
def request_id_to_key(request_id):
    """Converts a request id into a TaskRequest key.

    Note that this function does NOT accept a task id. This function is primarily
    meant for limiting queries to a task creation range.
    """
    masked_id = request_id ^ task_pack.TASK_REQUEST_KEY_ID_MASK
    return ndb.Key(TaskRequest, masked_id)
from typing import Optional
def get_web_app_premier_add_on_slot(name: Optional[str] = None,
                                    premier_add_on_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    slot: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppPremierAddOnSlotResult:
    """
    Premier add-on.
    :param str name: Name of the app.
    :param str premier_add_on_name: Add-on name.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get the named add-on for the production slot.
    """
    # Arguments are keyed by the provider's camelCase names.
    __args__ = {
        'name': name,
        'premierAddOnName': premier_add_on_name,
        'resourceGroupName': resource_group_name,
        'slot': slot,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    resp = pulumi.runtime.invoke('azure-native:web/v20200601:getWebAppPremierAddOnSlot', __args__, opts=opts, typ=GetWebAppPremierAddOnSlotResult).value
    return AwaitableGetWebAppPremierAddOnSlotResult(
        id=resp.id,
        kind=resp.kind,
        location=resp.location,
        marketplace_offer=resp.marketplace_offer,
        marketplace_publisher=resp.marketplace_publisher,
        name=resp.name,
        product=resp.product,
        sku=resp.sku,
        tags=resp.tags,
        type=resp.type,
        vendor=resp.vendor)
from typing import Callable
from typing import Iterable
from typing import List
def lmap(f: Callable, x: Iterable) -> List:
    """Apply ``f`` to every element of ``x`` and return the results as a list."""
    return [f(item) for item in x]
import ImportPathHelper as imports
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.legacy.general as general
import azlmbr.bus
import azlmbr.physics as phys
import azlmbr.math as mathazon
def C4925577_Materials_MaterialAssignedToTerrain():
    """
    Summary:
    Three spheres are suspended above the terrain. Beneath two of the balls,
    there is a different material painted on the terrain.
    They should all bounce at different heights per their respective terrains
    Terrain entity: PhysX Terrain component: default settings
    Ball Entities: Sphere shaped Mesh component
                   Sphere shaped PhysX Collider component: default settings
                   PhysX Rigid Body component: Gravity disabled, default settings
    Concrete Material: Restitution: 0.0; Restitution Combine: Average
    Rubber Material: Restitution: 1.0; Restitution Combine: Average
    Expected Behavior:
    The three balls start off at the same height. When game mode is entered they will fall towards the terrain.
    After the ball collides with the terrain, they will bounce back at different heights respective to their
    terrain material collisions. Ball_Default is the control and is dropped on default terrain material.
    Ball_Rubber bounces off the rubber terrain material and should bounce higher than the default.
    Ball_Concrete strikes the concrete terrain material and should not bounce as high as the default material.
    Test Steps:
    1) Open level
    2) Enter game mode
    3) Find entities
    4) Check that gravity is disabled for all the balls initially
    5) Check that the balls are aligned and all falling from the same height
    Steps 6-9 run for each ball
    6) Assign the tests and enable handlers to their respective spheres
    7) Enable gravity on ball entities
    8) Check that the balls collide with the PhysX Terrain
    9) Wait for the ball to reach its peak height; record height and freeze it
    10) Compare the bounce heights of the balls
    11) Exit game mode and close the editor
    Note:
    - This test file must be called from the Open 3D Engine Editor command terminal
    - Any passed and failed tests are written to the Editor.log file.
            Parsing the file or running a log_monitor are required to observe the test results.
    :return: None
    """
    # NOTE(review): the ``Tests`` names used throughout below are defined outside
    # this chunk (typically a tuple-of-(name, pass-msg, fail-msg) container
    # injected by the surrounding test file) -- confirm before running standalone.
    imports.init()
    # fmt: off
    ZERO_VECTOR                 = mathazon.Vector3(0.0, 0.0, 0.0)
    X_POSITION_RUBBER           = 60.0  # Point on X axis material was painted rubber during level setup
    X_POSITION_DEFAULT          = 70.0  # Area in between other materials where Default material exists
    X_POSITION_CONCRETE         = 80.0  # Point on X axis material was painted concrete during level setup
    Y_POSITION_VALUE            = 42.0  # Point on Y axis materials were painted during level setup
    POSITION_BUFFER             = 4.0   # Material paint radius is 4.0 m
    TIMEOUT_IN_SECONDS          = 3.0
    NUM_WAIT_FRAMES_ENTITY_LOAD = 2     # Frames to wait to allow entities to load in level
    # fmt: on
    # Simple namespace shared by the collision callbacks; filled in after game mode starts.
    class Terrain:
        id = None
        name = None
        handler = None
    class Sphere:
        def __init__(self, name):
            self.name = name
            self.id = general.find_game_entity(self.name)
            self.gravity_enabled = phys.RigidBodyRequestBus(azlmbr.bus.Event, "IsGravityEnabled", self.id)
            self.world_location_start = self.get_location()
            self.handler = None
            self.hit_ground = False
            self.bounced = False
            self.peak_reached = False
            self.ground_height = 0.0
            self.peak_height = 0.0
        def assign_tests(self):
            if self.name == "Ball_Default":
                self.test_find_ball = Tests.find_ball_default
                self.test_terrain_collide = Tests.terrain_collide_default
                self.test_peak_reached = Tests.peak_reached_default
            elif self.name == "Ball_Rubber":
                self.test_find_ball = Tests.find_ball_rubber
                self.test_terrain_collide = Tests.terrain_collide_rubber
                self.test_peak_reached = Tests.peak_reached_rubber
            elif self.name == "Ball_Concrete":
                self.test_find_ball = Tests.find_ball_concrete
                self.test_terrain_collide = Tests.terrain_collide_concrete
                self.test_peak_reached = Tests.peak_reached_concrete
        def get_location(self):
            # () -> Vector3
            return azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", self.id)
        def get_linear_velocity(self):
            # () -> Vector3
            return phys.RigidBodyRequestBus(azlmbr.bus.Event, "GetLinearVelocity", self.id)
        def set_linear_velocity(self, vector):
            # (Vector3) -> None
            phys.RigidBodyRequestBus(azlmbr.bus.Event, "SetLinearVelocity", self.id, vector)
        def freeze_self(self):
            # () -> None
            # Zero velocity + gravity off so this ball can't disturb the others.
            self.set_linear_velocity(ZERO_VECTOR)
            self.enable_gravity(False)
        def check_gravity(self):
            # () -> bool
            return phys.RigidBodyRequestBus(azlmbr.bus.Event, "IsGravityEnabled", self.id)
        def enable_gravity(self, bool_to_set=True):
            # (bool) -> None
            phys.RigidBodyRequestBus(azlmbr.bus.Event, "SetGravityEnabled", self.id, bool_to_set)
        def peak_height_reached(self):
            """
            Used for conditional waiting;
            If peak is reached: sets the value for self.peak_reached to True, saves peak world height,
            freezes self to keep it from continuing to bounce and possibly interfering with another ball instance
            """
            current_location = self.get_location()
            # The peak is detected one frame late: the first frame where Z drops below the running maximum.
            if current_location.z < self.peak_height:
                self.peak_reached = True
                Report.info("{} has peaked at {:.6} in the world.".format(self.name, self.peak_height))
                self.freeze_self()
                return True
            self.peak_height = current_location.z
            return False
        def on_collision_begin(self, args):
            # Ball collides with the ground
            other_id = args[0]
            other_name = azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "GetEntityName", other_id)
            if other_name == Terrain.name:
                self.hit_ground = True
                Report.info("{} has collided with the terrain.".format(self.name))
                location = self.get_location()
                self.ground_height = location.z
        def on_collision_end(self, args):
            # Ball bounces off the ground
            other_id = args[0]
            other_name = azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "GetEntityName", other_id)
            if other_name == Terrain.name:
                self.bounced = True
                Report.info("{} has bounced off the terrain.".format(self.name))
        def enable_handler(self):
            self.handler = phys.CollisionNotificationBusHandler()
            self.handler.connect(self.id)
            self.handler.add_callback("OnCollisionBegin", self.on_collision_begin)
            self.handler.add_callback("OnCollisionEnd", self.on_collision_end)
    # True when ``actual`` is within ``buffer`` of ``expected``.
    def is_close(actual, expected, buffer):
        return abs(actual - expected) < buffer
    def balls_are_aligned(balls_list):
        aligned = True
        for ball in balls_list:
            # check x axis per level setup
            if ball.name == "Ball_Default":
                if not is_close(ball.world_location_start.x, X_POSITION_DEFAULT, POSITION_BUFFER):
                    Report.info("Ball_Default is not close enough to expected X position")
                    aligned = False
            elif ball.name == "Ball_Rubber":
                if not is_close(ball.world_location_start.x, X_POSITION_RUBBER, POSITION_BUFFER):
                    Report.info("Ball_Rubber is not close enough to expected X position")
                    aligned = False
            elif ball.name == "Ball_Concrete":
                if not is_close(ball.world_location_start.x, X_POSITION_CONCRETE, POSITION_BUFFER):
                    Report.info("Ball_Concrete is not close enough to expected X position")
                    aligned = False
            # check y axis per level setup
            if not is_close(ball.world_location_start.y, Y_POSITION_VALUE, POSITION_BUFFER):
                aligned = False
                Report.info("One or more balls are not close enough to expected Y position")
        return aligned
    def ball_heights_match(balls_list):
        heights_match = True
        for ball in balls_list:
            # check ball heights match each other (z axis)
            if ball.world_location_start.z != balls[0].world_location_start.z:
                heights_match = False
                Report.info("The balls are not falling from the same height.")
                Report.failure(Tests.same_starting_height)
        return heights_match
    helper.init_idle()
    # 1) Open level
    helper.open_level("Physics", "C4925577_PhysXMaterials_MaterialAssignedToTerrain")
    # 2) Enter game mode
    helper.enter_game_mode(Tests.game_mode_enter)
    general.idle_wait_frames(NUM_WAIT_FRAMES_ENTITY_LOAD)
    # 3) Find entities
    Terrain.id = general.find_game_entity("Terrain")
    Terrain.name = azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "GetEntityName", Terrain.id)
    ball_default = Sphere("Ball_Default")
    ball_rubber = Sphere("Ball_Rubber")
    ball_concrete = Sphere("Ball_Concrete")
    balls = (ball_rubber, ball_default, ball_concrete)
    Report.critical_result(Tests.find_terrain, Terrain.id.IsValid())
    Report.critical_result(Tests.find_ball_default, ball_default.id.IsValid())
    Report.critical_result(Tests.find_ball_rubber, ball_rubber.id.IsValid())
    Report.critical_result(Tests.find_ball_concrete, ball_concrete.id.IsValid())
    # 4) Check that gravity is disabled for all the balls initially
    gravity_disabled_for_all = True
    for ball in balls:
        if ball.gravity_enabled is True:
            gravity_disabled_for_all = False
    Report.result(Tests.all_gravity_disabled, gravity_disabled_for_all)
    # 5) Check that the balls are aligned and all falling from the same height
    balls_are_aligned = balls_are_aligned(balls)
    Report.critical_result(Tests.balls_are_aligned, balls_are_aligned)
    same_starting_height = ball_heights_match(balls)
    Report.critical_result(Tests.same_starting_height, same_starting_height)
    # Steps 6-9 run for each ball
    for ball in balls:
        # 6) Assign the tests and enable handlers to their respective spheres
        ball.assign_tests()
        ball.enable_handler()
        # 7) Enable gravity on ball entities
        ball.enable_gravity()
        # 8) Check that the balls collide with the PhysX Terrain
        helper.wait_for_condition(lambda: ball.bounced, TIMEOUT_IN_SECONDS)
        Report.result(ball.test_terrain_collide, ball.hit_ground)
        # 9) Wait for the ball to reach its peak height; record height and freeze it
        helper.wait_for_condition(ball.peak_height_reached, TIMEOUT_IN_SECONDS)
        Report.result(ball.test_peak_reached, ball.peak_reached)
    # 10) Compare the bounce heights of the balls
    # The restitution of rubber is greater than the default; the restitution of concrete is less than the default
    height_order_correct = ball_rubber.peak_height > ball_default.peak_height > ball_concrete.peak_height
    Report.result(Tests.bounce_height_order_correct, height_order_correct)
    # 11) Exit game mode and close the editor
    helper.exit_game_mode(Tests.game_mode_exit)
def fallible_to_exec_result_or_raise(
    fallible_result: FallibleExecuteProcessResult, description: ProductDescription
) -> ExecuteProcessResult:
    """Converts a FallibleExecuteProcessResult to a ExecuteProcessResult or raises an error."""
    # Guard clause: any non-zero exit code is surfaced as a failure.
    if fallible_result.exit_code != 0:
        raise ProcessExecutionFailure(
            fallible_result.exit_code,
            fallible_result.stdout,
            fallible_result.stderr,
            description.value
        )
    return ExecuteProcessResult(
        fallible_result.stdout,
        fallible_result.stderr,
        fallible_result.output_directory_digest
    )
def cumprod_np(a: np.ndarray, mod: int) -> np.ndarray:
    """Return the cumulative product of ``a`` modulo ``mod`` (not in place).

    The parameter ``a`` must be a one dimensional ndarray. The input is laid
    out into an m x m grid (m ~ sqrt(n)), cumulative products are taken along
    rows and then the rows are chained, giving O(sqrt(n)) Python-level loop
    iterations instead of O(n).

    :param a: 1-D integer array.
    :param mod: modulus applied after every multiplication.
    :return: 1-D int64 array of length ``a.size`` with running products mod ``mod``.
    """
    n = a.size
    assert a.ndim == 1
    # Work in int64 so intermediate products up to (mod - 1)**2 cannot
    # overflow on platforms where the default integer dtype is 32-bit.
    work = a.astype(np.int64)
    m = int(n**0.5) + 1
    # np.resize pads by cycling the input; the padded cells sit *after* the
    # first n slots in ravel order, so they never affect the returned prefix.
    work = np.resize(work, (m, m))
    # Cumulative product along each row.
    for i in range(m - 1):
        work[:, i + 1] = work[:, i + 1] * work[:, i] % mod
    # Chain the rows: fold each row's final product into the next row.
    for i in range(m - 1):
        work[i + 1] = work[i + 1] * work[i, -1] % mod
    return work.ravel()[:n]
def table_of_contents(df_documentation=''):
    """
    Function::: table_of_contents
    Description: Build a markdown table of contents for the functions in a repository.
    Details: Each row of the documentation dataframe becomes one markdown table row
        linking to the function's anchor.

    Inputs
        df_documentation: DATAFRAME with columns fxn_desc, script_name, fxn_name
    Outputs
        tab_contents: STR Table of contents of functions in repository
    Dependencies
    """
    docu_csv = df_documentation
    # Collect one markdown row per documented function, then join once.
    rows = []
    for i in range(len(docu_csv)):
        rows.append('''|{description}|{script}|[{fxn}](#function-{fxn})| \n'''.format(
            description=docu_csv.fxn_desc[i],
            script=docu_csv.script_name[i],
            fxn=docu_csv.fxn_name[i]))
    str_fxns = ''.join(rows)
    # Assemble the full table-of-contents block around the rows.
    tab_contents = '''# Documentation- \n
    ## Table of Contents \n
    |Description|Script|Functions|
    | ------------- | ------------- | ------------- |
    {all_fxns} \n
    ### End Table of Contents <br/> \n'''.format(all_fxns=str_fxns)
    return tab_contents
def mock_get_location_business_from_sam(client, duns_list):
    """ Mock function for location_business data as we can't connect to the SAM service

    :param client: unused; kept for signature compatibility with the real SAM client call.
    :param duns_list: iterable of DUNS strings to look up.
    :return: DataFrame with one row per known DUNS (unknown DUNS are skipped).
    """
    columns = ['awardee_or_recipient_uniqu'] + list(update_historical_duns.props_columns.keys())
    duns_mappings = {
        '000000001': {
            'awardee_or_recipient_uniqu': ['000000001'],
            'address_line_1': ['Test address 1'],
            'address_line_2': ['Test address 2'],
            'city': ['Test city'],
            'state': ['Test state'],
            'zip': ['Test zip'],
            'zip4': ['Test zip4'],
            'country_code': ['Test country'],
            'congressional_district': ['Test congressional district'],
            'business_types_codes': [['A', 'B', 'C']]
        },
        '000000002': {
            'awardee_or_recipient_uniqu': ['000000002'],
            'address_line_1': ['Other Test address 1'],
            'address_line_2': ['Other Test address 2'],
            'city': ['Other Test city'],
            'state': ['Other Test state'],
            'zip': ['Other Test zip'],
            'zip4': ['Other Test zip4'],
            'country_code': ['Other Test country'],
            'congressional_district': ['Other Test congressional district'],
            'business_types_codes': [['D', 'E', 'F']]
        }
    }
    # DataFrame.append was removed in pandas 2.0; build all per-DUNS frames
    # first and concatenate once (same column union / index behavior).
    frames = [pd.DataFrame(columns=columns)]
    frames.extend(pd.DataFrame(duns_mappings[duns]) for duns in duns_list if duns in duns_mappings)
    return pd.concat(frames)
def repeat(N, fn):
    """repeat module N times

    :param int N: repeat time
    :param function fn: function to generate module
    :return: repeated loss
    :rtype: MultiSequential
    """
    modules = [fn() for _ in range(N)]
    return MultiSequential(*modules)
import numpy
def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors.

    NOTE(review): labels appear to be 1-based (hence the ``- 1`` below) --
    confirm against the data source.
    """
    num_labels = labels_dense.shape[0]
    one_hot = numpy.zeros((num_labels, num_classes))
    # Flat index of each row's hot column: row offset + (label - 1).
    row_offsets = numpy.arange(num_labels) * num_classes
    one_hot.flat[row_offsets + labels_dense.ravel() - 1] = 1
    return one_hot
from typing import Set
from typing import Mapping
def parse_input(data: str) -> (Set[str], Mapping[str, Mapping[str, int]]):
    """Extract the names and associated happiness changes from data."""
    names = set()
    happiness_changes = {}
    for line in data.splitlines():
        match = INPUT.fullmatch(line)
        if match is None:
            raise ValueError('Illegal input: {}'.format(line))
        name1 = match.group('name1')
        name2 = match.group('name2')
        names.update((name1, name2))
        # setdefault creates the inner mapping on first sight of name1.
        happiness_changes.setdefault(name1, {})[name2] = change_amount(match.group('change'))
    return names, happiness_changes
def main(argv):
    """The program.

    Returns an error code or None.
    """
    try:
        # Build an environment from the list of arguments.
        env, writer = make_env_and_writer(argv)
        try:
            COMMANDS[env.options.command](env, writer).execute()
        finally:
            writer.finish()
        return 1 if writer.erroneous else 0
    except CommandError as e:
        print('Error:', e)
        return 2
import pandas
def merger(primary_path: str, secondary_path, desired_columns: list, shared_column="time"):
    """
    --> Primary path is the global analysis file produced by analyzing NMR spectra
    --> Secondary path is the raw-data file recorded by the DAQ. It may be a
        single path or a list of DAQ csv paths (which are concatenated).
    --> Desired columns is a list of columns that you want to MIGRATE from the
        raw-data file INTO the global analysis file.
    --> Shared column needs to be some form of timestamp.

    Troubleshooting: if you get an index error, double check the delimiter of
    the DAQ csv; tab-separated content showing up as one column means the
    delimiter passed to fetch_df is wrong.
    """
    primary_df = fetch_df(primary_path)
    print(primary_df)
    primary_df[shared_column] = pandas.to_datetime(primary_df[variablenames.vd_GA_timecol], format="%Y-%m-%d %H:%M:%S")
    primary_df = primary_df.sort_values(by=shared_column)
    # Timestamps present in the analysis file; only these rows are merged.
    indexes_to_grab = primary_df.loc[:, shared_column]
    if isinstance(secondary_path, list):
        # BUG FIX: the previous version iterated an undefined name
        # ``daqdatafile`` (NameError) and used the removed DataFrame.append;
        # read every DAQ file and concatenate them once.
        secondary_df = pandas.concat([fetch_df(element) for element in secondary_path])
    else:
        secondary_df = fetch_df(secondary_path)
    print(secondary_df)
    # If this raises "time data doesn't match format", verify the DAQ export format.
    secondary_df[shared_column] = pandas.to_datetime(secondary_df[variablenames.vd_DAQDATA_timecol], format="%m/%d/%Y %I:%M:%S %p")
    secondary_df = secondary_df.sort_values(by=shared_column)
    print(primary_df)
    primary_df = primary_df.set_index(shared_column)
    secondary_df = secondary_df.set_index(shared_column)
    print(primary_df)
    for i in desired_columns:
        # For each requested column, copy the DAQ values whose timestamps
        # appear in both frames into the analysis frame.
        primary_df.loc[primary_df.index.intersection(indexes_to_grab), i] = secondary_df.loc[secondary_df.index.intersection(indexes_to_grab), i]
    print(primary_df)
    return primary_df
import unicodedata
import re
def slugify_ref(value: Text, allow_unicode: bool = False) -> Text:
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase.
    Also strip leading and trailing whitespace.
    :param value: text to slugify
    :param allow_unicode: (optional) boolean to allow unicode processing, defaults to False
    :return: slugified value
    """
    if allow_unicode:
        normalized = unicodedata.normalize("NFKC", value)
        cleaned = re.sub(r"[^\w\s-]", "", normalized, flags=re.U).strip().lower()
        return re.sub(r"[-\s]+", "-", cleaned, flags=re.U)
    # ASCII path: decompose accents, then drop anything non-ASCII.
    ascii_text = (
        unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii")
    )
    cleaned = re.sub(r"[^\w\s-]", "", ascii_text).strip().lower()
    return re.sub(r"[-\s]+", "-", cleaned)
import json
def user_search():
    """UserSearch: return (as JSON) the users whose username contains the search term."""
    value = request.args.get('search')
    cur = MY_SQL.connection.cursor()
    # SECURITY/BUG FIX: the previous query embedded the %s placeholder inside a
    # quoted literal ('%%%s%%'), which breaks driver-side parameter quoting and
    # invited injection. Bind the whole LIKE pattern as one parameter instead.
    cur.execute(
        '''SELECT id, username FROM accounts.users WHERE
        username LIKE %s;''',
        ('%' + value + '%',)
    )
    users = json.dumps(cur.fetchall())
    return users
def graphcut(img1, img2, mask):
    """
    Inputs:
        Mask:
            The 80px area out of the boundary. 2 dims.
            Pixels on the edge of Img1 are marked with 1 and same for Img2. Internal pixels are marked with 3.
        Img1 & Img2:
            Here Img1 means source img, aka. the input img. Img2 is the candidate.
            Both Img1 and Img2 should be in OpenCV format (y, x, c). Both are actually gradient maps.
    Outputs:
        SegMap:
            0 if not in masked area. 1 if be with img1 and 2 if img2.
    """
    # NOTE(review): the neighbour costs below subtract pixel values directly;
    # this assumes img1/img2 are signed/float gradient maps -- uint8 input
    # would wrap on subtraction. TODO confirm at the call site.
    assert len(img1.shape) == 3 and len(img2.shape) == 3
    assert len(mask.shape) == 2
    if not (img1.shape[:2] == img2.shape[:2] == mask.shape):
        warn("Image/Mask shape does not match! Img1 = {}, Img2 = {}, Mask = {}".format(
            img1.shape, img2.shape, mask.shape), domain=__file__)
    shape = mask.shape
    estimated_nodes = (mask > 0).sum()
    estimated_edges = estimated_nodes * 4
    g = maxflow.Graph[float](estimated_nodes, estimated_edges)
    nodes = g.add_nodes(estimated_nodes)
    # Assign every masked pixel a graph-node index, in row-major scan order.
    # nodemap: node index -> (y, x); nodemap_inv: (y, x) -> node index.
    nodemap = {}
    nodemap_inv = {}
    for y in range(shape[0]):
        for x in range(shape[1]):
            if mask[y, x] > 0:
                nodemap[len(nodemap)] = (y, x)
                nodemap_inv[(y, x)] = len(nodemap_inv)
    # Effectively-infinite capacity used to pin boundary pixels to a terminal.
    inf = 1e9
    for y in range(shape[0]):
        for x in range(shape[1]):
            if mask[y, x] == 0:
                continue
            # Smoothness cost to the 4-connected neighbours below/right:
            # sum over channels of |img1 - img2| at both endpoints.
            if y + 1 < shape[0] and mask[y + 1, x] > 0:
                value = np.abs(img1[y, x] - img2[y, x]).sum() + \
                    np.abs(img1[y + 1, x] - img2[y + 1, x]).sum()
                g.add_edge(nodes[nodemap_inv[(y, x)]],
                           nodes[nodemap_inv[(y + 1, x)]], value, value)
            if x + 1 < shape[1] and mask[y, x + 1] > 0:
                value = np.abs(img1[y, x] - img2[y, x]).sum() + \
                    np.abs(img1[y, x + 1] - img2[y, x + 1]).sum()
                g.add_edge(nodes[nodemap_inv[(y, x)]],
                           nodes[nodemap_inv[(y, x + 1)]], value, value)
            # Boundary pixels are hard-constrained to their side of the cut.
            if mask[y, x] == 1:  # Connected to src
                g.add_tedge(nodes[nodemap_inv[(y, x)]], inf, 0)
            if mask[y, x] == 2:  # Connected to dst
                g.add_tedge(nodes[nodemap_inv[(y, x)]], 0, inf)
    flow = g.maxflow()
    # get_segment yields 0 (source side) or 1 (sink side); +1 maps that onto
    # the 1 (img1) / 2 (img2) labels documented above.
    segmentation_result = 1 + np.array(list(map(g.get_segment, nodes)))
    segmap = np.zeros_like(mask)
    for i, result in enumerate(segmentation_result):
        segmap[nodemap[i][0], nodemap[i][1]] = result
    return segmap, flow
def lutForTBMap():
    """Produce a red-black-blue colormap (blue -> black -> red).

    :return: a matplotlib LinearSegmentedColormap.
    """
    cmap = mpl.colors.LinearSegmentedColormap.from_list('my_colormap',
                                                        ['blue', 'black', 'red'],
                                                        256)
    # The previous version also built a 256-entry byte lookup table here
    # (intended for mayavi) but never returned or stored it; that dead loop
    # has been removed. Rebuild it from the returned cmap if ever needed:
    #   lut = [cmap(i, bytes=True) for i in range(256)]
    return cmap
import typing
import tqdm
def val_one_epoch(model: Module, dataloader: DataLoader, criterion,
                  device: str) -> typing.Tuple[typing.Union[np.ndarray, None],
                                               dict]:
    """
    Validate the given model for one epoch.

    :param model: model to evaluate
    :type model: Module
    :param dataloader: data loader for batch loading
    :type dataloader: DataLoader
    :param criterion: loss function; may be falsy, in which case no loss is computed
    :param device: device to run on, "cpu" or "cuda"
    :type device: str
    :return: (mean batch loss or None if no criterion, prediction dict with
        scores/bboxes/labels accumulated by update_prediction_dict)
    :rtype: (np.ndarray, dict) if criterion is specified, otherwise (None, dict)
    """
    model.eval()
    model = model.to(device)
    batch_losses = []
    pred_dict = {}
    for imgs, labels, bb_coords, ids in tqdm(dataloader, desc="Validation"):
        imgs = imgs.to(device)
        labels = labels.to(device)
        outputs = model(imgs)
        if criterion:
            batch_losses.append(criterion(outputs, labels.unsqueeze(1)).item())
        # Raw logits -> probabilities before logging predictions.
        outputs = sigmoid(outputs)
        update_prediction_dict(pred_dict, outputs, labels,
                               bb_coords, ids)
    mean_loss = np.mean(batch_losses) if criterion else None
    return mean_loss, pred_dict
def multiply_something(num1, num2):
    """this function will multiply num1 and num2
    >>> multiply_something(2, 6)
    12
    >>> multiply_something(-2, 6)
    -12
    """
    product = num1 * num2
    return product
def post_shift_dp(train_set, vali_set, test_set, logreg_model):
    """Post-shifts log. regression model for demographic parity using vali_set.

    Returns the train, validation and test sets with the group attribute appended
    as an additional feature, and the post-shifted linear model for the expanded
    datasets.

    Args:
      train_set: (features, labels, groups)
      vali_set: (features, labels, groups)
      test_set: (features, labels, groups)
      logreg_model: (weights, threshold)
    Returns:
      a tuple containing:
        the post_shifted model: (post_shifted_weights, post_shifted_threshold),
        the expanded training set: (features, labels, groups),
        the expanded validation set: (features, labels, groups),
        the expanded test set: (features, labels, groups).
    """
    x_vali, y_vali, z_vali = vali_set
    weights, threshold = logreg_model
    # Scores on the *validation* set drive the per-group threshold choice.
    scores = x_vali.dot(weights) + threshold
    positive_rate = y_vali.mean()  # Overall proportion of positives.
    # Per-group threshold so that each group's coverage equals positive_rate.
    quantile = (1 - positive_rate) * 100
    threshold0 = np.percentile(scores[z_vali == 0], q=quantile)
    threshold1 = np.percentile(scores[z_vali == 1], q=quantile)

    # Append the group attribute as an extra feature to each split so a single
    # linear model can encode the two group thresholds.
    def _append_group(dataset):
        features, labels, groups = dataset
        expanded = np.concatenate([features, groups.reshape(-1, 1)], axis=1)
        return expanded, labels, groups

    train_set_appended = _append_group(train_set)
    vali_set_appended = _append_group(vali_set)
    test_set_appended = _append_group(test_set)
    # The group feature's weight carries the threshold difference; recall that
    # thresholds are added, not subtracted from w.x during evaluation.
    post_shifted_weights = np.concatenate([weights, [threshold0 - threshold1]])
    post_shifted_threshold = threshold - threshold0
    return ((post_shifted_weights, post_shifted_threshold), train_set_appended,
            vali_set_appended, test_set_appended)
def determine_acknowledgement(record, report, ignore_string):
    """Mark report for output unless ignored"""
    # A missing/empty comment becomes "" so the substring test is safe.
    comment = (record[COMMENT_TEXT] or "").lower()
    ignored = ignore_string in comment
    report["should"] = not ignored
    return ignored
def estimate_infectious_rate_constant_vec(event_times,
                                          follower,
                                          t_start,
                                          t_end,
                                          kernel_integral,
                                          count_events=None):
    """
    Returns estimation of infectious rate for given event time and followers on defined interval.
    Optimized using numpy.
    :param event_times: nd-array of event times
    :param follower: nd-array of follower counts
    :param t_start: time interval start
    :param t_end: time interval end
    :param kernel_integral: integral function of kernel function
    :param count_events: count of observed events in interval (used for time window approach)
    :return: estimated values for infectious rate
    """
    # Follower-weighted kernel mass contributed by each past event.
    weighted_integrals = follower * kernel_integral(t_start - event_times,
                                                    t_end - event_times)
    total_mass = weighted_integrals.sum()
    # Numerator is the explicit event count when given (time-window approach),
    # otherwise the number of recorded events.
    numerator = event_times.size if count_events is None else count_events
    return numerator / total_mass
import urllib
import json
def cotacaoBRL():
    """
    Return the latest Bitcoin price in BRL (Mercado Bitcoin), via the BitValor API.
    """
    endpoint = "https://api.bitvalor.com/v1/ticker.json"
    with urllib.request.urlopen(endpoint) as response:
        payload = json.loads(response.read().decode())
    return payload['ticker_24h']['exchanges']['MBT']['last']
from typing import List
def adder(journal: Journal) -> List[JournalEntry]:
    """A task that requires previous phases to have recorded journal entries with tags 'x' and 'y', which it will sum.

    Returns a new journal entry, titled 'x+y', containing the sum of the existing journal entries 'x' and 'y'.
    """
    x_value = journal.get_entry('x').data
    y_value = journal.get_entry('y').data
    return [JournalEntry('x+y', x_value + y_value)]
def find(word, letter):
    """Return the index of the first occurrence of letter in word, or -1 if absent."""
    for position, current in enumerate(word):
        if current == letter:
            return position
    return -1
def subsample_ind(n, k, seed=32):
    """
    Return a list of indices to choose k out of n without replacement
    """
    # Save the global RNG state so callers' random streams are unaffected.
    saved_state = np.random.get_state()
    np.random.seed(seed)
    chosen = np.random.choice(n, k, replace=False)
    np.random.set_state(saved_state)
    return chosen
def HA2(credentails, request):
    """Create HA2 md5 hash
    If the qop directive's value is "auth" or is unspecified, then HA2:
    HA2 = md5(A2) = MD5(method:digestURI)
    If the qop directive's value is "auth-int" , then HA2 is
    HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
    """
    # NOTE(review): `H` is an external digest helper. The "auth" branch
    # passes bytes while the "auth-int" branch passes str -- confirm H
    # accepts both.  ("credentails" is a pre-existing misspelling.)
    if credentails.get("qop") == "auth" or credentails.get('qop') is None:
        return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]))
    elif credentails.get("qop") == "auth-int":
        for k in 'method', 'uri', 'body':
            if k not in request:
                raise ValueError("%s required" % k)
        return H("%s:%s:%s" % (request['method'],
                               request['uri'],
                               H(request['body'])))
    raise ValueError | 94f9a6b6e6371f1d7c1c6606577cbbced201facf | 3,632,565
from typing import Dict
from typing import Any
import hashlib
def name_to_scope(
    template: str,
    name: str,
    *,
    maxlen: int = None,
    params: Dict[str, Any] = None,
) -> str:
    """Return scope by given template possibly shortened on name part.
    If the formatted scope exceeds ``maxlen``, the name is truncated and an
    8-char sha1 suffix of the original name is appended to keep it unique.
    """
    # NOTE(review): the default params=None makes `**params` raise TypeError;
    # callers apparently always pass a dict -- confirm, or default to {}.
    scope = template.format(name=name, **params)
    if maxlen and len(scope) > maxlen:
        surplus = len(scope) - maxlen
        name = name[:len(name) - surplus - 8] + hashlib.sha1(name.encode()).hexdigest()[:8]
        scope = template.format(name=name, **params)
    return _sanitize(scope) | cd6759da406b6072565f693cdffe8eb16107c074 | 3,632,566
def bootstrap_idxs(n, rng: np.random.Generator = None):
    """
    Generate a set of boostrap indexes of length n, returning the pair (in_bag, out_bag) containing the in-bag and
    out-of-bag indexes as numpy arrays
    """
    # Accept None or a seed-like value and normalise it to a Generator.
    if rng is None or type(rng) is not np.random.Generator:
        rng = np.random.default_rng(rng)
    # In-bag: n draws with replacement; out-of-bag: indices never drawn
    # (may be empty).
    in_bag = rng.integers(low=0, high=n, size=n)
    out_bag = np.array(list(set(range(n)) - set(in_bag)))
    return in_bag, out_bag | 3d76cfea110c91a228bc8bc3ec5698c9a94676b8 | 3,632,567
def run(argv=None):
  """Main entry point; defines and runs the wordcount pipeline."""
  # NOTE(review): this function looks like unfinished scratch code: `zip`
  # (the builtin) is used as if it were a PCollection, `rows` is undefined
  # at the per_column_average call site, and the keyed_averaged_elm
  # expression below has an unbalanced parenthesis.
  parser = argparse.ArgumentParser()
  parser.add_argument('--input',
                      dest='input',
                      default='$GTFS_BUCKET/at/20190429120000/at.zip',
                      help='Input file to process.')
  parser.add_argument('--output',
                      dest='output',
                      # CHANGE 1/5: The Google Cloud Storage path is required
                      # for outputting the results.
                      default='$GTFS_BUCKET/_DATAFLOW/output',
                      help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(argv)
  pipeline_args.extend([
      # CHANGE 2/5: (OPTIONAL) Change this to DataflowRunner to
      # run your pipeline on the Google Cloud Dataflow Service.
      '--runner=DataflowRunner',
      # CHANGE 3/5: Your project ID is required in order to run your pipeline on
      # the Google Cloud Dataflow Service.
      '--project=$PROJECT_ID',
      # CHANGE 4/5: Your Google Cloud Storage path is required for staging local
      # files.
      '--staging_location=$GTFS_BUCKET/_DATAFLOW/staging',
      # CHANGE 5/5: Your Google Cloud Storage path is required for temporary
      # files.
      '--temp_location=$GTFS_BUCKET/_DATAFLOW/temp',
      '--job_name=import_gtfs_zip',
  ])
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  with beam.Pipeline(options=pipeline_options) as p:
    # Read the text file[pattern] into a PCollection.
    # zip = p | io.Read("Reading GTFS zip file", beam.io.TextFileSource(known_args.input, coder=coders.BytesCoder()))
    csv_files = p | 'ReadFromText' >> beam.io.ReadFromText(
        known_args.input,
        compression_type=beam.io.filesystem.CompressionTypes.GZIP)
    # NOTE(review): `zip` here is the Python builtin, not a PCollection.
    unzipped = zip | transforms.Map(lambda x: x | WriteToText(known_args.output))
    vids = (p | 'Read input' >> beam.io.ReadFromText(known_args.input)
              | 'Parse input' >> beam.Map(lambda line: csv.reader([line]).next()))
              # | 'Run DeepMeerkat' >> beam.ParDo(PredictDoFn(pipeline_args)))
    months = p | "Reading month records" >> beam.Create([
        {'name': 'Jan', 'num': 1},
        {'name': 'Feb', 'num': 2},
    ])
    source_config = relational_db.SourceConfiguration(
        drivername='postgresql+pg8000',
        host='35.189.51.235',
        port=5432,
        username='postgres',
        password='7f512a41a3506989d1233017f624b35c0fa70bae',
        database='at',
        create_if_missing=True,
    )
    table_config = relational_db.TableConfiguration(
        name='months',
        create_if_missing=True
    )
    months | 'Writing to DB' >> relational_db.Write(
        source_config=source_config,
        table_config=table_config
    )
    def per_column_average(rows, ignore_elms=[ID_INDEX]):
      return [sum([row[idx] if idx not in ignore_elms else 0
                   for row in rows]) / len(row[0])
              for idx, _ in enumerate(rows[0])]
    # NOTE(review): `rows` is undefined below and the beam.Map line is
    # missing a closing parenthesis.
    keyed_averaged_elm = (months
                          | beam.Map(lambda x: (x[ID_INDEX], x))
                          | beam.GroupByKey()
                          | beam.Map(lambda x: (x[0], per_column_average(rows))
    # Count the occurrences of each word.
    counts = (
        unzipped
        | 'Split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
                      .with_output_types(unicode))
        | 'PairWithOne' >> beam.Map(lambda x: (x, 1))
        | 'GroupAndSum' >> beam.CombinePerKey(sum))
    # Format the counts into a PCollection of strings.
    def format_result(word_count):
      (word, count) = word_count
      return '%s: %s' % (word, count)
    output = counts | 'Format' >> beam.Map(format_result)
    # Write the output using a "Write" transform that has side effects.
    # pylint: disable=expression-not-assigned
    output | WriteToText(known_args.output) | d4bacf3a16dc53e3e8b6213e6dbbb2be9a7df2b0 | 3,632,568
def check_skyscrapers(input_path: str):
    """
    Main function to check the status of skyscraper game board.
    Return True if the board status is compliant with the rules,
    False otherwise.
    >>> check_skyscrapers("check.txt")
    True
    """
    lst = read_input(input_path)
    # NOTE(review): check_columns is evaluated twice and the `== True`
    # comparison is redundant.
    if check_columns(lst) and\
       check_uniqueness_in_rows(lst) and\
       check_horizontal_visibility(lst) and\
       check_columns(lst) == True:
        return True
    return False | ff57e649bbd87563fe97e870e304259041f0582f | 3,632,569
from typing import Dict
from typing import Any
import importlib
def load_preprocessor(preproc_params: Dict[str, Any], device: str) -> Module:
    """Load preprocessor from module preprocessors.name
    Returns None when `preproc_params` is None; otherwise imports
    `preprocessors.<name>`, builds the preprocessor from its
    `get_preprocessor` factory and moves it to `device`.
    """
    preproc = None
    if preproc_params is not None:
        # Dynamic import keyed by the configured preprocessor name.
        preproc_module = importlib.import_module(
            f"preprocessors.{preproc_params['name']}")
        get_preprocessor = getattr(preproc_module, "get_preprocessor")
        preproc = get_preprocessor(preproc_params["params"]).to(device)
    return preproc | a6ea8dc293c883f5bcd1841bd1d483b97d393dab | 3,632,570
def get_image_ground_truth(image_id, dataset):
    """Load and return ground truth data for an image (image, mask, bounding boxes).
    Args:
        image_id:
            Image id.
    Returns:
        image:
            [height, width, 3]
        class_ids:
            [instance_count] Integer class IDs
        bbox:
            [instance_count, (y1, x1, y2, x2)]
        mask:
            [height, width, instance_count]. The height and width are those of the image unless
            use_mini_mask is True, in which case they are defined in MINI_MASK_SHAPE.
    """
    # image = load_dicom_image(image_id, to_RGB=True)
    image = load_dicom_image(image_id, to_RGB=True, rescale=True)
    mask, class_ids = load_mask(image_id, dataset)
    # Drop instances whose mask is entirely empty.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = extract_bboxes(mask)
    return image, class_ids, bbox, mask | 3894e5714ceb64c7b414f100e69ac5c3b2b36fb8 | 3,632,571
import html
def update_stream_metadata(stream_names):
    """
    Updates the sidebar with metadata from a board live stream
    """
    # NOTE(review): this branch returns a 1-tuple (trailing comma) while the
    # normal path returns a list -- confirm callers accept both.
    if not stream_names[0]:
        return html.P("Metadata will appear here when you pick a stream"),
    print(f"Getting metadata for {stream_names[0]}")
    metadata = cfg.redis_instance.hgetall(f"metadata:{stream_names[0]}")
    if not metadata:
        raise dash.exceptions.PreventUpdate
    metadata = {k.decode(): v.decode() for k,v in metadata.items()}
    metadata['y_max'] = float(metadata['y_max'] )
    metadata['y_min'] = float(metadata['y_min'] )
    cfg.spec_datas = {}
    cfg.spec_datas['metadata'] = metadata
    # NOTE(review): logging-style %s placeholder passed to print(); it will
    # be printed literally, not interpolated.
    print("Got stream metadata:\n%s", metadata)
    sfreq = float(metadata['sfreq'])
    n_samples = int(metadata['n_samples'])
    cfreq = float(metadata['cfreq'])
    cfg.sa.spec.sample_frequency = sfreq
    cfg.sa.spectrogram.sample_frequency = sfreq
    # TODO: This is likely not the best way to set center frequency for the graphs
    cfg.sa.spec.centre_frequency = sfreq/4
    cfg.sa.spectrogram.centre_frequency = sfreq/4
    cfg.sa.spec.number_samples = n_samples
    cfg.sa.spectrogram.number_samples = n_samples
    # TODO: Set the decimation factor some other way?
    cfg.sa.spectrogram.decimation_factor = 2
    cfg.sa.spec.decimation_factor = 2
    # Human-readable formatting of the sample rate.
    if sfreq > 1e9:
        sfreq = f"{sfreq / 1e9} GHz"
    elif sfreq > 1e6:
        sfreq = f"{sfreq / 1e6} MHz"
    elif sfreq > 1e3:
        sfreq = f"{sfreq / 1e3} kHz"
    else:
        sfreq = f"{sfreq } Hz"
    if cfreq > 1e9:
        cfreq = f"{cfreq / 1e9} GHz"
    elif cfreq > 1e6:
        cfreq = f"{cfreq / 1e6} MHz"
    # NOTE(review): copy-paste bug -- this condition tests `sfreq` (now a
    # string) instead of `cfreq`.
    elif sfreq > 1e3:
        cfreq = f"{cfreq / 1e3} kHz"
    else:
        cfreq = f"{cfreq } Hz"
    children = [
        html.Table([
            html.Tr([
                html.Th("Name:"),
                html.Td(stream_names[0]),
            ]),
            html.Tr([
                html.Th("Sample Rate:"),
                html.Td(sfreq),
            ]),
            html.Tr([
                html.Th(["Center Frequency:"]),
                html.Td([cfreq]),
            ]),
            html.Tr([
                html.Th(["Channel:"]),
                html.Td([cfg.spec_datas['metadata']['channel']]),
            ]),
        ],
        style={'width': '100%'}),
    ]
    return children | f0db85b232d00deebc9f970c9ab92a472e717288 | 3,632,572
def nested_field_map(name: str) -> Mapper:
    """
    Arguments
    ---------
    name : str
        Name of the property.
    Returns
    -------
    Mapper
        Field map.
    See Also
    --------
    field_map
    """
    # Wrap/unwrap a doubly-nested list: python value x <-> API value [[x]].
    return field_map(
        name,
        python_to_api=lambda x: [[x]],
        api_to_python=lambda x: x[0][0],
    ) | 7af0a3e8df4f4bc8228a3473d75bbab527bf0eee | 3,632,573
def socket_state(realsock, waitfor="rw", timeout=0.0):
    """
    <Purpose>
      Checks if the given socket would block on a send() or recv().
      In the case of a listening socket, read_will_block equates to
      accept_will_block.
    <Arguments>
      realsock:
          A real socket.socket() object to check for.
      waitfor:
          An optional specifier of what to wait for. "r" for read only, "w" for write only,
          and "rw" for read or write. E.g. if timeout is 10, and wait is "r", this will block
          for up to 10 seconds until read_will_block is false. If you specify "r", then
          write_will_block is always true, and if you specify "w" then read_will_block is
          always true.
      timeout:
          An optional timeout to wait for the socket to be read or write ready.
    <Returns>
      A tuple, (read_will_block, write_will_block).
    <Exceptions>
      As with select.select(). Probably best to wrap this with is_recoverable_network_exception
      and is_terminated_connection_exception. Throws an exception if waitfor is not in ["r","w","rw"]
    """
    # NOTE(review): Python 2 `raise` syntax -- this whole function is py2-only.
    # Check that waitfor is valid
    if waitfor not in ["rw","r","w"]:
      raise Exception, "Illegal waitfor argument!"
    # Array to hold the socket
    sock_array = [realsock]
    # Generate the read/write arrays
    read_array = []
    if "r" in waitfor:
      read_array = sock_array
    write_array = []
    if "w" in waitfor:
      write_array = sock_array
    # Call select()
    (readable, writeable, exception) = select.select(read_array,write_array,sock_array,timeout)
    # If the socket is in the exception list, then assume its both read and writable
    if (realsock in exception):
      return (False, False)
    # Return normally then
    return (realsock not in readable, realsock not in writeable) | fc2fa9162d3228021c6738e0e2453cabae907899 | 3,632,574
def wrap_with_threadpool(obj, worker_threads=1):
    """
    Wraps a class in an async executor so that it can be safely used in an event loop like asyncio.
    Returns (wrapper, executor); the executor is returned so the caller can
    shut it down when done.
    """
    async_executor = ThreadPoolExecutor(worker_threads)
    return AsyncWrapper(obj, executor=async_executor), async_executor | 744a428535aa70d7b130e12bb9c144aac3df4d96 | 3,632,575
import re
def file_read(lines):
    """ Function for the file reading process
    Strips file to get ONLY the text; No timestamps or sentence indexes added so returned string is only the
    caption text.
    """
    # new_text = ""
    text_list = []
    for line in lines:
        # Keep only lines that are not a numeric index, not a HH:MM:SS
        # timestamp, and not blank.
        if re.search('^[0-9]', line) is None and re.search('^[0-9]{2}:[0-9]{2}:[0-9]{2}', line)\
                is None and re.search(
                '^$', line) is None:
            text_list.append(line.rstrip('\n'))
    return text_list | 7d37bb79c6b1cdd43d7b813e03bf3d8b18f5a6ed | 3,632,576
from sys import exc_info
def ipn(request):
    """PayPal IPN (Instant Payment Notification)
    Cornfirms that payment has been completed and marks invoice as paid.
    Adapted from IPN cgi script provided at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/456361"""
    payment_module = config_get_group('PAYMENT_PAYPAL')
    # Pick live vs. sandbox endpoint/account from configuration.
    if payment_module.LIVE.value:
        log.debug("Live IPN on %s", payment_module.KEY.value)
        url = payment_module.POST_URL.value
        account = payment_module.BUSINESS.value
    else:
        log.debug("Test IPN on %s", payment_module.KEY.value)
        url = payment_module.POST_TEST_URL.value
        account = payment_module.BUSINESS_TEST.value
    PP_URL = url
    try:
        data = request.POST
        log.debug("PayPal IPN data: " + repr(data))
        if not confirm_ipn_data(data, PP_URL):
            return HttpResponse()
        if not 'payment_status' in data or not data['payment_status'] == "Completed":
            # We want to respond to anything that isn't a payment - but we won't insert into our database.
            log.info("Ignoring IPN data for non-completed payment.")
            return HttpResponse()
        # NOTE(review): bare except here silently falls back to item_number.
        try:
            invoice = data['invoice']
        except:
            invoice = data['item_number']
        gross = data['mc_gross']
        txn_id = data['txn_id']
        if not OrderPayment.objects.filter(transaction_id=txn_id).count():
            # If the payment hasn't already been processed:
            order = Order.objects.get(pk=invoice)
            order.add_status(status='New', notes=_("Paid through PayPal."))
            processor = get_processor_by_key('PAYMENT_PAYPAL')
            payment = processor.record_payment(order=order, amount=gross, transaction_id=txn_id)
            if 'memo' in data:
                if order.notes:
                    notes = order.notes + "\n"
                else:
                    notes = ""
                order.notes = notes + _('---Comment via Paypal IPN---') + u'\n' + data['memo']
                order.save()
                log.debug("Saved order notes from Paypal")
            # Run only if subscription products are installed
            if 'product.modules.subscription' in settings.INSTALLED_APPS:
                for item in order.orderitem_set.filter(product__subscriptionproduct__recurring=True, completed=False):
                    item.completed = True
                    item.save()
        # We no longer empty the cart here.  We do it on checkout success.
    except:
        log.exception(''.join(format_exception(*exc_info())))
    return HttpResponse() | d04e6b8a5bf08a59c081d912e531be2b7e7539ab | 3,632,577
def has_file_ext(view, ext):
    """Returns ``True`` if view has file extension ``ext``.
    ``ext`` may be specified with or without leading ``.``.
    """
    # Reject views without a file name and effectively-empty extensions
    # (whitespace/dots only).
    if not view.file_name() or not ext.strip().replace('.', ''):
        return False
    if not ext.startswith('.'):
        ext = '.' + ext
    return view.file_name().endswith(ext) | 043edf03874d1ec20e08fcb5795fd205206f7194 | 3,632,578
def balanced_accuracy_score(y_true: np.array, y_score: np.array) -> float:
    """
    Calculate the balanced accuracy for a ground-truth prediction vector pair.
    Thin wrapper around sklearn.metrics.balanced_accuracy_score.
    Args:
        y_true (array-like): An N x 1 array of ground truth values.
        y_score (array-like): An N x 1 array of predicted values.
    Returns:
        balanced_accuracy (float): The value of the balanced accuracy score.
    """
    balanced_accuracy = sklearn.metrics.balanced_accuracy_score(y_true, y_score)
    return balanced_accuracy | 4c7a17e5a5706b8b8cf65d15db51283d7873aca0 | 3,632,579
import re
def VOLTS(text):
    """ Parse all voltages in tegrastats output
    [VDD_name] X/Y
    X = Current power consumption in milliwatts.
    Y = Average power consumption in milliwatts.
    """
    # VOLT_RE is a module-level regex with (name, cur, avg) capture groups.
    return {name: {'cur': int(cur), 'avg': int(avg)} for name, cur, avg in re.findall(VOLT_RE, text)} | f79934a037b2d995974e833c8b7b045e195637d4 | 3,632,580
from typing import Union
async def get_team_id(user_id: int) -> Union[int, None]:
    """Return the team id of a user based on their user id.
    Returns None when the user document is missing or has no team_id field.
    """
    # Projection limits the result to the team_id field only.
    data = await users.find_one(
        {"user_id": user_id},
        {"team_id": 1, "_id": 0},
    )
    if data:
        team_id = data.get("team_id")
    else:
        team_id = None
    return team_id | e0905e65edc6ff84d35d25ec43eb98f3898295af | 3,632,581
def _clone_static_fields(ex: TensorDict,) -> TensorDict:
  """Clone static fields to each ray.
  Args:
    ex: A single-camera or multi-camera example. Must have the following fields
      -- frame_name, scene_name.
  Returns:
    Modified version of `ex` with `*_name` features cloned once per pixel.
  """
  # Identify batch shape.
  batch_shape: tf.TensorShape = ex['color_image'].shape[0:-1]
  # TODO(duckworthd): Duplicate ALL static fields, including those specified
  # in additional_frame_specs.
  def _clone(v):
    # Broadcast the scalar value to one entry per pixel/ray.
    return tf.fill(batch_shape, v)
  # Clone individual fields.
  ex['scene_name'] = _clone(ex['scene_name'])
  ex['frame_name'] = _clone(ex['frame_name'])
  ex['camera_name'] = _clone(ex['camera_name'])
  return ex | 126d564e4704ee3a7c878630cadcd3adcb58eeaf | 3,632,582
def get_atom_types_selected(smi_file, database):
    """ Determines the atom types present in an input SMILES file.
    Filters molecules to those containing only the database's allowed
    elements and writes them to `<smi_file>_selected-atoms.smi`.
    Args:
      smi_file (str) : Full path/filename to SMILES file.
      database (str) : Either "GDB-13" or "MOSES"; selects the element set.
    Returns:
      (n_mols, atom_types): count of molecules written and the element symbols.
    """
    # list of atom types to be selected
    if database == "GDB-13":
        atom_types = ['H', 'C', 'N', 'O', 'Cl']
        pt = rdkit.Chem.GetPeriodicTable()
        atom_types = [pt.GetAtomicNumber(atom) for atom in atom_types]
    elif database == "MOSES":
        atom_types = ['H', 'C', 'N', 'O', 'F', 'Cl', 'Br'] #like MOSES
        pt = rdkit.Chem.GetPeriodicTable()
        atom_types = [pt.GetAtomicNumber(atom) for atom in atom_types]
    else:
        raise NotImplementedError
    molecules = load_molecules(path=smi_file)
    n_mols = 0
    filename = smi_file[:-4] + '_selected-atoms.smi'
    # NOTE(review): the writer is never closed; consider close() or a
    # context manager. The bare except below silently drops write failures.
    smi_writer = rdkit.Chem.rdmolfiles.SmilesWriter(filename, includeHeader=False)
    for mol in molecules:
        flag = 0
        for atom in mol.GetAtoms():
            if atom.GetAtomicNum() not in atom_types:
                flag = 1
                break
        if flag == 0:
            if mol is not None:
                try:
                    smi_writer.write(mol)
                    n_mols += 1
                except:
                    pass
    atom_types = [pt.GetElementSymbol(atom) for atom in atom_types]
    # return the symbols, for convenience
    return n_mols, atom_types | 26a92a44db7c4f187f21e6dfe8dd64694fabc29a | 3,632,583
def run_profile(times, schedule, msid, model_spec, init, pseudo=None):
    """ Run a Xija model for a given time and state profile.
    :param times: Array of time values, in seconds from '1997:365:23:58:56.816' (cxotime.CxoTime epoch)
    :type times: np.ndarray
    :param schedule: Dictionary of pitch, roll, etc. values that match the time values specified above in `times`
    :type schedule: dict
    :param msid: Primary MSID for model being run
    :type msid: str
    :param model_spec: Dictionary of model parameters or file location where parameters can be imported
    :type model_spec: dict or string
    :param init: Dictionary of Xija model initialization parameters, can be empty but not recommended
    :type init: dict
    :param pseudo: Name of one or more pseudo MSIDs used in the model, if any, only necessary if one
        wishes to retrieve model results for this pseudo node, if it exists
    :type pseudo: str or None, optional
    :returns: Results, keys are node names (e.g. 'aacccdpt', 'aca0'), values are Xija model component objects
    :rtype: dict
    Example::
        times = np.array(CxoTime(['2019:001:00:00:00', '2019:001:12:00:00', '2019:002:00:00:00',
                                  '2019:003:00:00:00']).secs)
        pitch = np.array([150, 90, 156, 156])
        schedule = {'pitch': pitch}
        model_specs = load_model_specs()
        init = {'1dpamzt': 20., 'dpa0': 20., 'eclipse': False, 'roll': 0, 'vid_board': True, 'clocking': True,
                'fep_count': 5, 'ccd_count': 5, 'sim_z': 100000}
        results = run_profile(times, pitch, '1dpamzt', model_specs['1dpamzt'], init, pseudo='dpa0')
    Note:
        Any parameters specified in `init` will be overwritten by those specified in the body of this function, if they
        happen to be defined in both places.
    """
    model = setup_model(msid, times[0], times[-1], model_spec, init)
    # Feed each scheduled state (pitch, roll, ...) into its model component.
    for key, value in schedule.items():
        model.comp[key].set_data(value, times=times)
    model.make()
    model.calc()
    tmsid = model.get_comp(msid)
    results = {msid: tmsid}
    if pseudo is not None:
        results[pseudo] = model.get_comp(pseudo)
    return results | 92ffe057738183d50aac40693d572a232354e621 | 3,632,584
def extract_optimized_structure(out_file, n_atoms, atom_labels):
    """
    After waiting for the constrained optimization to finish, the
    resulting structure from the constrained optimization is
    extracted and saved as .xyz file ready for TS optimization.
    Returns (optimized_xyz_file, optimized_energy); energy is None when no
    'SCF Done:' line is found.
    """
    optimized_xyz_file = out_file[:-4]+".xyz"
    optimized_energy = None
    with open(out_file, 'r') as ofile:
        line = ofile.readline()
        while line:
            if 'SCF Done:' in line:
                optimized_energy = line.split()[4]
            # Keep overwriting `coordinates`, so the last geometry block wins.
            if 'Standard orientation' in line or 'Input orientation' in line:
                coordinates = np.zeros((n_atoms, 3))
                for i in range(5):
                    line = ofile.readline()
                for i in range(n_atoms):
                    coordinates[i, :] = np.array(line.split()[-3:])
                    line = ofile.readline()
            line = ofile.readline()
    with open(optimized_xyz_file, 'w') as _file:
        _file.write(str(n_atoms)+'\n\n')
        for i in range(n_atoms):
            _file.write(atom_labels[i])
            for j in range(3):
                _file.write(' '+"{:.5f}".format(coordinates[i, j]))
            _file.write('\n')
    print("optimized energy ("+out_file+") = "+str(optimized_energy))
    return optimized_xyz_file, optimized_energy | 203dfd85987c29ec4f2479ca47be0d497a230480 | 3,632,585
def get_genes(exp_file, samples, threshold, max_only):
    """
    Reads in and parses the .bed expression file.
    File format expected to be:
        Whose format is tab seperated columns with header line:
        CHR    START    STOP    GENE    <sample 1>    <sample 2>    ...    <sample n>
    Args:
        exp_file (str): Name of expression file.
        samples (list): Names of the samples in the vcf file.
        threshold (float): Expression threshold to filter lowly/unexpressed genes.
        max_only (bool): if true, gene_dict value is 1 value = max expression
                         if false gene_dict value is list of expression values
                             YYY: WARNING: if want list to have meaning
                             then values needs to be tied to header sample names
    Returns:
        gene_dict (dict): {gene_name: [expression_vals]}.
            Only include values for samples in the vcf.
    """
    data_cols = []
    gene_dict = {}
    print('start read exp_file:' + format(exp_file))
    if max_only:
        # read and only return max exp value in gene_dict
        with open(exp_file) as f:
            header = f.readline().strip().split('\t')
            for samp in header[4:]:
                if samp in samples:
                    data_idx = header.index(samp)
                    data_cols.append(data_idx)
            # Read in expression levels for each gene.
            for line in f:
                line = line.strip().split('\t')
                gene_name = line[3].upper()
                # -1e1000 overflows to -inf, so any real value replaces it.
                exp_val = -1e1000
                for idx in data_cols:
                    if float(line[idx]) > exp_val:
                        exp_val = float(line[idx])
                gene_dict[gene_name] = exp_val
    else:
        # read and return exp value list in gene_dict
        with open(exp_file) as f:
            header = f.readline().strip().split('\t')
            for samp in header[4:]:
                if samp in samples:
                    data_idx = header.index(samp)
                    data_cols.append(data_idx)
            # Read in expression levels for each gene.
            for line in f:
                line = line.strip().split('\t')
                gene_name = line[3].upper()
                exp_vals = []
                for idx in data_cols:
                    exp_vals.append(line[idx])
                gene_dict[gene_name] = exp_vals
    # NOTE(review): `threshold` is documented but never applied here.
    return gene_dict | 62b27eef9c863078c98dee0d09bada5e058909e2 | 3,632,586
def conv_name_to_c(name):
    """Convert a device-tree name to a C identifier
    This uses multiple replace() calls instead of re.sub() since it is faster
    (400ms for 1m calls versus 1000ms for the 're' version).
    Args:
        name: Name to convert
    Return:
        String containing the C version of this name
    """
    # Map every character that is illegal in a C identifier.
    new = name.replace('@', '_at_')
    new = new.replace('-', '_')
    new = new.replace(',', '_')
    new = new.replace('.', '_')
    return new | 150af670d8befea7374bbb5b13da9d6e0734863e | 3,632,587
from typing import Tuple
from typing import Optional
from typing import List
import io
from re import I
import textwrap
def generate(
    symbol_table: intermediate.SymbolTable, namespace: csharp_common.NamespaceIdentifier
) -> Tuple[Optional[str], Optional[List[Error]]]:
    """
    Generate the C# code of the visitors based on the intermediate representation
    The ``namespace`` defines the AAS C# namespace.
    """
    # NOTE(review): `from re import I` shadows what is presumably an
    # indentation constant `I` (used with `II` below) -- confirm the
    # intended import.
    blocks = [csharp_common.WARNING]  # type: List[Rstripped]
    writer = io.StringIO()
    writer.write(f"namespace {namespace}\n{{\n")
    writer.write(f"{I}public static class Visitation\n" f"{I}{{\n")
    # One generated code section per visitor/transformer flavor.
    visitation_blocks = [
        _generate_ivisitor(symbol_table=symbol_table),
        _generate_visitor_through(symbol_table=symbol_table),
        _generate_abstract_visitor(symbol_table=symbol_table),
        _generate_ivisitor_with_context(symbol_table=symbol_table),
        _generate_abstract_visitor_with_context(symbol_table=symbol_table),
        _generate_itransformer(symbol_table=symbol_table),
        _generate_abstract_transformer(symbol_table=symbol_table),
        _generate_itransformer_with_context(symbol_table=symbol_table),
        _generate_abstract_transformer_with_context(symbol_table=symbol_table),
    ]
    for i, visitation_block in enumerate(visitation_blocks):
        if i > 0:
            writer.write("\n\n")
        writer.write(textwrap.indent(visitation_block, II))
    writer.write(f"\n{I}}}  // public static class Visitation")
    writer.write(f"\n}}  // namespace {namespace}")
    blocks.append(Stripped(writer.getvalue()))
    blocks.append(csharp_common.WARNING)
    out = io.StringIO()
    for i, block in enumerate(blocks):
        if i > 0:
            out.write("\n\n")
        assert not block.startswith("\n")
        assert not block.endswith("\n")
        out.write(block)
    out.write("\n")
    return out.getvalue(), None | 53e905a0ad37b5f6e47220439747a004db7f8203 | 3,632,588
def get_account_id(role_arn):
    """
    Returns the account ID for a given role ARN.
    Raises ValueError when the string is not a well-formed IAM ARN.
    """
    # The format of an IAM role ARN is
    #
    #     arn:partition:service:region:account:resource
    #
    # Where:
    #
    #   - 'arn' is a literal string
    #   - 'service' is always 'iam' for IAM resources
    #   - 'region' is always blank for IAM resources
    #   - 'account' is the AWS account ID with no hyphens
    #
    # See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
    try:
        arn, _, service, region, account, _ = role_arn.split(":")
    except ValueError:
        raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
    if arn != "arn":
        raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
    if service != "iam" or region != "" or not account.isnumeric():
        raise ValueError(f"Is this an IAM role ARN? {role_arn}")
    return account | 623eb66eefd59b9416deb478c527062ae4454df7 | 3,632,589
def retrieve_context_topology_node_total_potential_capacity_total_potential_capacity(uuid, node_uuid):  # noqa: E501
    """Retrieve total-potential-capacity
    Retrieve operation of resource: total-potential-capacity # noqa: E501
    :param uuid: ID of uuid
    :type uuid: str
    :param node_uuid: ID of node_uuid
    :type node_uuid: str
    :rtype: Capacity
    """
    # Generated server stub -- implementation is intentionally a placeholder.
    return 'do some magic!' | 5a4cdee9e14783598ad622fd7faacc5c11b2ed70 | 3,632,590
def GHP_Op_max(Q_max_GHP_W, tsup_K, tground_K):
    """
    For the operation of a Geothermal heat pump (GSHP) at maximum capacity supplying DHN.
    :type tsup_K : float
    :param tsup_K: supply temperature to the DHN (hot)
    :type tground_K : float
    :param tground_K: ground temperature
    :type nProbes: float
    :param nProbes: bumber of probes
    :rtype qhotdot: float
    :returns qhotdot: heating energy provided from GHSP
    :rtype COP: float
    :returns COP: coefficient of performance of GSHP
    """
    # Carnot-based COP scaled by exergetic efficiency HP_ETA_EX; the
    # condenser approach HP_DELTA_T_COND is added to the supply temperature.
    COP = HP_ETA_EX * (tsup_K + HP_DELTA_T_COND) / ((tsup_K + HP_DELTA_T_COND) - tground_K)
    qhotdot_Wh = Q_max_GHP_W /( 1 - ( 1 / COP ) )
    return qhotdot_Wh, COP | 3025a70d8d32030cb098b2087e0d9e0eef16b315 | 3,632,591
def attention_lm_decoder(decoder_input,
                         decoder_self_attention_bias,
                         hparams,
                         name="decoder"):
  """A stack of attention_lm layers.
  Args:
    decoder_input: a Tensor
    decoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string
  Returns:
    y: a Tensors
  """
  x = decoder_input
  with tf.variable_scope(name):
    # Each layer: pre-norm self-attention + pre-norm feed-forward, each
    # followed by the configured residual/postprocess step.
    for layer in range(hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = common_attention.multihead_attention(
              common_layers.layer_preprocess(
                  x, hparams), None, decoder_self_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
          x = common_layers.layer_postprocess(x, y, hparams)
        with tf.variable_scope("ffn"):
          y = common_layers.conv_hidden_relu(
              common_layers.layer_preprocess(x, hparams),
              hparams.filter_size,
              hparams.hidden_size,
              dropout=hparams.relu_dropout)
          x = common_layers.layer_postprocess(x, y, hparams)
    return common_layers.layer_preprocess(x, hparams) | 90ff631cdf8898dfde86e965ad70c317936f0b1c | 3,632,592
from typing import Any
def list_to_dict(data: list, value: Any = {}) -> dict:
    """Convert list to a dictionary.
    Parameters
    ----------
    data: list
        Data type to convert
    value: typing.Any
        Default value for the dict keys
    Returns
    -------
    dictionary : dict
        Dictionary of the input data
    """
    # NOTE(review): the default `{}` is a single shared object used as
    # every value; mutating one entry mutates all of them.
    return {item: value for item in data} | 1e73bb6ca98b5e2d9b1e0f8d4cb19fc044a9ce63 | 3,632,593
def Routing_Meta():
    """Routing_Meta() -> MetaObject
    Thin wrapper over the _DataModel extension module.
    """
    return _DataModel.Routing_Meta() | f5fc17eb8dc8e428e03ec6fe37cb9dec2c32f355 | 3,632,594
def get_tag_name(tag):
    """
    Extract the name portion of a tag URI.
    Takes the substring between the last '/' and the last '-' of the tag.
    Parameters
    ----------
    tag : str
    Returns
    -------
    str
    """
    # NOTE(review): if '/' or '-' is absent, rfind() returns -1 and the
    # slice silently yields an unexpected substring.
    return tag[tag.rfind("/") + 1:tag.rfind("-")] | e24f0ae84ed096ec71f860291d1e476c75bf8370 | 3,632,595
import requests
def create_user(token, user_name, maps_to_id):
    """
    Creates the user account in Keycloak
    Returns the created user's JSON representation, fetched from the
    Location header of the creation response.
    """
    users_url = '{keycloak}/auth/admin/realms/{realm}/users'.format(
        keycloak=KEYCLOAK['SERVICE_ACCOUNT_KEYCLOAK_API_BASE'],
        realm=KEYCLOAK['SERVICE_ACCOUNT_REALM'])
    headers = {'Authorization': 'Bearer {}'.format(token)}
    data = {
        'enabled': True,
        'username': user_name,
        'attributes': {
            'user_id': maps_to_id
        }
    }
    response = requests.post(users_url,
                             headers=headers,
                             json=data)
    # NOTE(review): only 204 is accepted here -- confirm against the
    # Keycloak admin API, which may respond 201 Created for this endpoint.
    if response.status_code != 204:
        raise RuntimeError(
            'bad response code: {}'.format(response.status_code))
    created_user_response = requests.get(response.headers['Location'],
                                         headers=headers)
    return created_user_response.json()['id'] | e7a4b9cce99343156dc3933d7726cbc8ff5a1597 | 3,632,596
import os
def construct_url(test):
    """Construct URL for the REST API call.
    Reads the base URL from the environment variable named by test["Server"];
    returns None (and logs an error) when that variable is unset.
    """
    server_env_var = test["Server"]
    server_url = os.environ.get(server_env_var)
    if server_url is None:
        log.error("The following environment variable is not set {var}".format(var=server_env_var))
        return None
    # add_slash is an external helper that normalises trailing slashes.
    url = "{base}{prefix}{method}".format(base=add_slash(server_url),
                                          prefix=add_slash(test["Prefix"]),
                                          method=test["Endpoint"])
    return url | 2623313d9c34170b87bfb756905da2802cc1b805 | 3,632,597
def view_profile(request, username=None):
    """view a user's profile
    Without a username, falls back to the logged-in user; otherwise 404s
    when the named user does not exist.
    """
    message = "You must select a user or be logged in to view a profile."
    if not username:
        if not request.user:
            messages.info(request, message)
            return redirect("collections")
        user = request.user
    else:
        user = get_object_or_404(User, username=username)
    context = {"profile": user}
    return render(request, "users/profile.html", context) | 5ab171f5d1f414100b8c8e36b652511b3df37a9b | 3,632,598
import torch
def bf_shannon_entropy(w: 'Tensor[N, N]') -> 'Tensor[1]':
    """
    Compute the Shannon entropy of w.
    Warning: this method is very inefficient.
    It should only be used on small examples, e.g., for testing purposes.
    """
    # Brute force: accumulate the partition function Z and the weighted
    # log-weight sum H over all single-root spanning trees, then
    # H(p) = log Z - (1/Z) * sum_T w_T log w_T.
    Z = torch.zeros(1).double().to(device)
    H = torch.zeros(1).double().to(device)
    for _, weight in all_single_root_trees(w):
        Z += weight
        H += weight * torch.log(weight)
    return torch.log(Z) - H / Z | b606c97cd43ead270b82d73bf9af2a0aed2b9a08 | 3,632,599
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.