| content | sha1 | id |
|---|---|---|
from typing import List
# NOTE: BAD_TOKS is assumed to be a module-level list of special tokens
# (e.g. padding / start / end markers); an empty default keeps this runnable.
BAD_TOKS: List[str] = []
def tokens_to_smiles(tokens: List[str], special_tokens: List[str] = BAD_TOKS) -> str:
"""Combine tokens into valid SMILES string, filtering out special tokens
Args:
tokens: Tokenized SMILES
special_tokens: Tokens to not count as atoms
Returns:
SMILES representation of provided tokens, without the special tokens
"""
bad_toks = set(special_tokens)
return "".join([t for t in tokens if t not in bad_toks]) | ac266d84808bd20cc80e4828372c1591bef63591 | 3,635,500 |
def _get_port_by_uuid(client, port_uuid, **params):
"""Return a neutron port by UUID.
:param client: A Neutron client object.
:param port_uuid: UUID of a Neutron port to query.
:param params: Additional parameters to pass to the neutron client
show_port method.
:returns: A dict describing the neutron port.
:raises: InvalidParameterValue if the port does not exist.
:raises: NetworkError on failure to contact Neutron.
"""
try:
port = client.show_port(port_uuid, **params)
except neutron_exceptions.PortNotFoundClient:
raise exception.InvalidParameterValue(
_('Neutron port %(port_uuid)s was not found') %
{'port_uuid': port_uuid})
except neutron_exceptions.NeutronClientException as exc:
raise exception.NetworkError(_('Could not retrieve neutron port: %s') %
exc)
return port['port'] | d6857d5c6902043a1baebf582024490e70a01bd3 | 3,635,501 |
def is_unit_by_year(text: str) -> bool:
    """
    Whether the given text is a time unit measured in years.
    @param text: the time-unit text to check
    @return: True if the unit is years
    @rtype: bool
    """
    log.info(f'invoke method -> is_unit_by_year(), time unit text: {text}')
    try:
        unit = DateUnit(text.strip())
    except ValueError as e:
        log.error(str(e))
        return False
    return unit == DateUnit.YEAR | 7be058b4be02a07da0ae88f7414f58f7f0eba605 | 3,635,502 |
import numpy as np
import tensorflow as tf
def merge_leading_dims(array_or_tensor, n_dims=2):
"""Merge the first dimensions of a tensor.
Args:
array_or_tensor: Tensor to have its first dimensions merged. Can also
be an array or numerical value, which will be converted to a tensor
for batch application, if needed.
n_dims: Number of dimensions to merge.
Returns:
Either the input value converted to a Tensor, with the requested dimensions
merged, or the unmodified input value if the input has less than `n_dims`
dimensions.
Raises:
ValueError: If the rank of `array_or_tensor` is not well-defined.
"""
tensor = tf.convert_to_tensor(array_or_tensor)
tensor_shape_static = tensor.get_shape()
# Check if the rank of the input tensor is well-defined.
if tensor_shape_static.dims is None:
raise ValueError("Can't merge leading dimensions of tensor of unknown "
"rank.")
tensor_shape_list = tensor_shape_static.as_list()
# We can only merge the n_dims leading dimensions if the rank of the given
# tensor is sufficiently large.
if n_dims > len(tensor_shape_list):
return array_or_tensor
if tensor_shape_static.is_fully_defined():
new_shape = (
[np.prod(tensor_shape_list[:n_dims])] + tensor_shape_list[n_dims:])
return tf.reshape(tensor, new_shape)
# Shape can't be inferred statically.
tensor_shape = tf.shape(tensor)
new_first_dim = tf.reduce_prod(tensor_shape[:n_dims], keepdims=True)
other_dims = tensor_shape[n_dims:]
new_size = tf.concat([new_first_dim, other_dims], 0)
result = tf.reshape(tensor, new_size)
if all(value is not None for value in tensor_shape_list[:n_dims]):
merged_leading_size = np.prod(tensor_shape_list[:n_dims])
else:
merged_leading_size = None
# We need to set the result size of this, as otherwise we won't be able to
# pass to e.g. a Linear. Here we need to know at least the rank of the tensor.
result.set_shape([merged_leading_size] + tensor_shape_list[n_dims:])
return result | dcf80aaa00cad4b49ecdddbe137ce9d5d8fccec8 | 3,635,503 |
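# Minimal sketch of merge_leading_dims on a statically shaped tensor: a
# [batch, time, features] input is flattened to [batch * time, features],
# e.g. before applying a per-timestep dense layer.
x = tf.zeros([4, 5, 16])
merged = merge_leading_dims(x, n_dims=2)
print(merged.shape)  # (20, 16)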
def ootf_inverse_HLG_BT2100_1(F_D, L_B=0, L_W=1000, gamma=None):
"""
Defines *Recommendation ITU-R BT.2100* *Reference HLG* inverse opto-optical
transfer function (OOTF / OOCF) as given in *ITU-R BT.2100-1*.
Parameters
----------
F_D : numeric or array_like
:math:`F_D` is the luminance of a displayed linear component
:math:`{R_D, G_D, or B_D}`, in :math:`cd/m^2`.
L_B : numeric, optional
:math:`L_B` is the display luminance for black in :math:`cd/m^2`.
L_W : numeric, optional
:math:`L_W` is nominal peak luminance of the display in :math:`cd/m^2`
for achromatic pixels.
gamma : numeric, optional
System gamma value, 1.2 at the nominal display peak luminance of
:math:`1000 cd/m^2`.
Returns
-------
numeric or ndarray
:math:`E` is the signal for each colour component
:math:`{R_S, G_S, B_S}` proportional to scene linear light and scaled
by camera exposure.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``F_D`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``E`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Borer2017a`, :cite:`InternationalTelecommunicationUnion2017`
Examples
--------
>>> ootf_inverse_HLG_BT2100_1(63.095734448019336) # doctest: +ELLIPSIS
0.1000000...
>>> ootf_inverse_HLG_BT2100_1(63.105103490674857, 0.01)
... # doctest: +ELLIPSIS
0.0999999...
"""
F_D = np.atleast_1d(to_domain_1(F_D))
if F_D.shape[-1] != 3:
usage_warning(
'"Recommendation ITU-R BT.2100" "Reference HLG OOTF" uses '
'RGB Luminance in computations and expects a vector input, thus '
'the given input array will be stacked to compose a vector for '
'internal computations but a single component will be output.')
R_D = G_D = B_D = F_D
else:
R_D, G_D, B_D = tsplit(F_D)
Y_D = np.sum(WEIGHTS_BT2100_HLG * tstack([R_D, G_D, B_D]), axis=-1)
alpha = L_W - L_B
beta = L_B
if gamma is None:
gamma = gamma_function_HLG_BT2100(L_W)
Y_D_beta = (np.abs((Y_D - beta) / alpha) ** ((1 - gamma) / gamma))
R_S = np.where(
Y_D == beta,
0.0,
Y_D_beta * (R_D - beta) / alpha,
)
G_S = np.where(
Y_D == beta,
0.0,
Y_D_beta * (G_D - beta) / alpha,
)
B_S = np.where(
Y_D == beta,
0.0,
Y_D_beta * (B_D - beta) / alpha,
)
if F_D.shape[-1] != 3:
return as_float(from_range_1(R_S))
else:
RGB_S = tstack([R_S, G_S, B_S])
return from_range_1(RGB_S) | 419a7e2f21745849bc63a7491fc144b7714065a4 | 3,635,504 |
def get_categories_for_area(area_id):
"""
Return a list of rows from the category table that all contain the given area.
"""
return request_or_fail("/area/" + str(area_id) + "/category") | 4841d072d41eae51e4d943207935f79ca9e67b57 | 3,635,505 |
def iou(box1, box2, x1y1x2y2=True):
""" iou = intersection / union """
if x1y1x2y2:
# min and max of 2 boxes
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else: # (x, y, w, h)
mx = min(box1[0] - box1[2] / 2, box2[0] - box2[2] / 2)
Mx = max(box1[0] + box1[2] / 2, box2[0] + box2[2] / 2)
my = min(box1[1] - box1[3] / 2, box2[1] - box2[3] / 2)
My = max(box1[1] + box1[3] / 2, box2[1] + box2[3] / 2)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
    carea = cw * ch
uarea = area1 + area2 - carea
return carea / uarea | 6ad0d3d7dd3a3031d28f8a0b9d0075ecf9362792 | 3,635,506 |
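# Hedged example for iou() with corner-format boxes: two 1x1 squares offset by
# half a width intersect in a 0.5 x 1.0 region, so IoU = 0.5 / 1.5 = 1/3.
box_a = [0.0, 0.0, 1.0, 1.0]
box_b = [0.5, 0.0, 1.5, 1.0]
print(iou(box_a, box_b))  # ~0.3333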
def mock_get_data(mocker, remote_path):
"""Mock the get_data funcion of SegmentClient class.
Arguments:
mocker: The mocker fixture.
remote_path: The remote path of data.
Returns:
The patched mocker and response data.
"""
response_data = [RemoteData(remote_path=remote_path)]
return (
mocker.patch(
f"{segment.__name__}.SegmentClient.get_data",
return_value=response_data,
),
response_data,
) | 586493a65f6428ccbd2f904a87c9c39609cddec2 | 3,635,507 |
def gen_string(**kwargs) -> str:
"""
Generates the string to put in the secrets file.
"""
return f"""\
apiVersion: v1
kind: Secret
metadata:
name: keys
namespace: {kwargs['namespace']}
type: Opaque
data:
github_client_secret: {kwargs.get('github_client_secret')}
""" | ed2702c171f20b9f036f07ec61e0a4d74424ba03 | 3,635,508 |
import random
from itertools import chain
def draw_one_group_members(applications, winners_num, set_just=True,
**kwargs):
"""internal function
decide win (waiting) or lose for each group
"""
    target_status = kwargs.get('target_status', "pending")
    win_status = kwargs.get('win_status', "won")
    lose_status = kwargs.get('lose_status', "lose")
target_apps = [app for app in applications if app.status == target_status]
winner_apps = []
loser_apps = []
winner_reps = []
loser_reps = []
def set_group_result(rep, is_won):
if is_won:
status, to_apps, to_reps = win_status, winner_apps, winner_reps
else:
status, to_apps, to_reps = lose_status, loser_apps, loser_reps
rep.set_status(status)
to_apps.append(rep) # record results
to_reps.append(rep)
for member in rep.group_members:
member.own_application.set_status(status)
to_apps.append(member.own_application)
def unset_group_result(rep, from_apps, from_reps):
from_apps.remove(rep) # remove recorded old results
from_reps.remove(rep)
for member in rep.group_members:
from_apps.remove(member.own_application)
reps = [app for app in target_apps if app.is_rep]
probability_dict = get_probability_dict(target_apps, winners_num)
for i, rep in enumerate(reps):
set_group_result(rep,
random.random() < probability_dict[rep])
n_group_members = sum(len(rep.group_members) + 1
for rep in reps)
n_normal_users = len(target_apps) - n_group_members
def adjust():
# when too few groups accidentally won
while loser_reps and len(winner_apps) < winners_num - n_normal_users:
new_winner = random.choice(loser_reps)
unset_group_result(new_winner, loser_apps, loser_reps)
set_group_result(new_winner, True)
# when too many groups accidentally won
while len(winner_apps) > winners_num:
new_loser = random.choice(winner_reps)
unset_group_result(new_loser, winner_apps, winner_reps)
set_group_result(new_loser, False)
    if not set_just:
        adjust()
    for user in chain(winner_apps, loser_apps):
        db.session.add(user)
    return winner_apps | 02c3f0edefea004ad731df0eae7b6aedb95a5ad2 | 3,635,509 |
def get_props_from_row(row):
"""Return a dict of key/value pairs that are props, not links."""
    return {k: v for k, v in row.items() if "." not in k and v != ""} | a93dfbd1ef4dc87414492b7253b1ede4e4cc1888 | 3,635,510 |
def generate_url(resource, bucket_name, object_name, expire=3600):
"""Generate URL for bucket or object."""
client = resource.meta.client
url = client.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": object_name},
ExpiresIn=expire,
)
return url | 8a74618d5cfcd39c8394577035b497ecb5835765 | 3,635,511 |
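# Usage sketch for generate_url (bucket and key names below are illustrative,
# not from the original project): presign a 15-minute download link.
# import boto3
# s3 = boto3.resource("s3")
# url = generate_url(s3, "my-bucket", "reports/2021.csv", expire=900)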
import datetime as dt
from typing import Iterable
from typing import List
def khal_list(
collection,
daterange: Iterable[str] = None,
conf: dict = None,
agenda_format=None,
    day_format: str = None,
once=False,
notstarted: bool = False,
width: bool = False,
env=None,
datepoint=None,
):
"""returns a list of all events in `daterange`"""
assert daterange is not None or datepoint is not None
assert conf is not None
# because empty strings are also Falsish
if agenda_format is None:
agenda_format = conf['view']['agenda_event_format']
if daterange is not None:
if day_format is None:
day_format = conf['view']['agenda_day_format']
start, end = start_end_from_daterange(
daterange, conf['locale'],
default_timedelta_date=conf['default']['timedelta'],
default_timedelta_datetime=conf['default']['timedelta'],
)
logger.debug(f'Getting all events between {start} and {end}')
elif datepoint is not None:
if not datepoint:
datepoint = ['now']
try:
start, allday = parse_datetime.guessdatetimefstr(
datepoint, conf['locale'], dt.date.today(),
)
except ValueError:
            raise FatalError(f"Invalid value of `{' '.join(datepoint)}` for a datetime")
if allday:
logger.debug(f'Got date {start}')
raise FatalError('Please supply a datetime, not a date.')
end = start + dt.timedelta(seconds=1)
if day_format is None:
day_format = style(
start.strftime(conf['locale']['longdatetimeformat']),
bold=True,
)
logger.debug(f'Getting all events between {start} and {end}')
event_column: List[str] = []
once = set() if once else None
if env is None:
env = {}
original_start = conf['locale']['local_timezone'].localize(start)
while start < end:
if start.date() == end.date():
day_end = end
else:
day_end = dt.datetime.combine(start.date(), dt.time.max)
current_events = get_events_between(
collection, locale=conf['locale'], agenda_format=agenda_format, start=start,
end=day_end, notstarted=notstarted, original_start=original_start,
env=env,
seen=once,
width=width,
)
if day_format and (conf['default']['show_all_days'] or current_events):
if len(event_column) != 0 and conf['view']['blank_line_before_day']:
event_column.append('')
event_column.append(format_day(start.date(), day_format, conf['locale']))
event_column.extend(current_events)
start = dt.datetime(*start.date().timetuple()[:3]) + dt.timedelta(days=1)
if event_column == []:
event_column = [style('No events', bold=True)]
return event_column | e199512958b023456386891aab2e404320b324c9 | 3,635,512 |
from datetime import datetime, timezone
def get_current_time(tzinfo=timezone.utc):
"""Get current time."""
return datetime.utcnow().replace(tzinfo=tzinfo) | b77b32e1e11060dd3a5a68c7fba0b391d92f864d | 3,635,513 |
import math
def F7(x):
"""Easom function"""
s = -math.cos(x[0])*math.cos(x[1])*math.exp(-(x[0] - math.pi)**2 - (x[1]-math.pi)**2)
return s | a17060f046df9c02690e859e789b7ef2591d1a3c | 3,635,514 |
def get_local_real_format():
"""
Returns : char **rf,int *rflen
*args :
C prototype: int cbf_get_local_real_format (char ** real_format );
CBFLib documentation:
DESCRIPTION
cbf_get_local_integer_byte_order returns the byte order of integers
on the machine on which the API is being run in the form of a
character string returned as the value pointed to by byte_order.
cbf_get_local_real_byte_order returns the byte order of reals on the
machine on which the API is being run in the form of a character
string returned as the value pointed to by byte_order.
cbf_get_local_real_format returns the format of floats on the machine
on which the API is being run in the form of a character string
returned as the value pointed to by real_format. The strings returned
must not be modified in any way.
The values returned in byte_order may be the strings "little_endian
" or "big-endian ". The values returned in real_format may be the
strings "ieee 754-1985 " or "other ". Additional values may be
returned by future versions of the API.
ARGUMENTS
byte_order pointer to the returned string real_format pointer to
the returned string
RETURN VALUE
Returns an error code on failure or 0 for success.
----------------------------------------------------------------------
"""
return _pycbf.get_local_real_format() | b35ad022544c1ec8614bed8b87cb1ce786fafa7e | 3,635,515 |
import torch
def zaxis_to_world(kpt: torch.Tensor):
"""Transform kpt from 2D+Z to 3D Real World Coordinates (RWC) for ITOP Dataset
Args:
        kpt (torch.Tensor): Tensor containing keypoints to transform
    Returns:
        torch.Tensor: Converted keypoints
    """
    tmp = kpt.clone()
    tmp[..., 0] = (tmp[..., 0].clone() - 160) * 0.0035 * tmp[..., 2].clone()
    tmp[..., 1] = -(tmp[..., 1].clone() - 120) * 0.0035 * tmp[..., 2].clone()
return tmp | d925382c62d370a991fa2dfd4c51cb43d051423e | 3,635,516 |
import urllib2  # legacy Python 2 module, matching the urllib2.addinfourl call below
from io import StringIO
def mock_response(req, resp_obj, resp_code):
""" Mock response for MyHTTPSHandler
"""
resp = urllib2.addinfourl(StringIO(resp_obj),
'This is a mocked URI!',
req.get_full_url())
resp.code = resp_code
resp.msg = "OK"
return resp | b786dae396b97cd7b67597496b0bb6204656c3f3 | 3,635,517 |
def http_login(request):
"""
    Called after successful basic HTTP authentication; checks whether the user
    has filled in his profile.
"""
logger.debug('Request full path: %s', request.get_full_path())
redirection = ""
# Should we redirect after login ?
if "next" in request.GET:
qr = request.GET.copy()
next_url = qr.pop('next')[0]
remains = qr.urlencode()
redirection = '{0}?{1}'.format(next_url, remains)
logger.debug('Should redirect to: %s', redirection)
# Find user
if settings.DEBUG:
user = User.objects.get(username='test@corp')
userauth = authenticate(username=user.username, password='test')
login(request, userauth)
else:
user = User.objects.get(username=request.META['REMOTE_USER'])
operating_system, browser = httpagentparser.simple_detect(
request.META.get('HTTP_USER_AGENT'))
logger.info('%s logged in using browser %s on %s.',
user.username, browser, operating_system)
# Validation
if not user.is_active \
or user.username.startswith('0') \
or not user.username.endswith('@corp'):
# Be sure that the login will not be used anymore
user.is_active = False
user.save()
logger.info('User name %s is mal-formed !', user.username)
return HttpResponse('Invalid account. Please use your '
'<strong>normal</strong> user account and append '
'<em>@corp</em>.')
# Auto fill profile (if possible)
user.first_name = request.META.get('AUTHENTICATE_GIVENNAME', '')
user.last_name = request.META.get('AUTHENTICATE_SN', '')
user.email = request.META.get('AUTHENTICATE_MAIL', '')
user.save()
if (user.first_name or user.last_name) and user.email:
if redirection:
response = redirect(redirection)
else:
response = redirect('index')
else:
# Profile is not completed
logger.info('User %s has not filled his profile.', user.username)
if redirection:
response = redirect('%s?redirect=%s' % (reverse('user_profile'),
redirection))
else:
response = redirect('user_profile')
return response | 0232f22c4947e5588833f7ddcb993328e0fe5b2e | 3,635,518 |
import numpy as np
def py_func_bernoulli(input):
    """
    Bernoulli sampling python function definition
    """
    prob_array = sigmoid(np.array(input))  # `sigmoid` is assumed to be defined elsewhere in the module
sample = np.random.binomial(1, prob_array)
return sample | 70d7583e07b062f74ea3fa8e7219e3dd1304dc9c | 3,635,519 |
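# `spw` below is not defined in this excerpt; it is presumably networkx's
# weighted shortest-path module, e.g.:
# from networkx.algorithms.shortest_paths import weighted as spw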
def dist(subnetworks, node_id,
path_method='dijkstra',
inter_group=False, inter_group_dist=None, rep_dist=None):
"""
Parameters
----------
subnetworks (subg): LIST. List of sub-graphs for each sub-network.
node_id : INT. Source node ID for calculating in-group distances.
path_method : CATEGORICAL ('dijkstra', 'bellman_ford'), default = 'dijkstra'.
inter_group : BOOLEAN, default = False. Indicate whether or not calculate inter-group distances.
inter_group_dist : DICT. Gives group-to-group distances.
rep_dist : DICT. Gives in-group distances within each group to their representatives.
Returns
-------
sorted_distances : DICT. In-group nodes and sorted distances.
"""
# FIND SUBGRAPH FOR node_id
subgi = [node_id in subnetworks[i] for i in range(len(subnetworks))]
subi = next(i for i, v in enumerate(subgi) if v)
if path_method == 'dijkstra':
node_distances = spw.single_source_dijkstra_path_length(subnetworks[subi], node_id)
elif path_method == 'bellman_ford':
node_distances = spw.single_source_bellman_ford_path_length(subnetworks[subi], node_id)
    else:
        raise ValueError('Invalid shortest path algorithm.')
# INCLUDE INTER_GROUP DISTANCES
if inter_group:
# BASELINE MULTIPLIER
basedist = dict({(i, subnetworks[subi].number_of_nodes() * v)
for i, v in inter_group_dist[subi].items()})
# ADDING INTERGROUP DISTANCES
for subj in range(len(subnetworks)):
if subj != subi:
distj = dict({(i, basedist[subj] + v)
for i, v in rep_dist[subj].items()})
node_distances.update(distj)
# SORT BY DISTANCE
sorted_distances = {k: v for k, v in \
sorted(node_distances.items(), key=lambda x: x[1])}
return sorted_distances | dcea802ee2e996447f5ad9c4e87db6bf96bee522 | 3,635,520 |
def update_account():
"""
Update an account
"""
account = Account.query.filter(Account.id == session['user']['account']['id']).first()
for key, value in request.form.items():
setattr(account, key, value)
db_session.add(account)
db_session.commit()
session['user']['account'] = account.dict()
return jsonify({
"account": session['user']['account']
}) | 18ddf48079b04da6c2c8f96a306fdb5e06738839 | 3,635,521 |
import numpy
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
>>> numpy.allclose(q, [-44, -14, 48, 28])
True
"""
x0, y0, z0, w0 = quaternion0
x1, y1, z1, w1 = quaternion1
return numpy.array((
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0,
-x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64) | bcc6973f169840400c86b5eaf673deb75444a63f | 3,635,522 |
def triangulate_nviews(P, ip):
"""
Triangulate a point visible in n camera views.
P is a list of camera projection matrices.
ip is a list of homogenised image points. eg [ [x, y, 1], [x, y, 1] ], OR,
ip is a 2d array - shape nx3 - [ [x, y, 1], [x, y, 1] ]
len of ip must be the same as len of P
"""
if not len(ip) == len(P):
raise ValueError('Number of points and number of cameras not equal.')
n = len(P)
M = np.zeros([3*n, 4+n])
for i, (x, p) in enumerate(zip(ip, P)):
M[3*i:3*i+3, :4] = p
M[3*i:3*i+3, 4+i] = -x
V = np.linalg.svd(M)[-1]
X = V[-1, :4]
return X / X[3] | e9cdb99070ea5a4a2a1667237ee03b0f67b29018 | 3,635,523 |
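# Self-contained sanity check for triangulate_nviews (numpy assumed available
# as np, as in the function body): project a known 3D point through two ideal
# cameras and recover it.
P1 = np.hstack([np.eye(3), np.zeros((3, 1))])                  # camera at the origin
P2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])  # shifted one unit in x
X_true = np.array([0.5, 0.25, 2.0, 1.0])
ips = [P @ X_true for P in (P1, P2)]   # homogeneous image points
ips = [ip / ip[2] for ip in ips]       # normalise to [x, y, 1]
print(triangulate_nviews([P1, P2], ips))  # ~[0.5, 0.25, 2.0, 1.0]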
import os
from collections import defaultdict
def get_require_files(path,require_file_list,regex=True,matched_part='xls',if_walk_path=True):
"""
检查某个路径是否包含必须的文档
:param path:路径
:param require_file_list: 要检查的文档/文件夹是否存在
:param regex : 是否需要用re去匹配
:return : 如果不存在,返回空字典,如果文档存在 返回需要文档的对应绝对路径字典
"""
    if not isinstance(require_file_list, list):
        require_file_list = [require_file_list]
    if if_walk_path:
path_files = get_walk_abs_files(path)
else:
path_files = [ os.path.join(path,x) for x in os.listdir(path) ]
path_files.sort()
require_dict = defaultdict(str)
    # exact matching
if regex == False :
require_list = [ (x,y) for x in require_file_list
for y in path_files if '~$' not \
in x and '.py' not in x and x == y.split('\\')[-1] ]
else:
if matched_part == None:
require_list = [ (x,y) for x in require_file_list \
for y in path_files if x.lower() in y.lower().split('\\')[-1] \
and '~$' not in y ]
else:
require_list = [ (x,y) for x in require_file_list \
for y in path_files if x.lower() in y.lower().split('\\')[-1] \
and '~$' not in y and matched_part in y ]
for r in require_list:
if r[0] not in require_dict.keys():
            # record the returned target path; entries already recorded are not updated
            require_dict[r[0]] = r[1]
    # work out which documents are missing
    require_list = require_dict.keys()
    # missing files/documents
file_lack = set(require_file_list) - set(require_list)
if file_lack :
enter_exit('Required files not found:{0}'.format(','.join(file_lack)))
return {}
else:
return require_dict | d180a64a509131c8300cba0390a07760fa31c2cf | 3,635,524 |
from typing import Dict
import re
def decompose_entry_to_dict_2107_Stavropol(entry:str)-> Dict:
"""
Выделяем данные из одной записи в dictionary
------------------------------------------------------------------------------------------------------
03.07.2021 12:52 -> Перевод с карты -> 3 500,00 -> 28 655,30
03.07.2021 123456 -> SBOL перевод 1234****1234 Н. ИГОРЬ РОМАНОВИЧ
------------------------------------------------------------------------------------------------------
либо такой
--------------------------------------------------------------------------------------------------
28.06.2021 00:00 -> Неизвестная категория(+) +21107,75 22113,73
28.06.2021 - -> Прочие выплаты
----------------------------------------------------------------------------------------------------
ещё один пример (с 3 линиями)
---------------------------------------------------------------------------------------------------------
03.07.2021 11:54 -> Перевод с карты -> 4 720,00 -> 45 155,30
03.07.2021 258077 -> SBOL перевод 1234****5678 А. ВАЛЕРИЯ
ИГОРЕВНА
----------------------------------------------------------------------------------------------------------
либо такой с иностранной вылютой
---------------------------------------------------------------------------------------------------------
08.07.2021 18:27 -> Все для дома -> 193,91 -> 14593,30
09.07.2021 -> 254718 -> XXXXX XXXXX -> 2,09 €
---------------------------------------------------------------------------------------------------------
В последнем примере:
{'authorisation_code': '254718',
'category': 'Все для дома',
'description': 'XXXXX XXXXX',
'operation_date': '08.07.2021 18:27',
'processing_date': '09.07.2021',
'remainder_account_currency': 14593.30,
'value_account_currency': -193.91б
'operational_currency': '€'
}
"""
lines=entry.split('\n')
lines=list(filter(None,lines))
    if len(lines) < 2 or len(lines) > 3:
        raise exceptions.InputFileStructureError("entry is expected to have from 2 to 3 lines\n" + str(entry))
result={}
#************** looking at the 1st line
line_parts=split_Sberbank_line(lines[0])
# print( f"1st line line_parts {line_parts}")
result['operation_date']=line_parts[0] +" "+ line_parts[1]
result['category']=line_parts[2]
result['value_account_currency']=get_float_from_money(line_parts[3],True)
result['remainder_account_currency']=get_float_from_money(line_parts[4])
# ************** looking at the 2nd line
line_parts = split_Sberbank_line(lines[1])
    if len(line_parts) < 3 or len(line_parts) > 4:
        raise exceptions.SberbankPDFtext2ExcelError("Line is expected to have 3 or 4 parts: " + str(lines[1]))
# print(line_parts[0])
# processing_date__authorisation_code = re.search(r'(dd\.dd\.dddd)\s(.*)', line_parts[0])
result['processing_date'] = line_parts[0]
result['authorisation_code'] = line_parts[1]
result['description']=line_parts[2]
    # Extract the amount in the operation currency, if present
if len(line_parts)==4:
found=re.search(r'(.*?)\s(\S*)',line_parts[3]) #processing string like '6,79 €'
if found:
result['value_operational_currency']=get_float_from_money(found.group(1),True)
result['operational_currency']=found.group(2)
else:
raise exceptions.InputFileStructureError("Ошибка в обработке текста. Ожидалась струтура типа '6,79 €', получено: " + line_parts[3])
# ************** looking at the 3rd line
if len(lines) == 3:
line_parts = split_Sberbank_line(lines[2])
result['description'] = result['description']+' '+line_parts[0]
# print(result)
return result | 95ea242516619505fff9a8039c51097c20935235 | 3,635,525 |
from numpy import array, ndarray
def _create_data_sources(data, index_sort="none"):
"""
Returns datasources for index and value based on the inputs. Assumes that
the index data is unsorted unless otherwise specified.
"""
# if not isinstance(data, ndarray) and (len(data) < 2):
# raise RuntimeError("Unable to create datasources.")
index = data[0]
if type(index) in (list, tuple, ndarray):
index = ArrayDataSource(array(index), sort_order=index_sort)
elif not isinstance(index, AbstractDataSource):
msg = "Need an array or list of values or a DataSource, got {} " \
"instead.".format(type(index))
raise RuntimeError(msg)
if len(data) == 1:
return index
value = data[1]
if type(value) in (list, tuple, ndarray):
value = ArrayDataSource(array(value))
elif not isinstance(value, AbstractDataSource):
msg = "Need an array or list of values or a DataSource, got {} " \
"instead.".format(type(value))
raise RuntimeError(msg)
if len(data) == 2:
return index, value
if len(data) >= 3:
adtl_data1 = data[2]
if type(adtl_data1) in (list, tuple, ndarray):
adtl_data1 = ArrayDataSource(array(adtl_data1))
        elif not isinstance(adtl_data1, AbstractDataSource):
msg = "Need an array or list of values or a DataSource, got {} " \
"instead.".format(type(adtl_data1))
raise RuntimeError(msg)
if len(data) == 3:
return index, value, adtl_data1
if len(data) == 4:
adtl_data2 = data[3]
if type(adtl_data2) in (list, tuple, ndarray):
adtl_data2 = ArrayDataSource(array(adtl_data2))
        elif not isinstance(adtl_data2, AbstractDataSource):
msg = "Need an array or list of values or a DataSource, got {} " \
"instead.".format(type(adtl_data2))
raise RuntimeError(msg)
        return index, value, adtl_data1, adtl_data2 | 9125afad2b1ad8ee350ae81b87ba3b9970e3e218 | 3,635,526 |
import numpy as np
def discriminate(outputs, classes_to_detect):
"""Select which classes to detect from an output.
Get the dictionary associated with the outputs instances and modify
it according to the given classes to restrict the detection to them
Args:
outputs (dict):
instances (detectron2.structures.instances.Instances): Instance
element which contains, among others, "pred_boxes",
"pred_classes", "scores" and "pred_masks".
classes_to_detect (list: int): Identifiers of the dataset on which
the model was trained.
Returns:
ouputs (dict): Same dict as before, but modified to match
the detection classes.
"""
    pred_classes = np.array(outputs['instances'].pred_classes)
# Take the elements matching *classes_to_detect*
mask = np.isin(pred_classes, classes_to_detect)
# Get the indexes
idx = np.nonzero(mask)
# # Get the current Instance values
# pred_boxes = outputs['instances'].pred_boxes
# pred_classes = outputs['instances'].pred_classes
# pred_masks = outputs['instances'].pred_masks
# scores = outputs['instances'].scores
# Get Instance values as a dict and leave only the desired ones
out_fields = outputs['instances'].get_fields()
for field in out_fields:
out_fields[field] = out_fields[field][idx]
return outputs | a50be55dbdb546cb4857b87389fe94c5eb64b961 | 3,635,527 |
import logging
from http import HTTPStatus

import requests
from requests import Response

log = logging.getLogger(__name__)
def request_get_with_timeout_retry(url: str, retries: int) -> Response:
"""
Makes a GET request, and retries if the server responds with a 504 (timeout)
Args:
url (str): The URL of the Mailgun API endpoint
retries (int): The number of times to retry the request
Returns:
response (requests.models.Response): The requests library response object
Raises:
requests.exceptions.HTTPError: Raised if the response has a status code indicating an error
"""
resp = requests.get(url)
# If there was a timeout (504), retry before giving up
tries = 1
while resp.status_code == HTTPStatus.GATEWAY_TIMEOUT and tries <= retries:
tries += 1
log.warning(
"GET request timed out (%s). Retrying for attempt %d...", url, tries
)
resp = requests.get(url)
resp.raise_for_status()
return resp | 41f85933d6a036d3ef2abf9f172eb6dd54871eba | 3,635,528 |
import talib
def SMA(value, day):
"""
返回简单移动平均序列。传入可以是列表或序列类型。传出是历史到当前周期为止的简单移动平均序列。
"""
# result = statistics.mean(value[-day:])
result = talib.SMA(value, day)
return result | 2ac504552d8a6b259c61cc53b0bb0c267535b1f5 | 3,635,529 |
def get_filtered_ecs_service_names(ecs_client, ecs_cluster_name, name_prefix):
"""Retrives the service names for the given cluster, using an optional regex
Keyword arguments:
ecs_client -- Autoscaling boto3 client (if None, will create one)
ecs_cluster_name -- the name of the cluster the service is in
name_prefix -- the prefix to filter the names
"""
service_names = []
ecs_client_qualified = (
ecs_client if ecs_client is not None else get_client(service_name="ecs")
)
service_arns = ecs_client_qualified.list_services(cluster=ecs_cluster_name)[
"serviceArns"
]
batch_size = 10
for i in range(0, len(service_arns), batch_size):
batch = service_arns[i : i + batch_size]
services = ecs_client_qualified.describe_services(
cluster=ecs_cluster_name, services=batch
)
service_names_for_batch = [
service["serviceName"] for service in services["services"]
]
for service_name in service_names_for_batch:
if service_name.startswith(name_prefix):
service_names.append(service_name)
console_printer.print_info(f"Retrieved service name list as '{service_names}'")
return service_names | a50f6c3af71af3893c4f313edd25b4a05e64433f | 3,635,530 |
def remove_zero_pairs(xy):
"""Returns new xy-pair Numpy array where x=y=0 pairs have been removed
Arguments:
xy(numpy array): input array
"""
mask = np.where((xy[:, __X] != 0.0) & (xy[:, __Y] != 0.0))[0]
return xy[mask, :] | f84a75af111f5371fb1b827cc8151ecb4d80558a | 3,635,531 |
def AddMEBTChopperPlatesAperturesToSNS_Lattice(accLattice,aprtNodes):
"""
Function will add two Aperture nodes at the entrance and exit of
MEBT chopper plates. It returns the list of Aperture nodes.
"""
x_size = 0.060
y_size = 0.018
shape = 3
node_pos_dict = accLattice.getNodePositionsDict()
node1 = accLattice.getNodesForName("MEBT:ChpPlt:Entr")[0]
node2 = accLattice.getNodesForName("MEBT:ChpPlt:Exit")[0]
for node in [node1,node2]:
node_name = node.getName()
(posBefore, posAfter) = node_pos_dict[node]
apertureNode = LinacApertureNode(shape,x_size/2.0,y_size/2.0,posBefore)
apertureNode.setName(node_name+":Aprt")
apertureNode.setSequence(node.getSequence())
node.addChildNode(apertureNode,node.ENTRANCE)
aprtNodes.append(apertureNode)
aprtNodes = sorted(aprtNodes, key = lambda x: x.getPosition(), reverse = False)
return aprtNodes | 98a7809eb0d8f69f51f23eafe7ac2d10fa7fb89f | 3,635,532 |
import argparse
def build_parser():
"""Build argument parser."""
parse = argparse.ArgumentParser(description=("Use this script to generate new APBS input "
"files or split an existing parallel input "
"file into multiple async files"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parse.add_argument("--asynch", action="store_true",
help="Perform an asynchronous parallel calculation.")
parse.add_argument("--split", action="store_true",
help=("Split an existing parallel input file to multiple "
"async input files."))
parse.add_argument("--potdx", action="store_true",
help=("Create an input to compute an electrostatic potential map."))
parse.add_argument("--method",
help=("Force output file to write a specific APBS ELEC method."),
choices=["para", "auto", "manual", "async"])
parse.add_argument("--cfac", type=float, default=psize.CFAC,
help=("Factor by which to expand molecular dimensions to "
"get coarse grid dimensions."))
parse.add_argument("--fadd", type=float, default=psize.FADD,
help=("Amount to add to molecular dimensions to get fine "
"grid dimensions."))
parse.add_argument("--space", type=float, default=psize.SPACE,
help="Desired fine mesh resolution")
parse.add_argument("--gmemfac", type=int, default=psize.GMEMFAC,
help=("Number of bytes per grid point required for sequential "
"MG calculation"))
parse.add_argument("--gmemceil", type=int, default=psize.GMEMCEIL,
help=("Max MB allowed for sequential MG calculation. Adjust "
"this to force the script to perform faster calculations "
"(which require more parallelism)"))
parse.add_argument("--ofrac", type=float, default=psize.OFRAC,
help="Overlap factor between mesh partitions (parallel)")
parse.add_argument("--redfac", type=float, default=psize.REDFAC,
help=("The maximum factor by which a domain dimension can "
"be reduced during focusing"))
parse.add_argument("--istrng", help="Ionic strength (M). Na+ anc Cl- ions will be used")
parse.add_argument("filename")
return parse | 7e7ba1d7a2d818959b825b7044a9e03c2196e293 | 3,635,533 |
def find_adjective(sent):
"""Given a sentence, find the best candidate adjective."""
adj = None
for w, p in sent.pos_tags:
if p == 'JJ': # This is an adjective
adj = w
break
return adj | 1aabeafb1c73f1b0f2e128c8dd09f977efc99442 | 3,635,534 |
def tensor_index_by_number(data, number):
"""Tensor getitem by a Number which may be integer/float/bool value"""
number_type = const_utils.check_number_index_type(number)
if number_type == const_utils.BOOL_:
return tensor_index_by_bool(data, number)
if number_type == const_utils.INT_:
return tensor_index_by_integer(data, number)
return const_utils.raise_index_error("Only support integers, slices(`:`), ellipsis(`...`), None and bool.") | 0b47eeb4d55a928a0a9525e58a682adf8f3decf8 | 3,635,535 |
from datetime import datetime
def _get_stop_as_datetime(event_json) -> datetime:
    """Reads the stop timestamp of the event and returns it as a datetime
    object.
    Args:
        event_json (json): The event encapsulated as json.
    Returns:
        datetime: Timestamp of the stop of the event.
    """
name = event_json['info']['name']
payload_stop = 'meta.raw_payload.' + name + '-stop'
stop_timestamp_string = event_json['info'][payload_stop]['timestamp']
stop_date_string, stop_time_string = stop_timestamp_string.split('T')
stop_time_string, _ = stop_time_string.split('.')
date_and_time_string = stop_date_string + ' ' + stop_time_string
return datetime.strptime(date_and_time_string, '%Y-%m-%d %H:%M:%S') | 958915a568c66a04da3f44abecf0acca90181f43 | 3,635,536 |
def ascon_finalize(S, rate, a, key):
"""
Ascon finalization phase - internal helper function.
S: Ascon state, a list of 5 64-bit integers
rate: block size in bytes (8 for Ascon-128, Ascon-80pq; 16 for Ascon-128a)
a: number of initialization/finalization rounds for permutation
key: a bytes object of size 16 (for Ascon-128, Ascon-128a; 128-bit security) or 20 (for Ascon-80pq; 128-bit security)
returns the tag, updates S
"""
assert(len(key) in [16,20])
S[rate//8+0] ^= bytes_to_int(key[0:8])
S[rate//8+1] ^= bytes_to_int(key[8:16])
S[rate//8+2] ^= bytes_to_int(key[16:])
ascon_permutation(S, a)
S[3] ^= bytes_to_int(key[-16:-8])
S[4] ^= bytes_to_int(key[-8:])
tag = int_to_bytes(S[3], 8) + int_to_bytes(S[4], 8)
if debug: printstate(S, "finalization:")
return tag | 7a1115c0dfbc543e9e0cceb8fbac678e8bff4f55 | 3,635,537 |
def _click_command(
state: State,
path: str,
files: str,
batch: int,
runid_log: str = None,
wait: bool = False,
    skip_existing: bool = False,
simulate: bool = False,
):
"""Ingest files into OSDU."""
return ingest(state, path, files, batch, runid_log, wait, skip_existing, simulate) | 741dd7a7a39360ba69d55dda4f08848559f4e9d4 | 3,635,538 |
def datasheet_search_query(doctype, txt, searchfield, start, page_len, filters):
"""
:param doctype:
:param txt:
:param searchfield:
:param start:
:param page_len:
:param filters:
:return:
"""
db_name = frappe.conf.get("db_name")
sql = f"""
SELECT
`m`.`name`
, `m`.`title`
FROM `{db_name}`.tabDC_Doc_Datasheet_Meta AS `m`
WHERE
(`m`.`name` LIKE %(search)s OR `m`.`title` LIKE %(search)s)
ORDER BY
`m`.`name` ASC, `m`.`title` ASC"""
res = frappe.db.sql(
sql + ';',
{
'search': '%{}%'.format(txt)
}
)
return res | 15eaf231adf54792ad6b678de702eb349ac6a219 | 3,635,539 |
def client():
"""Define client connection to server BaseManager
Returns: BaseManager object
"""
port, auth = get_auth()
mgr = BaseManager(address=('', port), authkey=auth)
mgr.register('set_event')
mgr.connect()
return mgr | 9a61d7b546a72eb0f5b5e91a7297715a773da516 | 3,635,540 |
def set_remote_sense(is_remote=False):
"""Docstring"""
built_packet = build_cmd(0x56, value=int(is_remote))
resp = send_recv_cmd(built_packet)
return resp | 3931c8b4935cb92712a892189a28fb36c68de973 | 3,635,541 |
def printImproperDihedral(dihedral, shift, molecule, alchemicalTransformation):
"""Generate improper dihedral line
Parameters
----------
dihedral : Angle Object
Angle Object
shift : int
Shift produced by structural dummy atoms
molecule : molecule object
Molecule object
alchemicalTransformation : bool
True if alchemical transformation
Returns
-------
dihedralLine : str
Dihedral line data
"""
a = 180.0
V1 = (dihedral.V2*0.5)*kcalToKj
V2 = 2.0
ftype = 4
line = ''
atomAdihedral = molecule.atoms[dihedral.atomA.serialOriginal -1].serial-shift
atomBdihedral = molecule.atoms[dihedral.atomB.serialOriginal -1].serial-shift
atomCdihedral = molecule.atoms[dihedral.atomC.serialOriginal -1].serial-shift
atomDdihedral = molecule.atoms[dihedral.atomD.serialOriginal -1].serial-shift
if alchemicalTransformation:
V1_B = (dihedral.V2_B*0.5)*kcalToKj
line = '%5d%5d%5d%5d %d %6.3f %6.3f %6d %6.3f %6.3f %6d \n' % \
(atomAdihedral, atomBdihedral, atomCdihedral, atomDdihedral, ftype, a, V1, V2, a, V1_B, V2)
    else:
        line = '%5d%5d%5d%5d %d %6.3f %6.3f %6d \n' % \
            (atomAdihedral, atomBdihedral, atomCdihedral, atomDdihedral, ftype, a, V1, V2)
return line | 77c39e1b9884ba8dd5b940f62b3c4dc0c698b60a | 3,635,542 |
import numpy as np
def mmd_est(x, y, c):
"""
Function for estimating the MMD between samples x and y using Gaussian RBF
with scale c.
Args:
        x (np.ndarray): (n_samples, n_dims) samples from first distribution.
        y (np.ndarray): (n_samples, n_dims) samples from second distribution.
        c (float): scale parameter of the Gaussian RBF kernel.
    Returns:
float: The mmd estimate."""
n_x = x.shape[0]
n_y = y.shape[0]
factor1 = 0.
for i in range(n_x):
for j in range(n_x):
if (j == i): continue
factor1 += _gauss_rbf(x[i:i+1], x[j:j+1], c)
factor1 /= (n_x*(n_x-1))
factor2 = 0.
for i in range(n_y):
for j in range(n_y):
if (j == i): continue
factor2 += _gauss_rbf(y[i:i+1], y[j:j+1], c)
factor2 /= (n_y*(n_y-1))
factor3 = 0.
for i in range(n_x):
for j in range(n_y):
factor3 += _gauss_rbf(x[i:i+1], y[j:j+1], c)
factor3 *= 2/(n_x*n_y)
return factor1 + factor2 - factor3 | b0de0f7725e6f5c3fa35096d2e9e48f52a341727 | 3,635,543 |
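# mmd_est depends on a _gauss_rbf helper that is not part of this snippet; a
# plausible definition plus a smoke test follow (both are assumptions, not the
# original implementation).
def _gauss_rbf(x, y, c):
    # Gaussian RBF kernel k(x, y) = exp(-||x - y||^2 / (2 * c^2)).
    return np.exp(-np.sum((x - y) ** 2) / (2 * c ** 2))

rng = np.random.default_rng(0)
mmd_close = mmd_est(rng.normal(size=(20, 2)), rng.normal(size=(20, 2)), c=1.0)
mmd_far = mmd_est(rng.normal(size=(20, 2)), rng.normal(3.0, 1.0, size=(20, 2)), c=1.0)
print(mmd_close, mmd_far)  # the shifted pair should yield a noticeably larger MMD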
from typing import Dict
def contents_append_notable_sequence_event_types(sequence, asset_sequence_id) -> Dict:
"""Appends a dictionary of filtered data to the base list for the context
Args:
sequence: sequence object
asset_sequence_id: asset sequence ID
Returns:
A contents list with the relevant notable sequence event types
"""
content = {
'eventType': sequence.get('eventType'),
'displayName': sequence.get('displayName'),
'count': sequence.get('count'),
'sequenceId': asset_sequence_id
}
return content | fca27e5242968fa0db3c9d450588d77e4b307d1e | 3,635,544 |
def get_tx_in_db(session: Session, tx_sig: str) -> bool:
"""Checks if the transaction signature already exists for Challenge Disburements"""
tx_sig_db_count = (
session.query(ChallengeDisbursement).filter(
ChallengeDisbursement.signature == tx_sig
)
).count()
exists = tx_sig_db_count > 0
return exists | 51570741326bfa1393d1c885c8a43b80e25e422b | 3,635,545 |
def saturation_correlate(Ch_L, L_L):
"""
Returns the correlate of *saturation* :math:`S_L`.
Parameters
----------
Ch_L : numeric or array_like
Correlate of *chroma* :math:`Ch_L`.
L_L : numeric or array_like
Correlate of *Lightness* :math:`L_L`.
Returns
-------
numeric or ndarray
Correlate of *saturation* :math:`S_L`.
Examples
--------
>>> Ch_L = 0.008650662051714
>>> L_L = 37.368047493928195
>>> saturation_correlate(Ch_L, L_L) # doctest: +ELLIPSIS
0.0002314...
"""
Ch_L = as_float_array(Ch_L)
L_L = as_float_array(L_L)
S_L = Ch_L / L_L
return S_L | 08b401caa24369a46c4f38b1c6006479c7b86421 | 3,635,546 |
def RHS(qmc_data):
"""
RHS(qmc_data)
-------------
We solve A x = b with a Krylov method. This function extracts
b from Sam's qmc_data structure by doing a transport sweep with
zero scattering term.
"""
G = qmc_data.G
Nx = qmc_data.Nx
Nv = Nx*G
zed = np.zeros((Nx,G))
bout = SI_Map(zed,qmc_data)
return bout | c5300ad0c197eaf483d346a36f0957b8881b575f | 3,635,547 |
def parseThesaurus(eInfo):
"""Return thesaurus object
"""
assert (isinstance(eInfo, pd.Series))
try:
res = eInfo.apply(parseJsonDatum)
    except Exception:
# print "\nWarning: parseThesaurus(): blanks or non pd.Series"
res = eInfo.apply(lambda x: "" if x is None else x)
return res | 736182fad6405fe01a0c31aec5286da99abeb90c | 3,635,548 |
import string
import random
def gen_pass(length=8, no_numerical=False, punctuation=False):
"""Generate a random password
Parameters
----------
length : int
The length of the password
no_numerical : bool, optional
If true the password will be generated without 0-9
punctuation : bool, optional
If true the password will be generated with punctuation
Returns
-------
string
The generated password
"""
characters = [string.ascii_letters]
# Add user options to the character set
if not no_numerical:
characters.append(string.digits)
if punctuation:
characters.append(string.punctuation)
# Shuffle the character set
random.SystemRandom().shuffle(characters)
chars_left = length - (len(characters) - 1)
char_amounts = []
# Decide on number of characters per character set
for char_set in characters:
i = random.SystemRandom().randint(1, chars_left)
char_amounts.append(i)
chars_left -= i - 1
char_amounts[-1] += chars_left - 1
# Generate the password's characters
password = ''
for i, length in enumerate(char_amounts):
        password += ''.join(random.SystemRandom().choice(characters[i]) for _ in range(length))
# Shuffle the password
password = list(password)
random.SystemRandom().shuffle(password)
password = ''.join(password)
return password | dc0ca0c228be11a5264870112e28f27817d4bbc8 | 3,635,549 |
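# Example invocations of gen_pass; output is random, so only the length and the
# character classes are predictable.
print(gen_pass())                      # 8 characters of letters and digits
print(gen_pass(12, punctuation=True))  # 12 characters, punctuation allowed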
def document_version_title(context):
"""Document version title"""
return context.title | 1589a76e8bb4b4a42018783b7dbead9efc91e21a | 3,635,550 |
def get_validation_errors(schema, value, validate_invariants=True):
"""
Validate that *value* conforms to the schema interface *schema*.
This includes checking for any schema validation errors (using
`get_schema_validation_errors`). If that succeeds, and
*validate_invariants* is true, then we proceed to check for any
declared invariants.
Note that this does not include a check to see if the *value*
actually provides the given *schema*.
:return: If there were any validation errors, either schema or
invariant, return a two tuple (schema_error_dict,
invariant_error_list). If there were no errors, returns a
two-tuple where both members are empty.
"""
schema_error_dict = get_schema_validation_errors(schema, value)
invariant_errors = []
# Only validate invariants if there were no previous errors. Previous
# errors could be missing attributes which would most likely make an
# invariant raise an AttributeError.
if validate_invariants and not schema_error_dict:
try:
schema.validateInvariants(value, invariant_errors)
except Invalid:
# validateInvariants raises a wrapper error around
# all the errors it got if it got errors, in addition
# to appending them to the errors list. We don't want
# that, we raise our own error.
pass
return (schema_error_dict, invariant_errors) | 857f2527ac3df8325154c78b79fd8bf2f4f535fb | 3,635,551 |
def kruskal_suboptimal_mst(graph):
""" Computes the MST of a given graph using Kruskal's algorithm.
Complexity: O(m*n) - it's dominated by determining if adding a new edge
creates a cycle which is O(n). This implementation does not use union-find.
This algorithm also works for directed graphs.
Discovered in 1956 by Joseph Kruskal.
Args:
graph: object, data structure to hold the graph data.
Returns:
        A Graph instance representing the MST.
"""
    edges = graph.get_edges()
    edges.sort(key=lambda e: e[2]) # sort edges asc by length.
    num_vertices = len(graph.get_vertices())
    index = 0
    num_mst_edges = 0
    mst = Graph.build(edges=[], directed=False)
    # Scan edges in ascending weight order until the MST is complete.
    while num_mst_edges < num_vertices - 1 and index < len(edges):
        edge = edges[index]
        index += 1
        # Make sure the picked edge does not create a cycle in the existing MST.
        [tail, head, __] = graph.split_edge(edge)
        explored = bfs(mst, tail)
        if head not in explored:
            mst.add_edge(edge)
            num_mst_edges += 1
    return mst | 2ff1f96618324deee59ff61f57dcdb4715442fbb | 3,635,552 |
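# Usage sketch under the snippet's assumed Graph API (Graph.build and bfs come
# from the surrounding module, so this stays commented): edges are
# (tail, head, weight) triples.
# g = Graph.build(edges=[('a', 'b', 1), ('b', 'c', 2), ('a', 'c', 3)], directed=False)
# mst = kruskal_suboptimal_mst(g)  # keeps ('a', 'b', 1) and ('b', 'c', 2)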
def set_url_for_recrawl(db, url):
"""Set url for recrawl later"""
url_hash = urls.hash(url)
result = db['Urls'].find_one_and_update({'_id': url_hash},
{'$set': {'queued': False,
'visited': False}})
return result is not None | 00f36e9a313c8dae07541bc016da56ce47f7ee45 | 3,635,553 |
def vertical(hfile):
"""Reads psipred output .ss2 file.
@param hfile psipred .ss2 file
@return secondary structure string.
"""
result = ''
for l in hfile:
if l.startswith('#'):
continue
if not l.strip():
continue
l_arr = l.strip().split()
result += l_arr[2]
return result | c118b61be6edf29b42a37108c5fe21a0e62b801a | 3,635,554 |
import re
from operations import run
def run(command, use_sudo=False, user='', group='', freturn=False, err_to_out=False, input=None, use_which=True, sumout='', sumerr='', status=0):
"""Dummy executing command on host via ssh or subprocess.
If use_which is not False, original run command will be executed with 'which' command,
and it returns will be used as new sumout, somerr, status if original is not exists.
Args:
command (str): command for executing
use_sudo (bool): running with sudo prefix if True and current user not root, default is False
user (str): username for sudo -u prefix
group (str): group for sudo -g prefix
freturn (bool): return tuple if True, else return str, default is False
err_to_out (bool): redirect stderr to stdout if True, default is False
input (str or tuple of str): str will be flushed to stdin after executed command, default is None
use_which (bool): tries to strip command line and and run 'which' for each binary, default is True
works only for unix
sumout (str): fake string that contained all stdout messages, default is ''
sumerr (str): fake string that contained all stderr, default is ''
status (int): fake return code of command, default is 0
Return:
str if freturn is False: string that contained all stdout messages
tuple if freturn is True:
string that contained all stdout messages
string that contained all stderr
int that mean return code of command
"""
logger = envs.connect.logger
logger.debug('executing dry-run function')
logger.debug('arguments for executing and another locals: %s', locals())
original_command = command
command = command_patching_for_sudo(command, use_sudo, user, group)
# logging
write_message_to_log(command, 'dry-in: ')
if use_which:
# separate sudo modificator
if original_command != command:
st = command.find(original_command)
command = command[:st] + '|' + command[st:]
ncommand = ''
command = re.split('\\&|\\||\\;', command)
for part in command:
ncommand += '{0} {1}; '.format(
envs.common.which_binary,
re.findall(r"[\w']+", part)[0]
)
# import current run implementation
try:
run = envs.common.functions['run']
except KeyError:
if not (sumout and sumerr and status):
sumout, sumerr, status = run(ncommand, freturn=True, err_to_out=err_to_out, force=True)
else:
run(ncommand, err_to_out=err_to_out, force=True)
if freturn:
logger.debug('return sumout %s, sumerr %s, status %s', sumout, sumerr, status)
return (sumout, sumerr, status)
logger.debug('return sumout %s', sumout)
return sumout | cc6b7fb311993f91b4fa8a82fd6d87694922f432 | 3,635,555 |
def decide_play(lst):
"""
    This function returns a boolean controlling whether the user should continue the game.
    ----------------------------------------------------------------------------
    :param lst: (list) a list storing the input alphabet characters.
    :return: (bool) True if every entry is a single alphabetic character.
"""
if len(lst) == 4:
for char in lst:
if char.isalpha() and len(char) == 1:
pass
else:
return False
return True
else:
return False | 3062e1335eda572049b93a60a0981e905ff6ca0d | 3,635,556 |
def vocabfile_to_hashdict(vocabfile):
"""
A basic vocabulary hashing strategy just uses the line indices
of each vocabulary word to generate sequential hashes. Thus,
unique hashes are provided for each word in the vocabulary, and the
hash is trivially reversable for easy re-translation.
"""
hash_dict = {}
hash_index = 0
with open(vocabfile, "rb") as file:
for line in file:
line = line.decode('utf-8')
line = line.strip().replace('\n', '') # to prevent bad encoding
hash_dict[line] = hash_index
hash_index += 1
return hash_dict | f26515fbb406897f4f348436a8776fd2b86ce5e4 | 3,635,557 |
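# Small runnable sketch for vocabfile_to_hashdict: write a three-word vocabulary
# to a temporary file and hash it; line indices become the hash values.
import tempfile
with tempfile.NamedTemporaryFile("wb", suffix=".vocab", delete=False) as f:
    f.write(b"the\ncat\nsat\n")
print(vocabfile_to_hashdict(f.name))  # {'the': 0, 'cat': 1, 'sat': 2}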
import scipy.stats as st
def lnprior(theta, ref_time, fit_qm=False, prior_params=prior_params_default):
"""
Function to compute the value of ln(prior) for a given set of parameters.
We compute the prior using fixed definitions for the prior distributions
of the parameters, allowing some optional parameters for some of them via
the 'prior_params' dictionary. Except for T0 and P the priors on each of
the parameters is taken to be independent of each other, and defined in the
following way:
- Omega_angle: uniform between 0-180 degrees
- omega_angle: uniform between -180 - 180 degrees
- i_angle: uniform between 0 - 90 degrees
- a_axis: half-normal with optional loc/scale
- ecc: uniform between 0 - 1
- period: uniform with optional loc/scale
- T0: uniform between ref_time and period (this is to restrict the
result to a single-valued parameter)
- mu_delta, mu_alpha: normal with optional loc/scale
- pi_p: halfnormal with optional loc/scale
- Ddelta_ref, Dalpha_ref: normal with optional loc/scale
- q_m (only considered if fit_qm=True): halfnormal with optional
loc/scale
INPUT:
theta: array of parameters (ndim=12 or 13), contains the model
parameters following the ordering defined by param_list_all
(with or without q_m at the end)
ref_time: reference time, used to define the prior on T0 [years]
fit_qm: whether we are fitting for the mass ratio q_m or not
prior_params: dictionary containing the optional parameters for some
of the model parameters (should be defined as prior_params_default)
OUTPUT:
lprior: value of ln(prior) at this position in parameter space
"""
if fit_qm:
Omega_angle, omega_angle, i_angle, a_axis, ecc, period, T0, mu_delta, \
mu_alpha, pi_p, Ddelta_ref, Dalpha_ref, q_m = theta
else:
Omega_angle, omega_angle, i_angle, a_axis, ecc, period, T0, mu_delta, \
mu_alpha, pi_p, Ddelta_ref, Dalpha_ref = theta
lprior = 0
lprior += st.uniform.logpdf(Omega_angle, loc=0, scale=180)
lprior += st.uniform.logpdf(omega_angle, loc=-180, scale=360)
lprior += st.uniform.logpdf(i_angle, loc=0, scale=90)
lprior += st.halfnorm.logpdf(a_axis, **prior_params['a_axis'])
lprior += st.uniform.logpdf(ecc, loc=0, scale=1)
lprior += st.uniform.logpdf(period, **prior_params['period'])
lprior += st.uniform.logpdf(T0, loc=ref_time, scale=period)
lprior += st.norm.logpdf(mu_delta, **prior_params['mu_delta'])
lprior += st.norm.logpdf(mu_alpha, **prior_params['mu_alpha'])
lprior += st.halfnorm.logpdf(pi_p, **prior_params['pi_p'])
lprior += st.norm.logpdf(Ddelta_ref, **prior_params['Ddelta_ref'])
lprior += st.norm.logpdf(Dalpha_ref, **prior_params['Dalpha_ref'])
if fit_qm:
lprior += st.halfnorm.logpdf(q_m, **prior_params['q_m'])
return lprior | 49c2befb75fa5b8e1fa678101ddddd0de6fe8fc2 | 3,635,558 |
from datetime import datetime
def doy_to_month(year, doy):
"""
Converts a three-digit string with the day of the year to a two-digit
string representing the month. Takes into account leap years.
:param year: four-digit year
:param doy: three-digit day of the year
:return: two-digit string month
"""
    dt = datetime.strptime(f'{year} {doy}', '%Y %j')
return dt.strftime('%m') | b897c25e048dd5cd0e4d4371160d7ea7aa75cf90 | 3,635,559 |
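# doy_to_month picks up leap years through strptime's %j directive: day 60
# falls in February in 2020 but in March in 2021.
print(doy_to_month(2020, 60))  # '02' (2020-02-29)
print(doy_to_month(2021, 60))  # '03' (2021-03-01)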
def process_shot(top, full_prefix):
"""
Given the top directory and full prefix,
return essential info about the shot
Parameters
----------
top: string
directory place of the shot
full_prefix: string
shot description
returns: tuple
shot parameters
"""
shot_y, shot_z = names_helper.parse_shot( full_prefix )
radUnit, outerCup, innerCupSer, innerCupNum, coll = names_helper.parse_file_prefix( full_prefix )
tddose = get_3ddose(top, full_prefix)
sh_x, sh_y, sh_z, dm = dmax_shot(tddose, shot_y, shot_z)
bx, cx = dmax_curve_x(tddose, shot_y, shot_z)
by, cy = dmax_curve_y(tddose, shot_y, shot_z)
bz, cz = dmax_curve_z(tddose, shot_y, shot_z)
xw25 = calc_window(bx, cx, 0.20*dm)
xw50 = calc_window(bx, cx, 0.50*dm)
xw75 = calc_window(bx, cx, 0.80*dm)
yw25 = calc_window(by, cy, 0.20*dm)
yw50 = calc_window(by, cy, 0.50*dm)
yw75 = calc_window(by, cy, 0.80*dm)
zw25 = calc_window(bz, cz, 0.20*dm)
zw50 = calc_window(bz, cz, 0.50*dm)
zw75 = calc_window(bz, cz, 0.80*dm)
return (innerCupSer, innerCupNum, coll,
shot_y, shot_z, dm,
xw25[0], xw25[1],
xw50[0], xw50[1],
xw75[0], xw75[1],
yw25[0], yw25[1],
yw50[0], yw50[1],
yw75[0], yw75[1],
zw25[0], zw25[1],
zw50[0], zw50[1],
zw75[0], zw75[1]) | 6e0daa7163e0971bbf87417fd0ee84c73a512b0e | 3,635,560 |
def open(filename, debug=False):
"""This function opens an existing object pool, returning a
:class:`PersistentObjectPool`.
Raises RuntimeError if the file cannot be opened or mapped.
:param filename: Filename must be an existing file containing an object
pool as created by :func:`nvm.pmemlog.create`.
The application must have permission to open the file
and memory map it with read/write permissions.
:return: a :class:`PersistentObjectPool` instance that manages the pool.
When the pool is opened, if the previous shutdown was not clean the
pool is cleaned up, including running the 'gc' method.
"""
log.debug('open: %s, debug=%s', filename, debug)
# Make sure the file exists.
return PersistentObjectPool(filename, flag='w', debug=debug) | 491f9ceaffbe4aa4801afc26dd66aaa5f9d8d5c4 | 3,635,561 |
def dplnckqn(spectral, temperature):
"""Temperature derivative of Planck function in wavenumber domain for photon rate.
Args:
| spectral (scalar, np.array (N,) or (N,1)): wavenumber vector in [cm^-1]
| temperature (scalar, list[M], np.array (M,), (M,1) or (1,M)): Temperature in [K]
Returns:
| (scalar, np.array[N,M]): spectral radiant exitance in q/(s.m^2.cm^-1)
Raises:
| No exception is raised, returns None on error.
"""
xx=(pconst.c2n * spectral /temperature)
f=xx*np.exp(xx)/(temperature*(np.exp(xx)-1))
y=pconst.c1qn * spectral **2 / (np.exp(pconst.c2n * spectral \
/ temperature)-1)
    dplanckA = f * y
return dplanckA | c17b7340a09eb793c7b12af9ba27d00a74eaae1b | 3,635,562 |
import numpy as np
def specificity(ground_true, predicted):
"""Computes the specificity.
Args:
        ground_true (np.ndarray[bool]): ground truth mask to be compared with the predicted one.
        predicted (np.ndarray[bool]): predicted mask.
Should be the same dimension as `ground_true`.
Returns:
double: The specificity.
"""
N = np.prod(ground_true.shape) - np.sum(ground_true)
TN = np.sum(np.logical_not(ground_true) * np.logical_not(predicted))
    return TN / N | b0b40509fd663236b8e8ac13875e47c493921c02 | 3,635,563 |
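A small worked example (assuming numpy and the corrected function above are in scope): three of the four actual negatives are predicted negative, so specificity is 0.75.
import numpy as np
gt = np.array([True, False, False, False, False])
pred = np.array([True, True, False, False, False])
print(specificity(gt, pred))  # 0.75 (TN=3, N=4)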
from typing import Type
from typing import FrozenSet
def _get_node_feature_mapper(
node_feature_mapper_cls: Type[NodeFeatureMapper],
current_state: FrozenSet[Proposition],
problem: STRIPSProblem,
) -> NodeFeatureMapper:
"""
The node feature mappers need to be instantiated based on the current
state and goal states. Hence, a separate one is needed for each
state and planning problem
Parameters
----------
current_state: the current state
problem: the STRIPS problem
Returns
-------
NodeFeatureMapper
"""
if node_feature_mapper_cls == PropositionInStateAndGoal:
# Create node feature mapper for current state and the goal
return PropositionInStateAndGoal(
current_state=current_state, goal_state=problem.goals
)
else:
raise RuntimeError(
f"Unsupported node feature mapper {node_feature_mapper_cls}"
) | 03ffeeda63a3333c3d795e3b3af2952108bac1f3 | 3,635,564 |
def curvInterp(curv,p1,p2,size):
"""
Args:
curv: 2D ndarray
N-by-2 matrix, N points
p1,p2: list or ndarray
length = 2, p1 left point, p2 right point
        size: int
            the size of new curv (number of points)
    Returns:
        2D ndarray: the resampled curve of length `size`, rescaled to span p1 to p2
    """
if curv[0,0]>curv[-1,0]:
print("\033[1;35mError occured. Feature points x coordinates will be exchanged\033[0m")
curv[0,0],curv[-1,0] = curv[-1,0],curv[0,0]
new_x = np.linspace(curv[0,0], curv[-1,0], size)
new_y = np.interp(new_x, curv[:,0], curv[:,1])
new_x = (new_x-new_x[0])/(new_x[-1]-new_x[0])*(p2[0]-p1[0])+p1[0]
new_y = (new_y-new_y[0])/(new_y[-1]-new_y[0])*(p2[1]-p1[1])+p1[1]
return np.array(list(zip(new_x,new_y))) | b66355d21fa0a4b0a511708283b86b6051df2e29 | 3,635,565 |
def create_key_pair(key_pair_name):
    """Create a new key pair with the provided name and store it in a local .pem file"""
    response = ec2.create_key_pair(KeyName=key_pair_name)
    key_pair = str(response.key_material)
    with open(f"{key_pair_name}.pem", "w") as pem_outfile:
        pem_outfile.write(key_pair)
    print(f"Created Key Pair: {response}")
return response | 430f54fa4d89d4dcb99c8ae0ddcbc459d1d1d4ee | 3,635,566 |
import base64
import hashlib
def chunk_hash(data):
    """ We need to hash data in a data stream chunk and store the hash in mongo. """
    return base64.b64encode(hashlib.md5(data).digest()).decode('ascii') | 4c60ef09f5db7e9868a5d44f4bfa1ae5baf81338 | 3,635,567 |
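For example, hashing a chunk with the Python 3 version above yields the base64-encoded MD5 digest:
print(chunk_hash(b'hello world'))  # 'XrY7u+Ae7tCTyyK7j1rNww=='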
from functools import partial
from celery.five import items, string_t  # compat helpers used below (assumed from the original celery codebase)
from kombu.abstract import Object as KombuDictType
import datetime
def jsonify(obj,
builtin_types=(int, float, string_t), key=None,
keyfilter=None,
unknown_type_filter=None):
"""Transforms object making it suitable for json serialization"""
_jsonify = partial(jsonify, builtin_types=builtin_types, key=key,
keyfilter=keyfilter,
unknown_type_filter=unknown_type_filter)
if isinstance(obj, KombuDictType):
obj = obj.as_dict(recurse=True)
if obj is None or isinstance(obj, builtin_types):
return obj
elif isinstance(obj, (tuple, list)):
return [_jsonify(v) for v in obj]
elif isinstance(obj, dict):
return dict((k, _jsonify(v, key=k))
for k, v in items(obj)
if (keyfilter(k) if keyfilter else 1))
elif isinstance(obj, datetime.datetime):
# See "Date Time String Format" in the ECMA-262 specification.
r = obj.isoformat()
if obj.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, datetime.time):
r = obj.isoformat()
if obj.microsecond:
r = r[:12]
return r
elif isinstance(obj, datetime.timedelta):
return str(obj)
else:
if unknown_type_filter is None:
raise ValueError(
'Unsupported type: {0!r} {1!r} (parent: {2})'.format(
type(obj), obj, key))
return unknown_type_filter(obj) | d9504d2fd8a110bb4a8c07220b131fbbefc31141 | 3,635,568 |
import struct
def pack(code, *args):
"""Original struct.pack with the decorator applied.
Will change the code according to the system's architecture.
"""
return struct.pack(code, *args) | 851e8db4d0e710edf2ea15503d92e76d352a2f05 | 3,635,569 |
import cv2
from typing import Iterable
def find_faces(image: Image) -> Iterable[CropData]:
"""
Get a list of the location of each face found in an image.
"""
detector = cv2.CascadeClassifier(
str(
MODELS_DIR / "haarcascades" / "haarcascade_frontalface_default.xml"
)
)
grayscale_image: Image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return detector.detectMultiScale(
grayscale_image,
scaleFactor=1.3,
minNeighbors=5
) | 18d4d2dc588e15fa7ea620e5a31627a772e1466f | 3,635,570 |
import os
def getpath():
""" Generate filepath to the present file.
:return: filepath to the present file.
:rtype: str
"""
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) | 7feb3e0662a512231d6aa02afdb724555cb78ebb | 3,635,571 |
from collections import defaultdict
from typing import Dict, List
NODES = Dict[str, List[str]]  # local alias standing in for the project's path.NODES
def connect_nodes(contents: List[str]) -> NODES:
    """Connect the nodes of the cave system by assigning input pairs.
    Args:
        contents (List[str]): the file contents
    Returns:
        NODES: a mapping of each node to the list of nodes it connects to
    """
    nodes: NODES = defaultdict(list)
for line in contents:
start, end = line.split('-')
nodes[start].append(end)
nodes[end].append(start)
return nodes | 27d73090b689b7a12fab8188c770db4d2683b245 | 3,635,572 |
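For example, with the local NODES alias above, a two-edge cave produces a symmetric adjacency mapping:
nodes = connect_nodes(['start-A', 'A-end'])
print(dict(nodes))  # {'start': ['A'], 'A': ['start', 'end'], 'end': ['A']}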
import boto3
def create_clients(KEY, SECRET):
    """
    Creates the necessary resources and clients
    that will be used to create the redshift cluster
    :return: ec2, iam, redshift clients and resources
    """
ec2 = boto3.resource(
'ec2',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
iam = boto3.client(
'iam',
aws_access_key_id=KEY,
aws_secret_access_key=SECRET,
region_name='us-west-2'
)
redshift = boto3.client(
'redshift',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
user_feedback('Clients Received.')
return ec2, iam, redshift | 12b8c58d60f3d2d1f3b4c65a6927118d8c97d974 | 3,635,573 |
import sys
def create_app() -> FastAPI:
"""Create and do initial configuration of fastapi app"""
db = Database()
try:
db.create_database()
except Exception: # pylint: disable=broad-except
sys.exit(1)
app_ = FastAPI()
# Add routers
# app_.include_router(project_controller.router, prefix=API_VERSION_1)
# app_.include_router(user_controller.router, prefix=API_VERSION_1)
# app_.include_router(model_controller.router, prefix=API_VERSION_1)
# app_.include_router(data_source_controller.router, prefix=API_VERSION_1)
    # Override exception handlers
    # Override default class method
return app_ | 4ee327a5638788fb4de74f10b7c3e3bb3038aa0e | 3,635,574 |
def _md_fix(text):
"""
sanitize text data that is to be displayed in a markdown code block
"""
return text.replace("```", "``[`][markdown parse fix]") | 2afcad61f4b29ae14c66e04c39413a9a94ae30f8 | 3,635,575 |
def nameOrIdentifier(token):
"""
Determine if the given object is a name or an identifier, and return the
textual value of that name or identifier.
@rtype: L{str}
"""
if isinstance(token, Identifier):
return token.get_name()
elif token.ttype == Name:
return token.value
elif token.ttype == String.Single:
return _destringify(token.value)
elif token.ttype == Keyword:
return token.value
else:
raise ViolatedExpectation("identifier or name", repr(token)) | a7f92d40f3ec1401bbe46d2f0b11506114c10e36 | 3,635,576 |
def exact_kinematic_aug_diff_f(t, y, args_tuple):
"""
"""
_y, _, _ = y
_params, _key, diff_f = args_tuple
aug_diff_fn = lambda __y : diff_f(t, __y, (_params,))
_f, scales, translations = aug_diff_fn(_y)
trace = jnp.sum(scales)
return _f, trace, jnp.sum(scales**2) + jnp.sum(translations**2) | aa5628cd21b1757a17a3e45480db13149a5367a7 | 3,635,577 |
def inverse_hybrid_transform(value):
"""
Transform back from the IRAF-style hybrid log values.
This takes the hybrid log value and transforms it back to the
actual value. That value is returned. Unlike the hybrid_transform
function, this works on single values not a numpy array. That is because
one is not going to have a data array in the hybrid transformation form.
Parameters
----------
value : A real number to be transformed back from the hybrid
log scaling
Returns
-------
newvalue : The associated value that maps to the given hybrid
log value.
"""
if value < 0.:
workvalue = -1.0*value
sign = -1.0
else:
workvalue = value
sign = +1.0
if workvalue < 1.0:
newvalue = 10.*workvalue
else:
newvalue = 10.**workvalue
newvalue = sign * newvalue
return newvalue | 2b8db45901c6f762c970937058670c5c4c5457ea | 3,635,578 |
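Worked examples (assuming the function above is in scope): values below 1 map back linearly, values at or above 1 exponentially, and the sign is preserved.
print(inverse_hybrid_transform(0.5))   # 5.0   (linear region: 10 * 0.5)
print(inverse_hybrid_transform(2.0))   # 100.0 (log region: 10 ** 2)
print(inverse_hybrid_transform(-2.0))  # -100.0 (sign restored)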
def regress_trend_channel(arr):
"""
通过arr计算拟合曲线及上下拟合通道曲线,返回三条拟合曲线,组成拟合通道
:param arr: numpy array
:return: y_below, y_fit, y_above
"""
# 通过ABuRegUtil.regress_y计算拟合曲线和模型reg_mode,不使用缩放参数zoom
reg_mode, y_fit = ABuRegUtil.regress_y(arr, zoom=False)
reg_params = reg_mode.params
x = np.arange(0, len(arr))
a = reg_params[0]
b = reg_params[1]
# 通过argmin寻找出原始序列和拟合序列差值的最小点,差值最小代表点位离拟合曲线远,eg: 100 - 80 < 100 - 90
min_ind = (arr.T - y_fit).argmin()
# 根据a, b计算出below值, 注意这里是差,eg: below:100 - 80 = 20
below = x[min_ind] * b + a - arr[min_ind]
# 计算x * b + a但- below,即拟合曲线保持相同的斜率整体下移below值
y_below = x * b + a - below
# 通过argmax寻找出原始序列和拟合序列差值的最大点,差值最小代表点位离拟合曲线远,eg: 120 - 100 > 120 - 110
max_ind = (arr.T - y_fit).argmax()
# 根据a, b计算出above值, 注意这里是差,eg: above 100 - 120 = -20, 即above是负数
above = x[max_ind] * b + a - arr[max_ind]
# 计算x * b + a但整天- above,由于above是负数,即相加 即拟合曲线保持相同的斜率整体上移above值
y_above = x * b + a - above
return y_below, y_fit, y_above | b0c78fac320e4df6f140858079218c1d410ba1e3 | 3,635,579 |
def answers(provider):
"""Default answers data for copier"""
answers = {}
answers["class_name"] = "TemplateTestCharm"
# Note "TestCharm" can't be used, that's the name of the deafult unit test class
answers["charm_type"] = provider
return answers | 9ae26b4eceab5a40d9b342dcb510d3e6843ee640 | 3,635,580 |
def encode(ds, is_implicit_vr, is_little_endian):
"""Encode a *pydicom* :class:`~pydicom.dataset.Dataset` `ds`.
Parameters
----------
ds : pydicom.dataset.Dataset
The dataset to encode
is_implicit_vr : bool
The element encoding scheme the dataset will be encoded with, ``True``
for implicit VR, ``False`` for explicit VR.
is_little_endian : bool
The byte ordering the dataset will be encoded in, ``True`` for little
endian, ``False`` for big endian.
Returns
-------
bytes or None
The encoded dataset as :class:`bytes` (if successful) or ``None`` if
the encoding failed.
"""
# pylint: disable=broad-except
fp = DicomBytesIO()
fp.is_implicit_VR = is_implicit_vr
fp.is_little_endian = is_little_endian
try:
write_dataset(fp, ds)
except Exception as ex:
LOGGER.error("pydicom.write_dataset() failed:")
LOGGER.error(ex)
fp.close()
return None
bytestring = fp.parent.getvalue()
fp.close()
return bytestring | 966aa925eb57a7306ca7f37314938c180bb8d25b | 3,635,581 |
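A minimal sketch of calling the encoder (assuming pydicom is installed and the function above is in scope; the element values here are made up):
from pydicom.dataset import Dataset
ds = Dataset()
ds.PatientName = 'Test^Patient'
ds.PatientID = '123456'
encoded = encode(ds, is_implicit_vr=True, is_little_endian=True)
print(encoded is not None)  # True on success, None if encoding failed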
import os
def DetectGae():
"""Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE.
"""
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/')) | bcfbcbe3480269a0faca40d26d099fe0f9ff74fa | 3,635,582 |
def set_static_assets(all_objects, log):
    """Save reloading the same thing over and over."""
    new_objects = []
    if len(all_objects) > 0:
        try:
            from PIL import Image, ImageDraw, ImageFont, ImageChops
        except ImportError:
            log.import_error('Pillow')
for obj in all_objects:
if obj._type == 'text':
try:
obj.font = ImageFont.truetype(obj.font, obj.size)
except OSError:
if obj.font == 'default':
obj.font = ImageFont.load_default()
else:
log.error(f"Font '{obj.font}' not found.")
if obj._type == 'image':
source = Image.open(obj.src)
source = source.convert('RGBA')
source = source.rotate(obj.rotate, expand=True)
source = ImageChops.multiply(source,
Image.new('RGBA', source.size,
(255, 255, 255, int(obj.opacity * 255))
)
)
obj.src = source
new_objects.append(obj)
return new_objects | a36a50df3d272d92dac7bbefa2e9c32de94b700b | 3,635,583 |
def get_side_effects_from_sider(meddra_all_se_file):
"""
Get the most frequent side effects from SIDER
"""
pubchem_to_umls = {}
umls_to_name = {}
with open(meddra_all_se_file, 'r') as med_fd:
for line in med_fd:
fields = line.strip().split('\t')
pubchem = str(int(fields[1].split('CID')[1]))
concept_type = fields[3].upper()
umls_id = fields[4].upper()
umls_term = fields[5].lower()
if concept_type == 'PT':
pubchem_to_umls.setdefault(pubchem, set()).add(umls_id)
umls_to_name[umls_id] = umls_term
print('NUMBER OF PUBCHEM IDS ASSOCIATED WITH UMLS: {}'.format(len(pubchem_to_umls)))
return pubchem_to_umls, umls_to_name | 4fa012cd2a16e09f01d43ae66f99640f1e090e22 | 3,635,584 |
def beam_constraint_I_design_jac(samples):
"""
Jacobian with respect to the design variables
Desired behavior is when constraint is less than 0
"""
X,Y,E,R,w,t = samples
L = 100
grad = np.empty((samples.shape[1],2))
grad[:,0] = (L*(12*t*X + 6*w*Y))/(R*t**2*w**3)
grad[:,1] = (L*(6*t*X + 12*w*Y))/(R*t**3*w**2)
return -grad | 76553d7ab55221d5a0b52062f6aced8c3d316332 | 3,635,585 |
import os
def get_env_var(name, default_value = None):
"""Get the value of an environment variable, if defined"""
if name in os.environ:
return os.environ[name]
elif default_value is not None:
return default_value
else:
raise RuntimeError('Required environment variable %s not found' % name) | 0f0455ede0e025c9da9fd65769a1d4e52ae520fc | 3,635,586 |
import logging
def check_usage_quota(vol_size_in_MB, tenant_uuid, datastore_url, privileges, vm_datastore_url):
""" Check if the volume can be created without violating the quota. """
if privileges:
error_msg, total_storage_used = get_total_storage_used(tenant_uuid, datastore_url, vm_datastore_url)
if error_msg:
# cannot get the total_storage_used, to be safe, return False
return False
usage_quota = privileges[auth_data_const.COL_USAGE_QUOTA]
logging.debug("total_storage_used=%d, usage_quota=%d", total_storage_used, usage_quota)
# if usage_quota which read from DB is 0, which means
# no usage_quota, function should return True
if usage_quota == 0:
return True
return vol_size_in_MB + total_storage_used <= usage_quota
else:
# no privileges
return True | d02437b3096765f99f8a9e368456bd091489255b | 3,635,587 |
def _css_to_rect(css):
"""
Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object
:param css: plain tuple representation of the rect in (top, right, bottom, left) order
:return: a dlib `rect` object
"""
return dlib.rectangle(css[2], css[1], css[0], css[3]) | 8b60c95d3a7fe965bc66f7ecb3ada4ec249925dd | 3,635,588 |
def parse_arguments():
""" Use arparse to parse the input arguments and return it as a argparse.ArgumentParser. """
ap = standard_parser()
add_annotations_arguments(ap)
add_task_arguments(ap)
return ap.parse_args() | aa6dd1031489ed492190d1e60e512d5b8465d6be | 3,635,589 |
from typing import Optional
from typing import Sequence
def get_alert_contacts(alert_contact_name: Optional[str] = None,
email: Optional[str] = None,
ids: Optional[Sequence[str]] = None,
name_regex: Optional[str] = None,
output_file: Optional[str] = None,
phone_num: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAlertContactsResult:
"""
This data source provides the Arms Alert Contacts of the current Alibaba Cloud user.
> **NOTE:** Available in v1.129.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
ids = alicloud.arms.get_alert_contacts()
pulumi.export("armsAlertContactId1", ids.contacts[0].id)
name_regex = alicloud.arms.get_alert_contacts(name_regex="^my-AlertContact")
pulumi.export("armsAlertContactId2", name_regex.contacts[0].id)
```
:param str alert_contact_name: The name of the alert contact.
:param str email: The email address of the alert contact.
:param Sequence[str] ids: A list of Alert Contact IDs.
:param str name_regex: A regex string to filter results by Alert Contact name.
:param str phone_num: The mobile number of the alert contact.
"""
__args__ = dict()
__args__['alertContactName'] = alert_contact_name
__args__['email'] = email
__args__['ids'] = ids
__args__['nameRegex'] = name_regex
__args__['outputFile'] = output_file
__args__['phoneNum'] = phone_num
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:arms/getAlertContacts:getAlertContacts', __args__, opts=opts, typ=GetAlertContactsResult).value
return AwaitableGetAlertContactsResult(
alert_contact_name=__ret__.alert_contact_name,
contacts=__ret__.contacts,
email=__ret__.email,
id=__ret__.id,
ids=__ret__.ids,
name_regex=__ret__.name_regex,
names=__ret__.names,
output_file=__ret__.output_file,
phone_num=__ret__.phone_num) | e885d29e56b1403f5b879723247b4d7b28921710 | 3,635,590 |
def read_response(rfile, request_method, body_size_limit, include_body=True):
"""
Return an (httpversion, code, msg, headers, content) tuple.
By default, both response header and body are read.
If include_body=False is specified, content may be one of the
following:
- None, if the response is technically allowed to have a response body
- "", if the response must not have a response body (e.g. it's a
response to a HEAD request)
"""
line = rfile.readline()
# Possible leftover from previous message
if line == b"\r\n" or line == b"\n":
line = rfile.readline()
if not line:
raise HttpErrorConnClosed(502, "Server disconnect.")
parts = parse_response_line(line)
if not parts:
raise HttpError(502, "Invalid server response: %s" % repr(line))
proto, code, msg = parts
httpversion = parse_http_protocol(proto)
if httpversion is None:
raise HttpError(502, "Invalid HTTP version in line: %s" % repr(proto))
headers = read_headers(rfile)
if headers is None:
raise HttpError(502, "Invalid headers.")
if include_body:
content = read_http_body(
rfile,
headers,
body_size_limit,
request_method,
code,
False
)
else:
# if include_body==False then a None content means the body should be
# read separately
content = None
return Response(httpversion, code, msg, headers, content) | af4eb7c8dcd1f7d0727fe0ae6d07e48b6dc3533c | 3,635,591 |
def epi_approx_tiramisu(image_shape: tuple, num_classes: int,
class_weights=None,
initial_filters: int=48,
growth_rate: int=16,
layer_sizes: list=[4, 5, 7, 10, 12],
bottleneck_size: int=15,
dropout: float=0.2,
learning_rate: float=1e-3,
momentum: float=0.75,
):
"""
Build a Tiramisu model that estimates Epistemic uncertainty.
Args:
image_shape: the image shape to create the model for
num_classes: the number of classes to segment for (e.g. c)
class_weights: the weights for each class
initial_filters: the number of filters in the first convolution layer
growth_rate: the growth rate to use for the network (e.g. k)
layer_sizes: a list with the size of each dense down-sample block.
reversed to determine the size of the up-sample blocks
bottleneck_size: the number of convolutional layers in the bottleneck
dropout: the dropout rate to use in dropout layers
learning_rate: the learning rate for the RMSprop optimizer
momentum: the momentum for the exponential moving average
Returns:
a compiled model of the Tiramisu architecture + Epistemic approximation
"""
# build the base of the network
inputs, logits = build_tiramisu(image_shape, num_classes,
initial_filters=initial_filters,
growth_rate=growth_rate,
layer_sizes=layer_sizes,
bottleneck_size=bottleneck_size,
dropout=dropout,
)
# pass the logits through the Softmax activation to get probabilities
softmax = Activation('softmax')(logits)
# build the Tiramisu model
tiramisu = Model(inputs=[inputs], outputs=[softmax], name='tiramisu')
# the inputs for the Monte Carlo model
inputs = Input(image_shape)
# pass the values through the Tiramisu network
tiramisu_out = tiramisu(inputs)
# create an exponential moving average of softmax to estimate a
# Monte Carlo simulation and provide epistemic uncertainty
mean = MovingAverage(momentum=momentum)(tiramisu_out)
# calculate the epistemic uncertainty as the entropy of the means
entropy = Entropy(name='entropy')(mean)
# build the epistemic uncertainty model
model = Model(inputs=[inputs], outputs=[tiramisu_out, entropy])
# compile the model
model.compile(
optimizer=RMSprop(lr=learning_rate),
loss={'tiramisu': build_categorical_crossentropy(class_weights)},
metrics={'tiramisu': [build_categorical_accuracy(weights=class_weights)]},
)
return model | 455f31c0f9610db90646c409771dd91197421f64 | 3,635,592 |
def matrix_modinv(matrix, m):
"""Return inverse of the matrix modulo m"""
matrix_det = int(round(linalg.det(matrix)))
return modinv(abs(matrix_det), m)*linalg.inv(matrix)*matrix_det*sign(matrix_det) | 776e00f5d34a31d27f9af0dd065f0edde1449457 | 3,635,593 |
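A worked example (assuming numpy is imported as np, `linalg`/`sign` refer to numpy's, and `modinv` is the project's scalar modular-inverse helper): the classic Hill-cipher matrix inverted mod 26.
import numpy as np
M = np.array([[3, 3], [2, 5]])              # det = 9, modinv(9, 26) = 3
print(np.round(matrix_modinv(M, 26)) % 26)  # [[15. 17.] [20.  9.]]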
def hexStringToRGB(hex):
"""
Converts hex color string to RGB values
:param hex: color string in format: #rrggbb or rrggbb with 8-bit values in hexadecimal system
    :return: tuple containing RGB color values (integers from 0 to 255 each)
"""
temp = hex
length = len(hex)
if temp[0] == "#":
temp = hex[1:length]
if not len(temp) == 6:
return None
colorArr = bytearray.fromhex(temp)
return colorArr[0], colorArr[1], colorArr[2] | 7adcb7b247e6fe1aefa1713d754c828d1ac4a5b0 | 3,635,594 |
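For example (assuming the function above is in scope):
print(hexStringToRGB('#ff8000'))  # (255, 128, 0)
print(hexStringToRGB('bad'))      # None (not six hex digits)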
def remove_element(list, remove):
"""[summary]
Args:
list ([list]): [List of objects]
remove ([]): [What element to remove]
Returns:
[list]: [A new list where the element has been removed]
"""
for object in list:
if object._id == remove[0]:
list.remove(object)
return list | 65a9fe296a6d8369127003c33f58022ededfdcba | 3,635,595 |
def warmup_cosine_decay_schedule(
init_value: float,
peak_value: float,
warmup_steps: int,
decay_steps: int,
end_value: float = 0.0
) -> base.Schedule:
"""Linear warmup followed by cosine decay.
Args:
init_value: Initial value for the scalar to be annealed.
peak_value: Peak value for scalar to be annealed at end of warmup.
warmup_steps: Positive integer, the length of the linear warmup.
decay_steps: Positive integer, the total length of the schedule. Note that
this includes the warmup time, so the number of steps during which cosine
annealing is applied is `decay_steps - warmup_steps`.
end_value: End value of the scalar to be annealed.
Returns:
schedule: A function that maps step counts to values.
"""
schedules = [
linear_schedule(
init_value=init_value,
end_value=peak_value,
transition_steps=warmup_steps),
cosine_decay_schedule(
init_value=peak_value,
decay_steps=decay_steps - warmup_steps,
alpha=end_value/peak_value)]
return join_schedules(schedules, [warmup_steps]) | 5f6aeea25eff986711e0b7041f4ff18317b4c2b6 | 3,635,596 |
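A hypothetical usage sketch, assuming the schedule factory above is in scope (its signature mirrors optax.warmup_cosine_decay_schedule):
schedule = warmup_cosine_decay_schedule(
    init_value=0.0, peak_value=1e-3, warmup_steps=100, decay_steps=1000)
print(schedule(0))     # 0.0   (start of linear warmup)
print(schedule(100))   # ~1e-3 (peak value at end of warmup)
print(schedule(1000))  # ~0.0  (end value after cosine decay)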
import networkx as nx
import uuid
def __transform_template_to_graph(j):
"""
Transforms the simple format to a graph.
:param j:
:return:
"""
g = nx.DiGraph()
for a in j["nodes"]:
g.add_node(a[0], label = a[1], id = str(uuid.uuid4()))
for e in j["edges"]:
g.add_edge(e[0], e[1], label = e[2])
return g | 5c94b8114e2d5c6811abf12b83f1c5f4c24d3192 | 3,635,597 |
import datetime
def datetime_to_string(dt):
"""
Convert a datetime object to the preferred format for the shopify api. (2016-01-01T11:00:00-5:00)
:param dt: Datetime object to convert to timestamp.
:return: Timestamp string for the datetime object.
"""
if not dt:
return None
if not isinstance(dt, datetime.datetime):
raise ValueError('Must supply an instance of `datetime`.')
# Calculate the utc offset of the current timezone
# 1 is added to the total seconds to account for the time which it takes the operation to calculate
# utcnow and local now.
offset = int(divmod((datetime.datetime.utcnow() - datetime.datetime.now()).total_seconds() + 1, 60)[0] / 60)
offset_str = '-%d:00' % offset
dt_str = dt.strftime('%Y-%m-%dT%H:%M:%S')
return dt_str + offset_str | 0bbda7c2be578245dc24d693b4a52ae69bd1ecf7 | 3,635,598 |
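For instance (assuming the function above is in scope; the printed offset depends on the machine's local timezone):
dt = datetime.datetime(2016, 1, 1, 11, 0, 0)
print(datetime_to_string(dt))  # e.g. '2016-01-01T11:00:00-5:00' in US Eastern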
from typing import List
def names(package: str) -> List[str]:
"""List all plug-ins in one package"""
_import_all(package)
return sorted(_PLUGINS[package].keys(), key=lambda p: info(package, p).sort_value) | 545e9d1df93e902940a34bc5537063c4d6ceeb1f | 3,635,599 |