content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
|---|---|---|
def get_selector(info, mode="advanced"):
"""
The selector that decides the scope of the dashboard. It MUST have the keywords
?work and ?author.
You can override everything here by adapting the query on WDQS:
https://w.wiki/3Cmd
Args:
info: either a dict containing complex information for the selector or a list of QIDs
        mode: a string representing the mode. If "advanced", a config dict is expected in the
            info parameter; if "basic", a list of QIDs is expected. Defaults to "advanced".
"""
if mode == "advanced":
fields_of_work = info["restriction"]["author_area"]
if fields_of_work is not None:
field_of_work_selector = (
"""
VALUES ?field_of_work """
+ format_with_prefix(fields_of_work)
+ """
?author wdt:P101 ?field_of_work.
"""
)
else:
field_of_work_selector = ""
topic_of_work = info["restriction"]["topic_of_work"]
if topic_of_work is not None:
topic_of_work_selector = (
"""
VALUES ?topics """
+ format_with_prefix(topic_of_work)
+ """
?work wdt:P921/wdt:P279* ?topics.
"""
)
else:
topic_of_work_selector = ""
region = info["restriction"]["institution_region"]
if region is not None:
region_selector = (
"""
VALUES ?regions """
+ format_with_prefix(region)
+ """
?country wdt:P361* ?regions.
?author ( wdt:P108 | wdt:P463 | wdt:P1416 ) / wdt:P361* ?organization .
?organization wdt:P17 ?country.
"""
)
else:
region_selector = ""
gender = info["restriction"]["gender"]
if gender is not None:
gender_selector = (
"""
VALUES ?gender """
+ format_with_prefix(gender)
+ """
?author wdt:P21 ?gender.
"""
)
else:
gender_selector = ""
event = info["restriction"]["event"]
if event is not None:
# P823 - speaker
# P664 - organizer
            # P1344 - participant in
# ^P710 - inverse of (participated in)
event_selector = (
"""
VALUES ?event """
+ format_with_prefix(event)
+ """
?event wdt:P823 | wdt:P664 | wdt:P1344 | ^wdt:P710 ?author.
"""
)
else:
event_selector = ""
author_is_topic_of = info["restriction"]["author_is_topic_of"]
if author_is_topic_of is not None:
author_is_topic_of_selector = (
"""
VALUES ?biographical_work """
+ format_with_prefix(author_is_topic_of)
+ """
?biographical_work wdt:P921 ?author.
"""
)
else:
author_is_topic_of_selector = ""
selector = (
field_of_work_selector
+ topic_of_work_selector
+ region_selector
+ gender_selector
+ event_selector
+ author_is_topic_of_selector
+ """
?work wdt:P50 ?author.
"""
)
else:
selector = f"""
VALUES ?work {format_with_prefix(info)} .
?work wdt:P50 ?author .
"""
return selector
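# Illustrative usage sketch (not from the original module): in "basic" mode the
# info argument is just a list of work QIDs; format_with_prefix() is assumed to
# render them as a SPARQL VALUES block such as "{wd:Q58260882 wd:Q61742750}".
#
#     selector = get_selector(["Q58260882", "Q61742750"], mode="basic")
#     # -> "VALUES ?work {wd:Q58260882 wd:Q61742750} . ?work wdt:P50 ?author ."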
|
c499a2b4e29a1afc91e6d40563a71ad5e71b7724
| 3,645,200
|
def get_table_names(self, connection, schema=None, **kw):
"""
Get table names
Args:
connection ():
schema ():
**kw:
Returns:
"""
return self._get_table_or_view_names(
["r", "e"], connection, schema, **kw
)
|
e66ae9eb284e10785c7172ab36c79b25a48dce47
| 3,645,201
|
def get_general(prefix, generator, pars, filename=None, **kwargs):
""" A general getter function that either gets the asked-for data
from a file or generates it with the given generator function. """
pars = get_pars(pars, **kwargs)
id_pars, pars = get_id_pars_and_set_default_pars(pars)
try:
result = read_tensor_file(prefix=prefix, pars=id_pars,
filename=filename)
except RuntimeError:
result = generator(pars, id_pars)
return result
|
a27e25e0ec992f8faa13b167dafd38edd6eb6a1d
| 3,645,202
|
def gen_case(test):
"""Generates an OK test case for a test
Args:
test (``Test``): OK test for this test case
Returns:
``dict``: the OK test case
"""
code_lines = str_to_doctest(test.input.split('\n'), [])
for i in range(len(code_lines) - 1):
if code_lines[i+1].startswith('>>>') and len(code_lines[i].strip()) > 3 and not code_lines[i].strip().endswith("\\"):
code_lines[i] += ';'
code_lines.append(test.output)
return {
'code': '\n'.join(code_lines),
'hidden': test.hidden,
'locked': False
}
|
586a5436442172d43a022e33185bd84c302fdb9c
| 3,645,203
|
def get_user_list_view(request):
"""
render user admin view
Arguments:
request {object} -- wsgi http request object
Returns:
html -- render html template
"""
if request.user.has_perm('auth.view_user'):
user_list = User.objects.all()
temp_name = 'admin/list_users.html'
context = {
'user_url_path': '用户',
'obj': user_list
}
else:
temp_name = 'admin/error.html'
context = {}
return render(
request,
temp_name,
context=context
)
|
f0ee280ac60a48f61f5da0d7d63b050e16ea6696
| 3,645,204
|
import numpy as np
def to_odds(p):
"""
Converts a probability to odds
"""
with np.errstate(divide='ignore'):
return p / (1 - p)
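# Hedged usage example (not part of the original snippet): with an array input
# the suppressed divide warning lets p == 1 map cleanly to infinite odds.
if __name__ == "__main__":
    print(to_odds(np.asarray([0.5, 0.75, 1.0])))  # [ 1.  3. inf]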
|
b468b75ad736ca67e1fb39fd231bc185d851fbdf
| 3,645,205
|
import numpy as np
def step_euler(last, dt, drift, volatility, noise):
"""Approximate SDE in one time step with Euler scheme"""
return last + drift * dt + np.dot(volatility, noise)
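# Minimal usage sketch (parameter values are illustrative, not from the original
# code): simulate a 1-D mean-reverting path by repeatedly applying the Euler step
# with Gaussian noise scaled by sqrt(dt).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x, dt = 1.0, 0.01
    for _ in range(1000):
        drift = -0.5 * x                       # mean-reverting drift
        noise = rng.normal(scale=np.sqrt(dt))  # Brownian increment over dt
        x = step_euler(x, dt, drift, 0.2, noise)
    print(x)  # state after 10 units of simulated time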
|
e9f58425f696316679730397168c7965b3faadd5
| 3,645,206
|
def KK_RC66_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
R11 = params["R11"]
R12 = params["R12"]
R13 = params["R13"]
R14 = params["R14"]
R15 = params["R15"]
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
R44 = params["R44"]
R45 = params["R45"]
R46 = params["R46"]
R47 = params["R47"]
R48 = params["R48"]
R49 = params["R49"]
R50 = params["R50"]
R51 = params["R51"]
R52 = params["R52"]
R53 = params["R53"]
R54 = params["R54"]
R55 = params["R55"]
R56 = params["R56"]
R57 = params["R57"]
R58 = params["R58"]
R59 = params["R59"]
R60 = params["R60"]
R61 = params["R61"]
R62 = params["R62"]
R63 = params["R63"]
R64 = params["R64"]
R65 = params["R65"]
R66 = params["R66"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j * t_values[22]))
+ (R24 / (1 + w * 1j * t_values[23]))
+ (R25 / (1 + w * 1j * t_values[24]))
+ (R26 / (1 + w * 1j * t_values[25]))
+ (R27 / (1 + w * 1j * t_values[26]))
+ (R28 / (1 + w * 1j * t_values[27]))
+ (R29 / (1 + w * 1j * t_values[28]))
+ (R30 / (1 + w * 1j * t_values[29]))
+ (R31 / (1 + w * 1j * t_values[30]))
+ (R32 / (1 + w * 1j * t_values[31]))
+ (R33 / (1 + w * 1j * t_values[32]))
+ (R34 / (1 + w * 1j * t_values[33]))
+ (R35 / (1 + w * 1j * t_values[34]))
+ (R36 / (1 + w * 1j * t_values[35]))
+ (R37 / (1 + w * 1j * t_values[36]))
+ (R38 / (1 + w * 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
+ (R44 / (1 + w * 1j * t_values[43]))
+ (R45 / (1 + w * 1j * t_values[44]))
+ (R46 / (1 + w * 1j * t_values[45]))
+ (R47 / (1 + w * 1j * t_values[46]))
+ (R48 / (1 + w * 1j * t_values[47]))
+ (R49 / (1 + w * 1j * t_values[48]))
+ (R50 / (1 + w * 1j * t_values[49]))
+ (R51 / (1 + w * 1j * t_values[50]))
+ (R52 / (1 + w * 1j * t_values[51]))
+ (R53 / (1 + w * 1j * t_values[52]))
+ (R54 / (1 + w * 1j * t_values[53]))
+ (R55 / (1 + w * 1j * t_values[54]))
+ (R56 / (1 + w * 1j * t_values[55]))
+ (R57 / (1 + w * 1j * t_values[56]))
+ (R58 / (1 + w * 1j * t_values[57]))
+ (R59 / (1 + w * 1j * t_values[58]))
+ (R60 / (1 + w * 1j * t_values[59]))
+ (R61 / (1 + w * 1j * t_values[60]))
+ (R62 / (1 + w * 1j * t_values[61]))
+ (R63 / (1 + w * 1j * t_values[62]))
+ (R64 / (1 + w * 1j * t_values[63]))
+ (R65 / (1 + w * 1j * t_values[64]))
+ (R66 / (1 + w * 1j * t_values[65]))
)
|
eb64f86bc0a8a7ff0d88a1246a754563a955c61f
| 3,645,207
|
from typing import List
def similar_in_manner(manner_1: UnmarkableManner) -> List[Manner]:
"""
    If the value is a wildcard value, return
    all possible manner of articulation values; otherwise
    return the single corresponding manner of articulation value.
"""
if isinstance(manner_1, MarkedManner):
return manner_1.manner
return manner_states
|
fea0c78c93a5f80e4f2bba6eac7f628106fba796
| 3,645,208
|
import re
def wikify(value):
"""Converts value to wikipedia "style" of URLS, removes non-word characters
and converts spaces to hyphens and leaves case of value.
"""
value = re.sub(r'[^\w\s-]', '', value).strip()
return re.sub(r'[-\s]+', '_', value)
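# Illustrative example (not part of the original snippet):
if __name__ == "__main__":
    print(wikify("Guido van Rossum's FAQ, part 2"))  # Guido_van_Rossums_FAQ_part_2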
|
dc4504ea6eb7905b5e18a1d1f473a4f337697b26
| 3,645,209
|
def build_model(name, num_classes, loss='softmax', pretrained=True,
use_gpu=True, dropout_prob=0.0, feature_dim=512, fpn=True, fpn_dim=256,
gap_as_conv=False, input_size=(256, 128), IN_first=False):
"""A function wrapper for building a model.
"""
avai_models = list(__model_factory.keys())
if name not in avai_models:
raise KeyError('Unknown model: {}. Must be one of {}'.format(name, avai_models))
return __model_factory[name](
num_classes=num_classes,
loss=loss,
pretrained=pretrained,
use_gpu=use_gpu,
dropout_prob=dropout_prob,
feature_dim=feature_dim,
fpn=fpn,
fpn_dim=fpn_dim,
gap_as_conv=gap_as_conv,
input_size=input_size,
IN_first=IN_first
)
|
1278e5c30ebdb73e011b0630de3738936e87dc93
| 3,645,210
|
def policy_absent(name):
"""
Ensure that the named policy is not present
:param name: The name of the policy to be deleted
:returns: The result of the state execution
:rtype: dict
"""
current_policy = __salt__['mdl_vault.get_policy'](name)
ret = {'name': name,
'comment': '',
'result': False,
'changes': {}}
if not current_policy:
ret['result'] = True
ret['comment'] = ('The {policy_name} policy is not present.'.format(
policy_name=name))
elif __opts__['test']:
ret['result'] = None
if current_policy:
ret['changes']['old'] = current_policy
ret['changes']['new'] = {}
ret['comment'] = ('The {policy_name} policy {suffix}.'.format(
policy_name=name,
suffix='will be deleted' if current_policy else 'is not present'))
else:
try:
__salt__['mdl_vault.delete_policy'](name)
ret['result'] = True
            ret['comment'] = ('The {policy_name} policy was successfully '
                              'deleted.'.format(policy_name=name))
ret['changes']['old'] = current_policy
ret['changes']['new'] = {}
except __utils__['mdl_vault.vault_error']() as e:
log.exception(e)
            ret['comment'] = ('The {policy_name} policy failed to be '
                              'deleted.'.format(policy_name=name))
return ret
|
6f1498f07a8e14f2e7668d7d5cc8d68128cb6004
| 3,645,211
|
def _tolist(arg):
"""
Assure that *arg* is a list, e.g. if string or None are given.
Parameters
----------
arg :
Argument to make list
Returns
-------
list
list(arg)
Examples
--------
>>> _tolist('string')
['string']
>>> _tolist([1,2,3])
[1, 2, 3]
>>> _tolist(None)
[None]
"""
if isinstance(arg, str):
return [arg]
try:
return list(arg)
except TypeError:
return [arg]
|
e4293991eeb6d15470511281680af44353232c37
| 3,645,212
|
import numpy as np
def calc_Qhs_sys(bpr, tsd):
    """
    It calculates the final heating loads.
    """
    # GET SYSTEMS EFFICIENCIES
    # (HOURS_IN_YEAR is assumed to come from the project's constants module)
energy_source = bpr.supply['source_hs']
scale_technology = bpr.supply['scale_hs']
efficiency_average_year = bpr.supply['eff_hs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
tsd['E_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NATURALGAS":
tsd['NG_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "OIL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "COAL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['SOLAR_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "WOOD":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / HEATING')
elif scale_technology == "DISTRICT":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif scale_technology == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / HEATING')
return tsd
|
6016e2061b441248606b5a3ea95930d38b678525
| 3,645,213
|
import socket
from time import time as now
def wait_net_service(server, port, timeout=None):
""" Wait for network service to appear
@param timeout: in seconds, if None or 0 wait forever
    @return: True or False; if timeout is None it may only return True or
        raise an unhandled network exception
"""
s = socket.socket()
if timeout:
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
s.connect((server, port))
except (socket.timeout, socket.error):
pass
else:
s.close()
return True
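# Hedged usage sketch (host and port are placeholders, not from the original code):
# block for at most five seconds while waiting for a local server to come up.
if __name__ == "__main__":
    if wait_net_service("127.0.0.1", 8080, timeout=5):
        print("service is up")
    else:
        print("timed out")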
|
f8089ed335c783140ab4b334c44c534ee5c48121
| 3,645,214
|
import re
def latex_nucleus(nucleus):
"""Creates a isotope symbol string for processing by LaTeX.
Parameters
----------
nucleus : str
        Of the form `'<mass><sym>'`, where `'<mass>'` is the nucleus'
mass number and `'<sym>'` is its chemical symbol. I.e. for
lead-207, `nucleus` would be `'207Pb'`.
Returns
-------
latex_nucleus : str
Of the form ``$^{<mass>}$<sym>`` i.e. given `'207Pb'`, the
return value would be ``$^{207}$Pb``
Raises
------
ValueError
If `nucleus` does not match the regex ``^[0-9]+[a-zA-Z]+$``
"""
    if re.fullmatch(r'\d+[a-zA-Z]+', nucleus):
mass = re.search(r'\d+', nucleus).group()
sym = re.search(r'[a-zA-Z]+', nucleus).group()
return f'$^{{{mass}}}${sym}'
else:
raise ValueError(
f'{cols.R}`nucleus` is invalid. Should match the regex'
f' \\d+[a-zA-Z]+{cols.END}'
)
|
e2a2c04a63284cdd8f06fdd556306149e7092703
| 3,645,215
|
def ConvertToFloat(line, colnam_list):
"""
Convert some columns (in colnam_list) to float, and round by 3 decimal.
:param line: a dictionary from DictReader.
:param colnam_list: float columns
:return: a new dictionary
"""
for name in colnam_list:
line[name] = round(float(line[name]), 3)
return line
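# Small illustrative example (not in the original module): round two numeric
# columns of a csv.DictReader-style row in place.
if __name__ == "__main__":
    row = {"name": "sample", "x": "1.23456", "y": "7.891011"}
    print(ConvertToFloat(row, ["x", "y"]))  # {'name': 'sample', 'x': 1.235, 'y': 7.891}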
|
e95fd6cfa9bb57060fdd835eea139fd9c67bc211
| 3,645,216
|
import numpy as np
def rnn_step(x, prev_h, Wx, Wh, b):
"""
Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
activation function.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data for this timestep, of shape (N, D).
- prev_h: Hidden state from previous timestep, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
"""
next_h = np.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
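# Shape-check example (illustrative, not from the original assignment code):
if __name__ == "__main__":
    N, D, H = 4, 10, 5
    rng = np.random.default_rng(0)
    x, h = rng.normal(size=(N, D)), np.zeros((N, H))
    Wx, Wh, b = rng.normal(size=(D, H)), rng.normal(size=(H, H)), np.zeros(H)
    print(rnn_step(x, h, Wx, Wh, b).shape)  # (4, 5)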
|
78fef10a4c23b7a33a60829e1ad02a2e0381b834
| 3,645,217
|
from typing import List
import json
def transform_application_assigned_users(json_app_data: str) -> List[str]:
"""
Transform application users data for graph consumption
:param json_app_data: raw json application data
    :return: list of user ids
"""
users: List[str] = []
app_data = json.loads(json_app_data)
for user in app_data:
users.append(user["id"])
return users
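# Illustrative example (the JSON payload below is made up, not real application data):
if __name__ == "__main__":
    raw = '[{"id": "u-001", "status": "ACTIVE"}, {"id": "u-002", "status": "STAGED"}]'
    print(transform_application_assigned_users(raw))  # ['u-001', 'u-002']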
|
625c8f662b364bb3fe63bb26b06eaca57ae8be79
| 3,645,218
|
def testapp(app):
"""Create Webtest app."""
return TestApp(app)
|
68a46e993d75fc44de5a9063b409e89647a2738b
| 3,645,219
|
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
|
64873fabbe6cd12db8c9bc96e4e93f7181c7742d
| 3,645,220
|
def get_user_signatures(user_id):
"""
Given a user ID, returns the user's signatures.
:param user_id: The user's ID.
:type user_id: string
:return: list of signature data for this user.
:rtype: [dict]
"""
user = get_user_instance()
try:
user.load(user_id)
except DoesNotExist as err:
return {'errors': {'user_id': str(err)}}
signatures = user.get_user_signatures()
return [agr.to_dict() for agr in signatures]
|
8c099f84c9a9f383b56019f985f1be63e315503a
| 3,645,221
|
import json
def post_vehicle_action():
""" Add vehicle
:return:
"""
output = JsonOutput()
try:
if not request.is_json:
raise TypeError('Payload is not json')
payload = request.json
usecases.SetVehicleUsecase(db=db, vehicle=payload).execute()
output.add(status=200, response=json.dumps({'data': request.json}))
except Exception as error:
db.session.rollback()
app.logger.critical(str(error))
output.add(status=400, response=json.dumps({'error': str(error)}))
return output.show()
|
a24b4e326fe12c7d2a7054ca4b89245924a96d60
| 3,645,222
|
def get_day_suffix(day):
"""
Returns the suffix of the day, such as in 1st, 2nd, ...
"""
if day in (1, 21, 31):
return 'st'
    elif day in (2, 22):
return 'nd'
elif day in (3, 23):
return 'rd'
else:
return 'th'
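# Quick check (illustrative, not in the original module):
if __name__ == "__main__":
    print([str(d) + get_day_suffix(d) for d in (1, 2, 3, 11, 12, 13, 21, 22, 23)])
    # ['1st', '2nd', '3rd', '11th', '12th', '13th', '21st', '22nd', '23rd']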
|
7d9277303357de5405b3f6894cda24726d60ad47
| 3,645,223
|
def retain_images(image_dir,xml_file, annotation=''):
"""Deprecated"""
image_in_boxes_dict=return_image_in_boxes_dict(image_dir,xml_file, annotation)
return [img for img in image_in_boxes_dict if image_in_boxes_dict[img]]
|
54efc8c8b3c31f8466a0044c2e679cbf5a5545ff
| 3,645,224
|
from typing import List
from typing import Tuple
from typing import Dict
from pathlib import Path
from typing import cast
def compute_owa(
metrics: List[Tuple[float, float]],
datasets: Dict[K, DatasetSplit],
metadata: List[MetaData],
) -> float:
"""
Computes the OWA metric from the M4 competition, using a weighted average of the relative
MASE and sMAPE metrics depending on the size of the datasets.
Args:
metrics: The forecast's metrics (MASE and sMAPE).
datasets: The datasets for which the forecasts have been generated, mapped from a hashable
so that computations do not have to be repeated.
metadata: Metadata available for the dataset.
Returns:
The OWA metric value.
"""
assert (
len(metrics) == len(datasets) == len(metadata)
), "The lengths of the provided lists must be equal."
dataset_weights = np.array([len(d.gluonts()) for d in datasets.values()])
dataset_weights = dataset_weights / dataset_weights.sum()
naive_mase = 0
naive_smape = 0
actual_mase = 0
actual_smape = 0
for metric, (dataset_key, split), meta, weight in zip(
metrics, datasets.items(), metadata, dataset_weights
):
cache_file = Path.home() / ".cache" / "naive2" / f"{dataset_key}"
if cache_file.exists():
naive_forecast = QuantileForecasts.load(cache_file)
else:
naive_forecast = _naive_2_forecasts(
split.gluonts(), meta.freq, cast(int, meta.prediction_length)
)
cache_file.parent.mkdir(parents=True, exist_ok=True)
naive_forecast.save(cache_file)
data = split.evaluation()
seasonal_error = naive_error(data.past, get_seasonality(meta.freq))
naive_mase += (
mase(naive_forecast.median, data.future, seasonal_error) * weight
)
naive_smape += smape(naive_forecast.median, data.future) * weight
actual_mase += metric[0] * weight
actual_smape += metric[1] * weight
return 0.5 * (actual_smape / naive_smape + actual_mase / naive_mase)
|
2f4d19eecf85a9be720fb705eafe83c2ac1bced1
| 3,645,225
|
def parse(cell, config):
"""Extract connection info and result variable from SQL
Please don't add any more syntax requiring
special parsing.
Instead, add @arguments to SqlMagic.execute.
We're grandfathering the
connection string and `<<` operator in.
"""
result = {"connection": "", "sql": "", "result_var": None}
pieces = cell.split(None, 3)
if not pieces:
return result
result["connection"] = _connection_string(pieces[0], config)
if result["connection"]:
pieces.pop(0)
if len(pieces) > 1 and pieces[1] == "<<":
result["result_var"] = pieces.pop(0)
pieces.pop(0) # discard << operator
result["sql"] = (" ".join(pieces)).strip()
return result
|
4711b5f873281db520ff4d91646412ca08f7cbb7
| 3,645,226
|
import requests
import json
from requests.auth import HTTPBasicAuth
def createResource(url, user, pWd, resourceName, resourceJson):
"""
create a new resource based on the provided JSON
returns rc=200 (valid) & other rc's from the put
resourceDef (json)
"""
# create a new resource
apiURL = url + "/access/1/catalog/resources/"
header = {"content-type": "application/json"}
print("\tcreating resource: " + resourceName)
newResourceResp = requests.post(
apiURL,
data=json.dumps(resourceJson),
headers=header,
auth=HTTPBasicAuth(user, pWd),
verify=False,
)
print("\trc=" + str(newResourceResp.status_code))
print("\tbody=" + str(newResourceResp.text))
return newResourceResp.status_code
|
71257041a7bf098edd0668de6026539e554baff4
| 3,645,227
|
import string
import numpy as np
def generate_invalid_sequence():
"""Generates an invalid sequence of length 10"""
return ''.join(np.random.choice(list(string.ascii_uppercase + string.digits), size=10))
|
33639bc0c97710c411b2bfd0033ed15200c8edff
| 3,645,228
|
from input_surface import circle
import numpy as np
def transform_bcs_profile(T, axis, Nc):
""" Translates the profile to body cs and then transforms it for rotation.
"""
profile = circle(Nc, radius = 1, flag = 0)
Pb_new = np.zeros((Nc, 3), dtype = float)
ind_p = np.arange(0, 3*Nc, step = 3, dtype = int)
p_new = np.zeros(3*Nc, dtype = float)
p_new[ind_p] = profile[:, 0] + axis[0]
p_new[ind_p + 1] = profile[:, 1] + axis[1]
p_new[ind_p + 2] = axis[2]
P_new = np.dot(T.toarray(), p_new)
Pb_new[:, 0] = P_new[ind_p]
Pb_new[:, 1] = P_new[ind_p + 1]
Pb_new[:, 2] = P_new[ind_p + 2]
return Pb_new
|
c3cacd480cba73c7a3ce1b6f5e90582fc93e2a4b
| 3,645,229
|
def count_configuration(config, root=True, num_samples_per_dist=1):
"""Recursively count configuration."""
count = 1
if isinstance(config, dict):
for _, v in sorted(config.items()):
count *= count_configuration(
v, root=False, num_samples_per_dist=num_samples_per_dist)
elif callable(config):
assert num_samples_per_dist > 0, ('callable not allowed in config with '
'num_samples_per_dist < 1')
count *= num_samples_per_dist
elif isinstance(config, list):
if root:
count = ()
for c in config:
count += (count_configuration(
c, root=False, num_samples_per_dist=num_samples_per_dist),)
else:
count *= len(config)
return count
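# Illustrative example (config contents are made up): a dict whose values are a
# 3-element list and a 2-element list yields 3 * 2 = 6 configurations.
if __name__ == "__main__":
    cfg = {"lr": [0.1, 0.01, 0.001], "depth": [2, 4]}
    print(count_configuration(cfg))  # 6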
|
f7709bfd18744355f5739c4d0e4b52b952f6c8c7
| 3,645,230
|
from functools import wraps
def transition(state_table):
"""Decorator used to set up methods which cause transitions between states.
The decorator is applied to methods of the context (state machine) class.
Invoking the method may cause a transition to another state. To define
what the transitions are, the nextStates method of the TransitionTable class
is used.
"""
stVarName = state_table.inst_state_name
def wrapper(func):
state_table._addEventHandler(func.__name__)
@wraps(func)
def objCall(self, *args, **kwargs):
state_var = getattr(self, stVarName)
state_var.setXition(func)
rtn = func(self, *args, **kwargs)
state_var.toNextState(self)
return rtn
objCall.wrapping = stVarName
return objCall
return wrapper
|
675a6afe6abd068027892be561c2d032d13be52a
| 3,645,231
|
from typing import Union
import ivy
def isfinite(x: Union[ivy.Array, ivy.NativeArray], f: ivy.Framework = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Tests each element x_i of the input array x to determine if finite (i.e., not NaN and not equal to positive
or negative infinity).
:param x: Input array.
:type x: array
:param f: Machine learning framework. Inferred from inputs if None.
:type f: ml_framework, optional
:return: an array containing test results. An element out_i is True if x_i is finite and False otherwise.
The returned array must have a data type of bool.
"""
return _cur_framework(x, f=f).isfinite(x)
|
7326343319dae8bb8681a9612caaaddfc947f8c1
| 3,645,232
|
from pathlib import Path
from typing import Optional
def form_overwrite_file(
PATH: Path, QUESTION: Optional[str] = None, DEFAULT_NO: bool = True
) -> bool:
"""Yes/no form to ask whether file should be overwritten if already existing."""
if QUESTION is None:
QUESTION = "Overwrite {PATH}?"
save = True
if PATH.is_file():
save = form_yes_or_no(QUESTION, DEFAULT_NO=DEFAULT_NO)
return save
|
51de7eb948af9e0b6cd354e8e72815e16955200c
| 3,645,233
|
def dist(integer):
"""
Return the distance from center.
"""
if integer == 1:
return 0
c = which_layer(integer)
rows = layer_rows(c)
l = len(rows[0])
mid = (l / 2) - 1
for r in rows:
if integer in r:
list_pos = r.index(integer)
return c + abs(mid - list_pos) - 1
|
8fd27978058ac0d038836bd88dc2c7c590fec6b7
| 3,645,234
|
from datetime import datetime
def get_market_fundamental_by_ticker(date: str, market: str="KOSPI", prev=False) -> DataFrame:
"""특정 일자의 전종목 PER/PBR/배당수익률 조회
Args:
date (str ): 조회 일자 (YYMMDD)
market (str, optional): 조회 시장 (KOSPI/KOSDAQ/KONEX/ALL)
prev (bool, optional): 조회 일자가 휴일일 경우 이전 영업일 혹은 이후 영업일 선택
Returns:
DataFrame:
>> get_market_fundamental_by_ticker("20210104")
BPS PER PBR EPS DIV DPS
티커
095570 6802 4.660156 0.669922 982 6.550781 300
006840 62448 11.648438 0.399902 2168 2.970703 750
027410 15699 17.765625 0.320068 281 2.199219 110
282330 36022 15.062500 3.660156 8763 2.050781 2700
138930 25415 3.380859 0.219971 1647 6.468750 360
"""
if isinstance(date, datetime.datetime):
date = _datetime2string(date)
date = date.replace("-", "")
df = krx.get_market_fundamental_by_ticker(date, market)
holiday = (df[['BPS', 'PER', 'PBR', 'EPS', 'DIV', 'DPS']] == 0).all(axis=None)
if holiday:
target_date = get_nearest_business_day_in_a_week(date=date, prev=prev)
df = krx.get_market_fundamental_by_ticker(target_date, market)
# print(f"The date you entered {date} seems to be a holiday. PYKRX changes the date parameter to {target_date}.")
return df
|
ac13ef09867b69f354b75f1e1bd98f46baf995fc
| 3,645,235
|
def get_all(request):
""" Gets all tags in the db with counts of use """
tags = []
for tag in Tag.objects.all():
tag_data = {
'name': tag.name,
'count': tag.facebookimage_set.distinct().count()
}
if tag_data['count'] > 0:
tags.append(tag_data)
return JsonResponse({'data': tags})
|
bee0d6afd4ea8afeda0001d090cf0d9156249cce
| 3,645,236
|
import numpy as np
def quadratic_crop(x, bbox, alpha=1.0):
"""bbox is xmin, ymin, xmax, ymax"""
im_h, im_w = x.shape[:2]
bbox = np.array(bbox, dtype=np.float32)
bbox = np.clip(bbox, 0, max(im_h, im_w))
center = 0.5 * (bbox[0] + bbox[2]), 0.5 * (bbox[1] + bbox[3])
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
l = int(alpha * max(w, h))
l = max(l, 2)
required_padding = -1 * min(center[0] - l, center[1] - l, im_w -
(center[0] + l), im_h - (center[1] + l))
required_padding = int(np.ceil(required_padding))
if required_padding > 0:
padding = [
[required_padding, required_padding],
[required_padding, required_padding],
]
padding += [[0, 0]] * (len(x.shape) - 2)
x = np.pad(x, padding, "reflect")
center = center[0] + required_padding, center[1] + required_padding
xmin = int(center[0] - l / 2)
ymin = int(center[1] - l / 2)
return np.array(x[ymin:ymin + l, xmin:xmin + l, ...])
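# Minimal sketch (synthetic image and box, not from the original code): crop a
# square region around a bbox with a 20% margin, reflect-padding at the border.
if __name__ == "__main__":
    img = np.arange(100 * 120 * 3).reshape(100, 120, 3).astype(np.uint8)
    patch = quadratic_crop(img, bbox=(10, 20, 60, 90), alpha=1.2)
    print(patch.shape)  # (84, 84, 3) for this box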
|
53e9acf58cf743a89a4bfaafb9211abbbb9d57ec
| 3,645,237
|
import pandas as pd
import talib as t  # assumed: TA-Lib bound as `t`, providing CDLBREAKAWAY
def cdlbreakaway(
client,
symbol,
timeframe="6m",
opencol="open",
highcol="high",
lowcol="low",
closecol="close",
):
"""This will return a dataframe of breakaway for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
opencol (string): column to use to calculate
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
val = t.CDLBREAKAWAY(
df[opencol].values.astype(float),
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
)
return pd.DataFrame(
{
opencol: df[opencol].values,
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cdlbreakaway": val,
}
)
|
c8bbc5adbbf742daabe39feaaa2dec01790297fe
| 3,645,238
|
def depends_on(*args):
"""Caches a `Model` parameter based on its dependencies.
Example
-------
>>> @property
>>> @depends_on('x', 'y')
>>> def param(self):
>>> return self.x * self.y
Parameters
----------
args : list of str
List of parameters this parameter depends on.
"""
cache = {}
def _wrapper(fn):
def _fn(self):
key = tuple(getattr(self, arg) for arg in args)
if key not in cache:
cache[key] = fn(self)
return cache[key]
return _fn
return _wrapper
|
09cdb0ad7601a953eafd01e3e19c0bdfb10dccb2
| 3,645,239
|
import numpy as np
def l96(x, t, f):
    """This describes the derivative for the non-linear Lorenz 96 model of arbitrary dimension n.
    This will take the state vector x and return the equation for dxdt."""
# shift minus and plus indices
x_m_2 = np.concatenate([x[-2:], x[:-2]])
x_m_1 = np.concatenate([x[-1:], x[:-1]])
x_p_1 = np.append(x[1:], x[0])
dxdt = (x_p_1-x_m_2)*x_m_1 - x + f
return dxdt
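# Minimal integration sketch (forcing and initial condition are illustrative,
# and SciPy is assumed to be available): the signature l96(x, t, f) matches
# scipy.integrate.odeint's expected callback.
if __name__ == "__main__":
    from scipy.integrate import odeint
    n, f = 40, 8.0
    x0 = f * np.ones(n)
    x0[0] += 0.01                      # small perturbation to trigger chaos
    t = np.linspace(0.0, 10.0, 101)
    traj = odeint(l96, x0, t, args=(f,))
    print(traj.shape)  # (101, 40)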
|
0db03ed3a8923d18b50095852d17f9213e9f1f0f
| 3,645,240
|
def translation(im0, im1, filter_pcorr=0, odds=1, constraints=None,
reports=None):
"""
Return translation vector to register images.
It tells how to translate the im1 to get im0.
Args:
im0 (2D numpy array): The first (template) image
im1 (2D numpy array): The second (subject) image
filter_pcorr (int): Radius of the minimum spectrum filter
for translation detection, use the filter when detection fails.
Values > 3 are likely not useful.
        constraints (dict or None): Specify preference of sought values.
            For more detailed documentation, refer to :func:`similarity`.
            The only difference is that here, only keys ``tx`` and/or ``ty``
            (i.e. both or any of them or none of them) are used.
        odds (float): The greater the odds are, the higher is the preference
            of the angle + 180 over the original angle. Odds of -1 are the same
            as infinity.
The value 1 is neutral, the converse of 2 is 1 / 2 etc.
Returns:
dict: Contains following keys: ``angle``, ``tvec`` (Y, X),
and ``success``.
"""
angle = 0
report_one = report_two = None
if reports is not None and reports.show("translation"):
report_one = reports.copy_empty()
report_two = reports.copy_empty()
# We estimate translation for the original image...
tvec, succ = _translation(im0, im1, filter_pcorr, constraints, report_one)
# ... and for the 180-degrees rotated image (the rotation estimation
# doesn't distinguish rotation of x vs x + 180deg).
tvec2, succ2 = _translation(im0, utils.rot180(im1), filter_pcorr,
constraints, report_two)
pick_rotated = False
if succ2 * odds > succ or odds == -1:
pick_rotated = True
if reports is not None and reports.show("translation"):
reports["t0-orig"] = report_one["amt-orig"]
reports["t0-postproc"] = report_one["amt-postproc"]
reports["t0-success"] = succ
reports["t0-tvec"] = tuple(tvec)
reports["t1-orig"] = report_two["amt-orig"]
reports["t1-postproc"] = report_two["amt-postproc"]
reports["t1-success"] = succ2
reports["t1-tvec"] = tuple(tvec2)
if reports is not None and reports.show("transformed"):
toapp = [
transform_img(utils.rot180(im1), tvec=tvec2, mode="wrap", order=3),
transform_img(im1, tvec=tvec, mode="wrap", order=3),
]
if pick_rotated:
toapp = toapp[::-1]
reports["after_tform"].extend(toapp)
if pick_rotated:
tvec = tvec2
succ = succ2
angle += 180
ret = dict(tvec=tvec, success=succ, angle=angle)
return ret
|
40e15b6154569d6bb13c7a38d595b603e9421d04
| 3,645,241
|
def CalculateConjointTriad(proteinsequence):
"""
Calculate the conjoint triad features from protein sequence.
    Usage:
res = CalculateConjointTriad(protein)
Input: protein is a pure protein sequence.
Output is a dict form containing all 343 conjoint triad features.
"""
res = {}
proteinnum = _Str2Num(proteinsequence)
for i in range(8):
for j in range(8):
for k in range(8):
temp = str(i) + str(j) + str(k)
res[temp] = proteinnum.count(temp)
return res
|
1ed73c1aa78c5360715eb71c9d56594d028cf6d3
| 3,645,242
|
from typing import Type
from typing import Dict
from typing import Optional
from typing import List
import argparse
from typing import Union
from typing import get_type_hints
import os
def _create(
*,
cls: Type[THparams],
data: Dict[str, JSON],
parsed_args: Dict[str, str],
cli_args: Optional[List[str]],
prefix: List[str],
argparse_name_registry: ArgparseNameRegistry,
argparsers: List[argparse.ArgumentParser],
) -> THparams:
"""Helper method to recursively create an instance of the :class:`~yahp.hparams.Hparams`.
Args:
data (Dict[str, JSON]):
A JSON dictionary of values to use to initialize the class.
parsed_args (Dict[str, str]):
Parsed args for this class.
cli_args (Optional[List[str]]):
A list of cli args. This list is modified in-place,
and all used arguments are removed from the list.
Should be None if no cli args are to be used.
prefix (List[str]):
The prefix corresponding to the subset of ``cli_args``
that should be used to instantiate this class.
argparse_name_registry (_ArgparseNameRegistry):
A registry to track CLI argument names.
argparsers (List[argparse.ArgumentParser]):
A list of :class:`~argparse.ArgumentParser` instances,
which is extended in-place.
Returns:
An instance of the class.
"""
kwargs: Dict[str, HparamsField] = {}
deferred_create_calls: Dict[str, Union[_DeferredCreateCall, # singleton field
List[_DeferredCreateCall], # list field
]] = {}
# keep track of missing required fields so we can build a nice error message
missing_required_fields: List[str] = []
cls.validate_keys(list(data.keys()), allow_missing_keys=True)
field_types = get_type_hints(cls)
for f in fields(cls):
if not f.init:
continue
prefix_with_fname = list(prefix) + [f.name]
try:
ftype = HparamsType(field_types[f.name])
full_name = ".".join(prefix_with_fname)
if full_name in parsed_args and parsed_args[full_name] != MISSING:
# use CLI args first
argparse_or_yaml_value = parsed_args[full_name]
elif f.name in data:
# then use YAML
argparse_or_yaml_value = data[f.name]
elif full_name.upper() in os.environ:
# then use environment variables
argparse_or_yaml_value = os.environ[full_name.upper()]
else:
# otherwise, set it as MISSING so the default will be used
argparse_or_yaml_value = MISSING
if not ftype.is_hparams_dataclass:
if argparse_or_yaml_value == MISSING:
if not is_field_required(f):
# if it's a primitive and there's a default value,
# then convert and use it.
# Sometimes primitives will not have correct default values
# (e.g. type is float, but the default is an int)
kwargs[f.name] = ftype.convert(get_default_value(f), full_name)
else:
kwargs[f.name] = ftype.convert(argparse_or_yaml_value, full_name)
else:
if f.name not in cls.hparams_registry:
# concrete, singleton hparams
# list of concrete hparams
# potentially none
if not ftype.is_list:
# concrete, singleton hparams
# potentially none
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# concrete, singleton hparams
sub_yaml = data.get(f.name)
if sub_yaml is None:
sub_yaml = {}
if not isinstance(sub_yaml, dict):
raise ValueError(f"{full_name} must be a dict in the yaml")
deferred_create_calls[f.name] = _DeferredCreateCall(
hparams_cls=ftype.type,
data=sub_yaml,
prefix=prefix_with_fname,
parser_args=retrieve_args(cls=ftype.type,
prefix=prefix_with_fname,
argparse_name_registry=argparse_name_registry),
)
else:
# list of concrete hparams
# potentially none
# concrete lists not added to argparse, so just load the yaml
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# list of concrete hparams
# concrete lists not added to argparse, so just load the yaml
sub_yaml = data.get(f.name)
if sub_yaml is None:
sub_yaml = []
if isinstance(sub_yaml, dict):
_emit_should_be_list_warning(full_name)
sub_yaml = [sub_yaml]
if not isinstance(sub_yaml, list):
raise TypeError(f"{full_name} must be a list in the yaml")
deferred_calls: List[_DeferredCreateCall] = []
for (i, sub_yaml_item) in enumerate(sub_yaml):
if sub_yaml_item is None:
sub_yaml_item = {}
if not isinstance(sub_yaml_item, dict):
raise TypeError(f"{full_name} must be a dict in the yaml")
deferred_calls.append(
_DeferredCreateCall(
hparams_cls=ftype.type,
data=sub_yaml_item,
prefix=prefix_with_fname + [str(i)],
parser_args=None,
))
deferred_create_calls[f.name] = deferred_calls
else:
# abstract, singleton hparams
# list of abstract hparams
# potentially none
if not ftype.is_list:
# abstract, singleton hparams
# potentially none
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# abstract, singleton hparams
# look up type in the registry
# should only have one key in the dict
# argparse_or_yaml_value is a str if argparse, or a dict if yaml
if argparse_or_yaml_value == MISSING:
# use the hparams default
continue
if argparse_or_yaml_value is None:
raise ValueError(f"Field {full_name} is required and cannot be None.")
if isinstance(argparse_or_yaml_value, str):
key = argparse_or_yaml_value
else:
if not isinstance(argparse_or_yaml_value, dict):
raise ValueError(
f"Field {full_name} must be a dict with just one key if specified in the yaml")
try:
key, _ = extract_only_item_from_dict(argparse_or_yaml_value)
except ValueError as e:
raise ValueError(f"Field {full_name} " + e.args[0])
yaml_val = data.get(f.name)
if yaml_val is None:
yaml_val = {}
if not isinstance(yaml_val, dict):
raise ValueError(
f"Field {'.'.join(prefix_with_fname)} must be a dict if specified in the yaml")
yaml_val = yaml_val.get(key)
if yaml_val is None:
yaml_val = {}
if not isinstance(yaml_val, dict):
raise ValueError(
f"Field {'.'.join(prefix_with_fname + [key])} must be a dict if specified in the yaml"
)
deferred_create_calls[f.name] = _DeferredCreateCall(
hparams_cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
data=yaml_val,
parser_args=retrieve_args(cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
argparse_name_registry=argparse_name_registry),
)
else:
# list of abstract hparams
# potentially none
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# list of abstract hparams
# argparse_or_yaml_value is a List[str] if argparse, or a List[Dict[str, Hparams]] if yaml
if argparse_or_yaml_value == MISSING:
# use the hparams default
continue
# First get the keys
                            # Argparse has precedence. If there are keys defined in argparse, use only those
# These keys will determine what is loaded
if argparse_or_yaml_value is None:
raise ValueError(f"Field {full_name} is required and cannot be None.")
if isinstance(argparse_or_yaml_value, dict):
_emit_should_be_list_warning(full_name)
argparse_or_yaml_value = [argparse_or_yaml_value]
if not isinstance(argparse_or_yaml_value, list):
raise ValueError(f"Field {full_name} should be a list")
keys: List[str] = []
for item in argparse_or_yaml_value:
if isinstance(item, str):
keys.append(item)
else:
if not isinstance(item, dict):
raise ValueError(f"Field {full_name} should be a list of dicts in the yaml")
key, _ = extract_only_item_from_dict(item)
keys.append(key)
key = argparse_or_yaml_value
# Now, load the values for these keys
yaml_val = data.get(f.name)
if yaml_val is None:
yaml_val = []
if isinstance(yaml_val, dict):
# already emitted the warning, no need to do it again
yaml_val = [yaml_val]
if not isinstance(yaml_val, list):
raise ValueError(
f"Field {'.'.join(prefix_with_fname)} must be a list if specified in the yaml")
# Convert the yaml list to a dict
yaml_dict: Dict[str, Dict[str, JSON]] = {}
for i, yaml_val_entry in enumerate(yaml_val):
if not isinstance(yaml_val_entry, dict):
raise ValueError(
f"Field {'.'.join(list(prefix_with_fname) + [str(i)])} must be a dict if specified in the yaml"
)
k, v = extract_only_item_from_dict(yaml_val_entry)
if not isinstance(v, dict):
raise ValueError(
f"Field {'.'.join(list(prefix_with_fname) + [k])} must be a dict if specified in the yaml"
)
yaml_dict[k] = v
deferred_calls: List[_DeferredCreateCall] = []
for key in keys:
# Use the order of keys
key_yaml = yaml_dict.get(key)
if key_yaml is None:
key_yaml = {}
if not isinstance(key_yaml, dict):
raise ValueError(f"Field {'.'.join(prefix_with_fname + [key])}"
"must be a dict if specified in the yaml")
deferred_calls.append(
_DeferredCreateCall(
hparams_cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
data=key_yaml,
parser_args=retrieve_args(
cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
argparse_name_registry=argparse_name_registry,
),
))
deferred_create_calls[f.name] = deferred_calls
except _MissingRequiredFieldException as e:
missing_required_fields.extend(e.args)
# continue processing the other fields and gather everything together
if cli_args is None:
for fname, create_calls in deferred_create_calls.items():
sub_hparams = [
_create(
cls=deferred_call.hparams_cls,
data=deferred_call.data,
parsed_args={},
cli_args=None,
prefix=deferred_call.prefix,
argparse_name_registry=argparse_name_registry,
argparsers=argparsers,
) for deferred_call in ensure_tuple(create_calls)
]
if isinstance(create_calls, list):
kwargs[fname] = sub_hparams
else:
kwargs[fname] = sub_hparams[0]
else:
all_args: List[ParserArgument] = []
for fname, create_calls in deferred_create_calls.items():
for create_call in ensure_tuple(create_calls):
if create_call.parser_args is not None:
all_args.extend(create_call.parser_args)
argparse_name_registry.assign_shortnames()
for fname, create_calls in deferred_create_calls.items():
# TODO parse args from
sub_hparams: List[hp.Hparams] = []
for create_call in ensure_tuple(create_calls):
prefix = create_call.prefix
if create_call.parser_args is None:
parsed_arg_dict = {}
else:
parser = argparse.ArgumentParser(add_help=False)
argparsers.append(parser)
group = parser.add_argument_group(title=".".join(prefix),
description=create_call.hparams_cls.__name__)
for args in create_call.parser_args:
for arg in ensure_tuple(args):
arg.add_to_argparse(group)
parsed_arg_namespace, cli_args[:] = parser.parse_known_args(cli_args)
parsed_arg_dict = vars(parsed_arg_namespace)
sub_hparams.append(
_create(
cls=create_call.hparams_cls,
data=create_call.data,
parsed_args=parsed_arg_dict,
cli_args=cli_args,
prefix=prefix,
argparse_name_registry=argparse_name_registry,
argparsers=argparsers,
))
if isinstance(create_calls, list):
kwargs[fname] = sub_hparams
else:
kwargs[fname] = sub_hparams[0]
for f in fields(cls):
if not f.init:
continue
prefix_with_fname = ".".join(list(prefix) + [f.name])
if f.name not in kwargs:
if f.default == MISSING and f.default_factory == MISSING:
missing_required_fields.append(prefix_with_fname)
# else:
# warnings.warn(f"DefaultValueWarning: Using default value for {prefix_with_fname}. "
# "Using default values is not recommended as they may change between versions.")
if len(missing_required_fields) > 0:
# if there are any missing fields from this class, or optional but partially-filled-in subclasses,
        # then propagate back the missing fields
raise _MissingRequiredFieldException(*missing_required_fields)
return cls(**kwargs)
|
db6a407a1e9b0fdc943bf7851b00f28fcd7aed47
| 3,645,243
|
def extract_url(url):
"""Creates a short version of the URL to work with. Also returns None if its not a valid adress.
Args:
url (str): The long version of the URL to shorten
Returns:
str: The short version of the URL
"""
if url.find("www.amazon.de") != -1:
index = url.find("/dp/")
if index != -1:
index2 = index + 14
url = "https://www.amazon.de" + url[index:index2]
else:
index = url.find("/gp/")
if index != -1:
index2 = index + 22
url = "https://www.amazon.de" + url[index:index2]
else:
url = None
else:
url = None
return url
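# Illustrative example (made-up product URL, not from the original project):
if __name__ == "__main__":
    long_url = "https://www.amazon.de/Some-Product-Name/dp/B08XYZ1234/ref=sr_1_1"
    print(extract_url(long_url))  # https://www.amazon.de/dp/B08XYZ1234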
|
85421799b601c89aa54fdce6e98c003ea80111eb
| 3,645,244
|
import unicodedata
from typing import List
def match_name(own: str, other: str) -> bool:
"""
compares 2 medic names (respects missing middle names, or abbrev. name parts)
Args:
own: the first name
other: the last name
Returns: True if both names match
"""
# the simplest case, both name match completely
if own is None or other is None:
return True
own = unicodedata.normalize('NFKD', _remove_umlaut(own)).encode('ASCII', 'ignore').decode("utf-8").lower()
other = unicodedata.normalize('NFKD', _remove_umlaut(other)).encode('ASCII', 'ignore').decode("utf-8").lower()
if own == other:
return True
hn_other = parse_name(other)
hn_own = parse_name(own)
def _remove_surname_titles(surnames: List[str]) -> List[str]:
def _remove(s: str) -> str:
for t in _surname_titles:
s = s.replace(t, "")
return s
return list(map(_remove, surnames))
if hn_own is None or hn_other is None:
return False
# remove surname titles like "von" from surnames
hn_other.last_list = _remove_surname_titles(hn_other.last_list)
hn_other.last = " ".join(hn_other.last_list)
hn_own.last_list = _remove_surname_titles(hn_own.last_list)
hn_own.last = " ".join(hn_own.last_list)
    # if the last names don't match, we skip here
own_lasts = " ".join([on.lower() for on in hn_own.last_list])
other_lasts = " ".join([on.lower() for on in hn_other.last_list])
# compound surnames
if "-" in own_lasts or "-" in other_lasts:
own_lasts_splitted = own_lasts.split("-")
other_lasts_splitted = other_lasts.split("-")
matches = 0
for o in own_lasts_splitted:
for ot in other_lasts_splitted:
if o == ot or distance(o, ot) <= 1 and (len(o) >= 5 or len(ot) >= 5):
matches += 1
for o in reversed(own_lasts_splitted):
for ot in other_lasts_splitted:
if o == ot or distance(o, ot) <= 1 and (len(o) >= 5 or len(ot) >= 5):
matches += 1
if matches < 2:
return False
elif own_lasts[0] != other_lasts[0] or (own_lasts != other_lasts and distance(own_lasts, other_lasts) > 1):
return False
def _match_name_list(name: str, other: List[str]):
if name in other:
# full name match
return True
elif name.endswith(".") and name in ["{}.".format(f[0:len(name) - 1]) for f in other]:
# A. name match
return True
elif len(name) == 1 and name in [f[0] for f in other]:
# A name match
return True
return False
def _compare_names(a: List[str], b: List[str]) -> bool:
m_a = list(map(lambda n: _match_name_list(n, b), a))
m_b = list(map(lambda n: _match_name_list(n, a), b))
return m_a.count(True) >= m_a.count(False) or m_b.count(
True) >= m_b.count(False)
    # check if the first names match (if one side has no first name we assume a match)
first_name_matches = True if (hn_own.first == "" or hn_other.first == "") else _compare_names(hn_own.first_list,
hn_other.first_list)
own_first_middles = hn_own.first + hn_own.middle
other_first_middles = hn_other.first + hn_other.middle
    # check if the first+middle names match (if one side has no first name we assume a match)
first_name_matches_fuzzy = own_first_middles.lower() == other_first_middles.lower() or (
own_first_middles.startswith(other_first_middles) or other_first_middles.startswith(own_first_middles))
if first_name_matches is False or first_name_matches_fuzzy is False:
# if the initials dont match, dont match
        if (len(hn_own.first) > 0 and len(hn_other.first) > 0) and hn_own.first[0] != hn_other.first[0]:
return False
# if the names are longer than 5 and start with the same letter we allow tiny typos
l_distance = distance(hn_own.first, hn_other.first)
if l_distance < 2 and (len(hn_other.first) >= 5 or len(hn_own.first) >= 5):
first_name_matches = True
    # if neither has a middle name, it's a match
if len(hn_own.middle_list) == 0 and len(hn_other.middle_list) == 0:
return first_name_matches or first_name_matches_fuzzy
    # if only one side has a middle name, it's a match
if len(hn_own.middle_list) == 0 and len(hn_other.middle_list) > 0 or len(hn_own.middle_list) > 0 and len(
hn_other.middle_list) == 0:
return first_name_matches or first_name_matches_fuzzy
return _compare_names(hn_own.middle_list, hn_other.middle_list)
|
6987deb8695f823cd5e1e0948bd2a21dc33759bd
| 3,645,245
|
def price_setting():
""" Sets prices """
purchasing_price = float(input("enter purchasing price: "))
new_supplier = str(input("First time user(Y/N)?: ")).lower()
if new_supplier not in ['n', 'y']:
return True, {"errorMsg": f"{new_supplier} not a valid response"}, None
if new_supplier == 'y':
days_since_reg = int(input("Enter the days since you registered?: "))
if days_since_reg < 60:
list_price = purchasing_price*2
discount_percent = 0
profit_percent = ((list_price - purchasing_price)
* 100)/purchasing_price
return False, discount_percent, profit_percent
product_reg_days = int(
input("Enter the days you had registered this product?: "))
if product_reg_days < 0:
return True, {
"errorMsg": f"{product_reg_days} is not an acceptable value"
}, None
if product_reg_days > 30:
list_price = purchasing_price*2
discount_percent, profit_percent = decide_discount(
purchasing_price*2, purchasing_price)
return False, discount_percent, profit_percent
list_price = purchasing_price*2
discount_percent = 0
profit_percent = (list_price - purchasing_price)*100/purchasing_price
return False, discount_percent, profit_percent
discount_percent, profit_percent = decide_discount(
purchasing_price*2, purchasing_price)
return False, discount_percent, profit_percent
|
5d6b98b62ff7b6c8c3cc4278c3a4f2eb7c5f0f27
| 3,645,246
|
import numpy as np
# RasterModelGrid, FlowRouter and DepressionFinderAndRouter are assumed to be
# imported from landlab in the original test module.
def d4_grid():
"""Test functionality of routing when D4 is specified.
The elevation field in this test looks like::
1 2 3 4 5 6 7
1 2 3 0 5 0 7
1 2 3 4 0 0 7
1 2 3 0 5 6 7
1 2 0 0 0 6 7
1 2 3 0 5 6 7
1 2 3 4 5 6 7
"""
mg1 = RasterModelGrid(7, 7, 1.)
mg2 = RasterModelGrid(7, 7, 1.)
z = mg1.node_x.copy() + 1.
lake_nodes = np.array([10, 16, 17, 18, 24, 32, 33, 38, 40])
z[lake_nodes] = 0.
mg1.add_field("node", "topographic__elevation", z, units="-")
mg2.add_field("node", "topographic__elevation", z, units="-")
frD8 = FlowRouter(mg1, method="D8")
frD4 = FlowRouter(mg2, method="D4")
lfD8 = DepressionFinderAndRouter(mg1, routing="D8")
lfD4 = DepressionFinderAndRouter(mg2, routing="D4")
class DansGrid(object):
pass
d4_grid = DansGrid()
d4_grid.mg1 = mg1
d4_grid.mg2 = mg2
d4_grid.z = z
d4_grid.lake_nodes = lake_nodes
d4_grid.frD8 = frD8
d4_grid.frD4 = frD4
d4_grid.lfD8 = lfD8
d4_grid.lfD4 = lfD4
return d4_grid
|
ed285c91cc4cda270a469a0271d1b935f1043d32
| 3,645,247
|
import inspect
def pass_complex_ins(mqc):
"""
The number of PASS complex insertions.
Source: count_variants.py (bcftools view)
"""
k = inspect.currentframe().f_code.co_name
try:
d = next(iter(mqc["multiqc_npm_count_variants"].values()))
v = d["pass_complex_ins"]
v = int(v)
except KeyError:
v = "NA"
return k, v
|
df7177b126829dca4a71252e797a2cb7d7d24ee3
| 3,645,248
|
def get_jwt():
"""
Get authorization token and validate its signature against the public key
from /.well-known/jwks endpoint
"""
expected_errors = {
KeyError: WRONG_PAYLOAD_STRUCTURE,
AssertionError: JWK_HOST_MISSING,
InvalidSignatureError: WRONG_KEY,
DecodeError: WRONG_JWT_STRUCTURE,
InvalidAudienceError: WRONG_AUDIENCE,
TypeError: KID_NOT_FOUND
}
token = get_auth_token()
try:
jwks_payload = jwt.decode(token, options={'verify_signature': False})
assert 'jwks_host' in jwks_payload
jwks_host = jwks_payload.get('jwks_host')
key = get_public_key(jwks_host, token)
aud = request.url_root
payload = jwt.decode(
token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
)
set_ctr_entities_limit(payload)
return payload['key']
except tuple(expected_errors) as error:
message = expected_errors[error.__class__]
raise AuthorizationError(message)
|
92b757e3fa9774ac7e93fc0f89c446efefc47b33
| 3,645,249
|
import os
def devlocation()->str:
"""
    :return: 'local' or 'github'
"""
return os.getenv('DEVLOCATION') or 'local'
|
f4ab9af75258f6c72786a4cfbbe2d7a7661873c0
| 3,645,250
|
import numpy as np
def convert_categorical(df, col_old, conversion, col_new=None):
    """Convert categorical values using a {source_value: category_code} mapping."""
col_new = col_old
orig_values = df[col_old].values
good_rows = np.isin(orig_values, list(conversion))
df = df.iloc[good_rows]
orig_values = df[col_old].values
cat_values = np.zeros(len(df), dtype=int)
for src, dest in conversion.items():
cat_values[orig_values == src] = dest
df.loc[:, col_new] = cat_values
return df
|
db07bb08f302edf615cab96b04b263f66fa9b8b1
| 3,645,251
|
def _get_pipeline_configs(force=False):
"""
Connects to Shotgun and retrieves information about all projects
and all pipeline configurations in Shotgun. Adds this to the disk cache.
If a cache already exists, this is used instead of talking to Shotgun.
To force a re-cache, set the force flag to True.
Returns a complex data structure with the following fields
local_storages:
- id
- code
- windows_path
- mac_path
- linux_path
pipeline_configurations:
- id
- code
- windows_path
- linux_path
- mac_path
- project
- project.Project.tank_name
:param force: set this to true to force a cache refresh
:returns: dictionary with keys local_storages and pipeline_configurations.
"""
CACHE_KEY = "paths"
if force == False:
# try to load cache first
# if that doesn't work, fall back on shotgun
cache = _load_lookup_cache()
if cache and cache.get(CACHE_KEY):
# cache hit!
return cache.get(CACHE_KEY)
# ok, so either we are force recomputing the cache or the cache wasn't there
sg = shotgun.get_sg_connection()
# get all local storages for this site
local_storages = sg.find("LocalStorage",
[],
["id", "code", "windows_path", "mac_path", "linux_path"])
# get all pipeline configurations (and their associated projects) for this site
pipeline_configs = sg.find("PipelineConfiguration",
[["project.Project.tank_name", "is_not", None]],
["id",
"code",
"windows_path",
"linux_path",
"mac_path",
"project",
"project.Project.tank_name"])
# cache this data
data = {"local_storages": local_storages, "pipeline_configurations": pipeline_configs}
_add_to_lookup_cache(CACHE_KEY, data)
return data
|
3c504606e5a751e0015abbdcf74a3e2513d4d280
| 3,645,252
|
import sys
import argparse
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser(description='Make combine output from multiple files, with a common column '
'name, printing only data from common column values. ')
parser.add_argument("-d", "--delimiter", help="The delimiter separating the file names in each row of the"
"compare_file_list. The default delimiter is '{}'.".format(DEF_DELIM),
default=DEF_DELIM)
parser.add_argument("-f", "--compare_file_list", help="The location of the file that lists the files to be "
"combined. Each row should contain a list of files to be "
"combined by aligning on the col_name. "
"The default file name is {}, located in the "
"directory where the program as run.".format(DEF_CMP_FILE),
default=DEF_CMP_FILE, type=file_rows_to_list)
parser.add_argument("-l", "--output_location", help="The location (directory) for output files. The default is the "
"directory from which the program was called.",
default=None)
parser.add_argument("-n", "--col_name", help="The common column name in the files used as the key to combine "
"files. The default file name is {}.".format(DEF_ALIGN_COL_NAME),
default=DEF_ALIGN_COL_NAME)
parser.add_argument("-o", "--out_name", help="The output file name. The default is {}.".format(DEF_OUT_FILE),
default=DEF_OUT_FILE)
parser.add_argument("-s", "--sep_out", help="A flag to specify a separate output files should be created for "
"the aligned files from each row of the compare_file_list. If this "
"is specified, the out_name will be used as a suffix. The base name "
"will be based on the common part of the names of the files to be "
"combined. If there is no common string, the output files will be "
"numbered based on their row number in the compare_file_list. Separate "
"output files will also be created if the column names from files on "
"different lines to not match.",
action='store_true')
args = None
try:
args = parser.parse_args(argv)
except IOError as e:
warning("Problems reading file:", e)
parser.print_help()
return args, IO_ERROR
except (KeyError, SystemExit) as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning(e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET
|
64586f30b70ca8af9c107556ffb69bcf1ffa8696
| 3,645,253
|
def info(parentwindow, message, buttons, *,
title=None, defaultbutton=None):
"""Display an information message."""
return _message('info', parentwindow, message, title, buttons,
defaultbutton)
|
060e41cde2e83bdeab3fa3147caebabb3292923a
| 3,645,254
|
import hashlib
import time
import sys
def add_user(args):
"""
Process arguments and ask user for other needed parameters in order
to add info to DB
:param args: returned object from argparse.parse_args
:return: exit code (0 on success, 1 on failure)
"""
logger = fsurfer.log.get_logger()
if args.username is None:
username = get_input("Username")
else:
username = args.username
username = username.strip()
password = get_input("password", echo=False)
if args.first_name is None:
first_name = get_input("First name")
else:
first_name = args.first_name
if args.last_name is None:
last_name = get_input("Last name")
else:
last_name = args.last_name
if args.email is None:
email = get_input("Email")
else:
email = args.email
if args.phone is None:
phone = get_input("Phone")
else:
phone = args.phone
if args.institution is None:
institution = get_input("Institution")
else:
institution = args.institution
    salt = hashlib.sha256(str(time.time()).encode()).hexdigest()
    password = hashlib.sha256((salt + password).encode()).hexdigest()
user_insert = "INSERT INTO freesurfer_interface.users(username," \
" first_name," \
" last_name," \
" email," \
" institution," \
" phone," \
" password," \
" salt) " \
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
try:
conn = fsurfer.helpers.get_db_client()
with conn.cursor() as cursor:
logger.info("Adding {0} to database".format(username))
cursor.execute(user_insert, (username,
first_name,
last_name,
email,
institution,
phone,
password,
salt))
if cursor.rowcount != 1:
sys.stderr.write("{0}".format(cursor.statusmessage))
logger.error("Encountered error while adding" +
"user {0}: {1}".format(username, cursor.statusmessage))
return 1
logger.info("User {0} added".format(username))
conn.commit()
conn.close()
return 0
except Exception as e:
sys.stderr.write("Got exception: {0}".format(e))
logger.exception("Got exception: {0}".format(e))
return 1
|
eac6dc03bb65ce5243f0c3117e65ff969efe4020
| 3,645,255
|
import jax
import jax.numpy as jnp
from jax import nn
def gru(xs, lengths, init_hidden, params):
"""RNN with GRU. Based on https://github.com/google/jax/pull/2298"""
def apply_fun_single(state, inputs):
i, x = inputs
inp_update = jnp.matmul(x, params["update_in"])
hidden_update = jnp.dot(state, params["update_weight"])
update_gate = nn.sigmoid(inp_update + hidden_update)
reset_gate = nn.sigmoid(
jnp.matmul(x, params["reset_in"]) + jnp.dot(state, params["reset_weight"])
)
output_gate = update_gate * state + (1 - update_gate) * jnp.tanh(
jnp.matmul(x, params["out_in"])
+ jnp.dot(reset_gate * state, params["out_weight"])
)
hidden = jnp.where((i < lengths)[:, None], output_gate, jnp.zeros_like(state))
return hidden, hidden
init_hidden = jnp.broadcast_to(init_hidden, (xs.shape[1], init_hidden.shape[1]))
return jax.lax.scan(apply_fun_single, init_hidden, (jnp.arange(xs.shape[0]), xs))
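
# A hedged usage sketch (shapes are illustrative: sequence length 5, batch 2, input
# size 3, hidden size 4; the zero parameters are placeholders, not trained weights).
in_dim, hid = 3, 4
example_params = {
    "update_in": jnp.zeros((in_dim, hid)), "update_weight": jnp.zeros((hid, hid)),
    "reset_in": jnp.zeros((in_dim, hid)), "reset_weight": jnp.zeros((hid, hid)),
    "out_in": jnp.zeros((in_dim, hid)), "out_weight": jnp.zeros((hid, hid)),
}
xs_example = jnp.ones((5, 2, in_dim))      # (time, batch, features)
lengths_example = jnp.array([5, 3])        # per-example valid lengths
final_state, all_states = gru(xs_example, lengths_example, jnp.zeros((1, hid)), example_params)
# final_state: (batch, hidden); all_states: (time, batch, hidden)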
|
76c4ca1f90ba5cefc4227197d70c93c358d5f1d1
| 3,645,256
|
def get_strings_in_flattened_sequence(p):
"""
Traverses nested sequence and for each element, returns first string encountered
"""
if p is None:
return []
#
# string is returned as list of single string
#
if isinstance(p, path_str_type):
return [p]
#
# Get all strings flattened into list
#
return get_strings_in_flattened_sequence_aux(p)
|
3de6829386d7877b745277cae88e3b3e6ac889a3
| 3,645,257
|
def kansuji2arabic(string, sep=False):
"""漢数字をアラビア数字に変換"""
def _transvalue(sj, re_obj=re_kunit, transdic=TRANSUNIT):
unit = 1
result = 0
for piece in reversed(re_obj.findall(sj)):
if piece in transdic:
if unit > 1:
result += unit
unit = transdic[piece]
else:
val = int(piece) if piece.isdecimal() else _transvalue(piece)
result += val * unit
unit = 1
if unit > 1:
result += unit
return result
transuji = string.translate(tt_ksuji)
for suji in sorted(set(re_suji.findall(transuji)), key=lambda s: len(s),
reverse=True):
if not suji.isdecimal():
arabic = _transvalue(suji, re_manshin, TRANSMANS)
arabic = '{:,}'.format(arabic) if sep else str(arabic)
transuji = transuji.replace(suji, arabic)
return transuji
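
# A hedged usage sketch (the module-level tables tt_ksuji, re_suji, re_kunit,
# re_manshin, TRANSUNIT and TRANSMANS are assumed to be defined as in the source
# module; the expected outputs below are illustrative).
# kansuji2arabic("千二百三十四")            # -> "1234"
# kansuji2arabic("千二百三十四", sep=True)  # -> "1,234"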
|
4787618585baf660164d1c676ac7cae2750fe239
| 3,645,258
|
def get_image(member_status=None, most_recent=None, name=None, owner=None, properties=None, region=None, size_max=None, size_min=None, sort_direction=None, sort_key=None, tag=None, visibility=None):
"""
Use this data source to get the ID of an available OpenStack image.
"""
__args__ = dict()
__args__['memberStatus'] = member_status
__args__['mostRecent'] = most_recent
__args__['name'] = name
__args__['owner'] = owner
__args__['properties'] = properties
__args__['region'] = region
__args__['sizeMax'] = size_max
__args__['sizeMin'] = size_min
__args__['sortDirection'] = sort_direction
__args__['sortKey'] = sort_key
__args__['tag'] = tag
__args__['visibility'] = visibility
__ret__ = pulumi.runtime.invoke('openstack:images/getImage:getImage', __args__)
return GetImageResult(
checksum=__ret__.get('checksum'),
container_format=__ret__.get('containerFormat'),
disk_format=__ret__.get('diskFormat'),
file=__ret__.get('file'),
metadata=__ret__.get('metadata'),
min_disk_gb=__ret__.get('minDiskGb'),
min_ram_mb=__ret__.get('minRamMb'),
protected=__ret__.get('protected'),
region=__ret__.get('region'),
schema=__ret__.get('schema'),
size_bytes=__ret__.get('sizeBytes'),
updated_at=__ret__.get('updatedAt'),
id=__ret__.get('id'))
|
60fc60c558ac3c2e60fcdb73bf72a9d2bbc20855
| 3,645,259
|
def sort_course_dicts(courses):
""" Sorts course dictionaries
@courses: iterable object containing dictionaries representing courses.
Each course must have a course_number and abbreviation key
@return: returns a new list containing the given courses, in naturally sorted order.
"""
detailed_courses = [{
"course": course,
"numeric_course_number": int(extract_numeric_component(course["course_number"])),
"prefix": extract_prefix(course["course_number"]),
"suffix": extract_suffix(course["course_number"])
} for course in courses]
detailed_courses.sort(key=lambda course: course["suffix"])
detailed_courses.sort(key=lambda course: course["prefix"])
detailed_courses.sort(key=lambda course: course["numeric_course_number"])
detailed_courses.sort(key=lambda course: course["course"]["abbreviation"])
return [course_detail["course"] for course_detail in detailed_courses]
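
# A hedged usage sketch (extract_numeric_component, extract_prefix and extract_suffix
# are assumed to be module helpers that split e.g. "C10B" into "C", 10, "B").
# courses = [
#     {"abbreviation": "MATH", "course_number": "10B"},
#     {"abbreviation": "MATH", "course_number": "2"},
#     {"abbreviation": "COMPSCI", "course_number": "61A"},
# ]
# sort_course_dicts(courses)  # -> COMPSCI 61A, MATH 2, MATH 10B (natural order)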
|
f715cf1db4c77bd3412290bc870731e0d923871b
| 3,645,260
|
import glob
import re
import numpy
def read_folder(filepath):
"""
Reads multiple image files from a folder and returns the resulting stack.
To find the images in the right order, a regex is used which will search
for files with the following pattern:
[prefix]_p[Nr][suffix]. The start number doesn't need to be 0.
The files are sorted with a natural sort, meaning that files like
0002, 1, 004, 3 will be sorted as 1, 0002, 3, 004.
    The following regex is used to find the measurements:
".*_+p[0-9]+_?.*\.(tif{1,2}|jpe*g|nii|h5|png)"
Supported file formats for the image file equal the supported formats of
SLIX.imread.
Args:
filepath: Path to folder
Returns:
numpy.array: Image with shape [x, y, z] where [x, y] is the size
of a single image and z specifies the number of measurements
"""
files_in_folder = glob.glob(filepath + '/*')
matching_files = []
for file in files_in_folder:
if re.match(_fileregex, file) is not None:
matching_files.append(file)
matching_files.sort(key=__natural_sort_filenames_key)
image = None
# Check if files contain the needed regex for our measurements
for file in matching_files:
measurement_image = imread(file)
if image is None:
image = measurement_image
elif len(image.shape) == 2:
image = numpy.stack((image, measurement_image), axis=-1)
else:
image = numpy.concatenate((image,
measurement_image
[:, :, numpy.newaxis]), axis=-1)
return image
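
# A hedged usage sketch (_fileregex, __natural_sort_filenames_key and imread are
# module helpers assumed to exist; the folder path is a placeholder).
# stack = read_folder("/data/measurement_01")
# stack.shape  # -> (x, y, number of files matching the "_p<N>" pattern)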
|
d50dc5ef09931b7950c91b5ea2f07eaa0d90cba1
| 3,645,261
|
from presqt.targets.zenodo.utilities.helpers.get_zenodo_children import zenodo_get_children
def zenodo_fetch_resource_helper(zenodo_project, resource_id, is_record=False, is_file=False):
"""
Takes a Zenodo deposition/record and builds a Zenodo PresQT resource.
Parameters
----------
zenodo_project : dict
The requested Zenodo project.
is_record : boolean
Flag for if the resource is a published record
is_file : boolean
Flag for if the resource is a file
Returns
-------
PresQT Zenodo Resource (dict).
"""
identifier = None
if is_file is False:
if is_record is True:
kind_name = zenodo_project['metadata']['resource_type']['type']
date_modified = zenodo_project['updated']
identifier = zenodo_project['doi']
else:
kind_name = zenodo_project['metadata']['upload_type']
date_modified = zenodo_project['modified']
kind = 'container'
title = zenodo_project['metadata']['title']
hashes = {}
extra = {}
for key, value in zenodo_project['metadata'].items():
if key != 'doi':
extra[key] = value
children = zenodo_get_children(zenodo_project, resource_id, is_record)
else:
kind = 'item'
kind_name = 'file'
title = zenodo_project['key']
date_modified = zenodo_project['updated']
hashes = {'md5': zenodo_project['checksum'].partition(':')[2]}
extra = {}
children = []
return {
"kind": kind,
"kind_name": kind_name,
"id": resource_id,
"identifier": identifier,
"title": title,
"date_created": zenodo_project['created'],
"date_modified": date_modified,
"hashes": hashes,
"extra": extra,
"children": children}
|
8427456ea648d1a5f4b5a0ee3baffc28649184aa
| 3,645,262
|
import json
def add_role_menu(request):
"""菜单授权"""
menu_nums = request.POST.get("node_id_json")
role_id = request.POST.get("role_id")
role_obj = auth_db.Role.objects.get(id=role_id)
menu_nums = json.loads(menu_nums)
role_obj.menu.clear()
for i in menu_nums:
menu_obj = auth_db.Menus.objects.get(menu_num=i)
role_obj.menu.add(menu_obj)
data = "授权已更新,重新登录即生效!"
return HttpResponse(data)
|
31b8d2eb62ad105c4e44f6af7fa75cde2746d2f0
| 3,645,263
|
import os
def repo_path():
"""
little function to help resolve location of doctest_files back in repository
:return: the absolute path to the root of the repository.
"""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
2fc1263d0c6c68e6f2e11c97d55fe9f539fb1b70
| 3,645,264
|
def oauth_url(auth_base, country, language):
"""Construct the URL for users to log in (in a browser) to start an
authenticated session.
"""
url = urljoin(auth_base, 'login/sign_in')
query = urlencode({
'country': country,
'language': language,
'svcCode': SVC_CODE,
'authSvr': 'oauth2',
'client_id': CLIENT_ID,
'division': 'ha',
'grant_type': 'password',
})
return '{}?{}'.format(url, query)
|
d932d161ead93510e2a4b05c20f87982c726e158
| 3,645,265
|
def teammsg(self: Client, message: str) -> str:
"""Sends a team message."""
return self.run('teammsg', message)
|
97a3a9c99af17fcf183d72158ec5dc9b5ad3689d
| 3,645,266
|
def config_section(section):
"""
This configures a specific section of the configuration file
"""
# get a handle on the configuration reader
config_reader = get_config_reader()
# Find the group that this section belongs to.
# When we're in a namespaced section, we'll be in the group that the namespace belongs to.
current_group = None
is_child = False
for group in configuration_groups:
if section == configuration_groups[group]["section"]:
current_group = group
break
else:
# Check to see if our section is a child of the group
if configuration_groups[group]["type"] == 'multi':
# We want to know if the namespace of that group matches.
namespace = configuration_groups[group]["namespace"]
if section.startswith(namespace+"."):
current_group = group
is_child = True
break
    # If this search found no matching group, this section cannot be configured through the web interface.
    if current_group is None:
abort(404)
config_options_raw = {}
if config_reader.has_section(section):
config_options_raw = dict(config_reader.items(section))
cgroupdict = configuration_groups[current_group]
sidebar = get_sidebar()
# we're going to filter the raw items handed to the template from the
# configuration files. This makes it harder for the generic template to
# mess something up unintentionally.
config_options_cooked = {}
for config_key in config_options_raw.keys():
if config_key == "type" or config_key == "ui_label":
continue
else:
config_options_cooked[config_key] = config_options_raw[config_key]
# now, config_items_cooked has our filtered keys.
# TODO: Make the above easier to extend
    # walk through templates and render them in order of likelihood
# there are a set of templates that need to be considered here:
# in order of specificity,
#
# if we are looking at a wireless interface (interface.wlan0),
# we want to have each of the following templates considered:
#
# * cfg_interface_wlan0.html
# * cfg_interface_wireless.html
# * cfg_interface.html
# * cfg_generic.html
#
# for some types, we need the sequence to be a little different
# such as in the case of services.
#
# * cfg_service_ssh.html
# * cfg_service.html
# * cfg_generic.html
#
# We do this by replacing any .'s with _ first, then using that.
# The namespace of the selected group is then checked:
#
# cfg_{namespace}_{type}
# cfg_{namespace}
# cfg_generic
#
#
# This means that the best way of handling this is to use the
# following hierarchy:
#
# cfg_{section}.html -- the most specific
# cfg_{namespace}_{type}.html -- Not as specific but works for interface.
# cfg_{namespace}.html -- A nice fallback for services.
# cfg_generic.html -- Fallback for all others.
#
# this means that service.ssh will default use the template
# cfg_service_ssh
# Then will use `cfg_service.html` (which should exist.) then
# as a worst case scenario will fall back on `cfg_generic.html`
#
templates = []
# check if we were requested to show key-value stores.
#
if 'notemplate' in request.args and request.args['notemplate'] == 'true':
templates = ['cfg_generic.html']
else:
templates.append('cfg_{0}.html'.format(section.replace('.','_')))
if 'type' in config_options_raw:
# This only works on some sections as a section is responsible for
# declaring its type.
# This means we need to check. This also is only really relevant to the
# multi section type of group, but this does make things simple.
templates.append(
'cfg_{0}_{1}.html'.format(
cgroupdict['namespace'],
config_options_raw['type']
)
)
if 'namespace' in cgroupdict:
templates.append('cfg_{0}.html'.format(cgroupdict['namespace']))
templates.append('cfg_generic.html')
try:
return render_template(list(map(lambda l:"config/"+l, templates)),
sidebar=sidebar,
current_group=current_group,
current_section=section,
values=config_options_cooked,
title="Admin: {0} ({1})".format(cgroupdict['short'],section)
)
except Exception as e:
if app.debug:
raise e
abort(500)
|
93bc61bfc400c8da4e8d677954c6fdb18ae0d959
| 3,645,267
|
def h_html_footnote(e, doc):
"""Handle footnotes with bigfoot"""
if not isinstance(e, pf.Note) or doc.format != "html":
return None
htmlref = rf'<sup id="fnref:{doc.footnotecounter}"><a href="#fn:{doc.footnotecounter}" rel="footnote">{doc.footnotecounter}</a></sup>'
htmlcontent_before = rf'<li class="footnote" id="fn:{doc.footnotecounter}"><p>'
htmlcontent_after = rf'<a href="#fnref:{doc.footnotecounter}" title="return to article"> ↩</a><p></li>'
doc.footnotecounter += 1
conts = pf.Div(*e.content)
doc.footnotecontents += (
[pf.RawBlock(htmlcontent_before, format="html")]
+ [conts]
+ [pf.RawBlock(htmlcontent_after, format="html")]
)
return pf.RawInline(htmlref, format="html")
|
462f4886cc7b4be46b3904abc3396096f36d7938
| 3,645,268
|
def segment(x,u1,u2):
""" given a figure x, create a new figure spanning the specified interval in the original figure
"""
if not (isgoodnum(u1) and isgoodnum(u2)) or close(u1,u2) or u1<0 or u2 < 0 or u1 > 1 or u2 > 1:
raise ValueError('bad parameter arguments passed to segment: '+str(u1)+', '+str(u2))
if ispoint(x):
return deepcopy(x)
elif isline(x):
return segmentline(x,u1,u2)
elif isarc(x):
return segmentarc(x,u1,u2)
elif ispoly(x):
return segmentpoly(x,u1,u2)
elif isgeomlist(x):
return segmentgeomlist(x,u1,u2)
else:
raise ValueError("inappropriate figure type for segment(): "+str(x))
|
291ddfb011ece20840a4a56fdc7bc87f2187625f
| 3,645,269
|
import os
from zipfile import ZipFile
def compress_files(file_names):
"""
Given a list of files, compress all of them into a single file.
Keeps the existing directory structure in tact.
"""
archive_file_name = 'archive.zip'
print(f'{len(file_names)} files found. Compressing the files...')
cwd = os.getcwd()
with ZipFile(archive_file_name, 'w') as zip_file:
for path in file_names:
zip_file.write(path, path.replace(cwd, ''))
print(
f'All {len(file_names)} files were successfully compressed into {archive_file_name}')
return archive_file_name
|
e7b29373f099d73cc207cc78162742721dc96246
| 3,645,270
|
import tensorflow as tf
def box_from_anchor_and_target(anchors, regressed_targets):
"""
Get bounding box from anchor and target through transformation provided in the paper.
:param anchors: Nx4 anchor boxes
:param regressed_targets: Nx4 regression targets
:return:
"""
boxes_v = anchors[:, 2] * regressed_targets[:, 0] / 10.0 + anchors[:, 0]
boxes_u = anchors[:, 3] * regressed_targets[:, 1] / 10.0 + anchors[:, 1]
boxes_h = anchors[:, 2] * \
tf.clip_by_value(tf.exp(regressed_targets[:, 2] / 5.0), 1e-4, 1e4)
boxes_w = anchors[:, 3] * \
tf.clip_by_value(tf.exp(regressed_targets[:, 3] / 5.0), 1e-4, 1e4)
return tf.stack([boxes_v,
boxes_u,
boxes_h,
boxes_w], axis=1)
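
# A hedged usage sketch (assumes TensorFlow 2.x; the anchor values are illustrative).
example_anchors = tf.constant([[50.0, 50.0, 20.0, 40.0]])   # [v, u, h, w]
example_targets = tf.zeros((1, 4))                           # zero targets reproduce the anchor
example_boxes = box_from_anchor_and_target(example_anchors, example_targets)
# With zero regression targets the returned box equals the anchor: [[50., 50., 20., 40.]]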
|
c55a54535fbfa67502c1b65ec71c23d772dedd7e
| 3,645,271
|
def block_device_mapping_update(context, bdm_id, values, legacy=True):
"""Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values, legacy)
|
7959cc6c849cb599c719e21f8c8315a7bc7ddd09
| 3,645,272
|
def consumer(address,callback,message_type):
"""
Creates a consumer binding to the given address pull messages.
The callback is invoked for every reply received.
Args:
- address: the address to bind the PULL socket to.
- callback: the callback to invoke for every message. Must accept 1 variables - the message
- message_type: the type of message to receive
"""
return Consumer(address,callback,message_type)
|
a1c01bafa4f65ba0a0a212916556c36315fe2c88
| 3,645,273
|
def _parse_continuous_records(prepared_page, section_dict):
"""Handle parsing a continuous list of records."""
columns = 6
start = prepared_page.index('Date and time')
for i, column in enumerate(prepared_page[start:start + columns]):
column_index = start + i
values = prepared_page[column_index + columns::columns]
if column in section_dict:
section_dict[column] = section_dict[column] + values
else:
section_dict[column] = values
return section_dict
|
7ddcb52433828d37ce6e0cac5d51d8fcfb249296
| 3,645,274
|
def power_law_at_2500(x, amp, slope, z):
""" Power law model anchored at 2500 AA
This model is defined for a spectral dispersion axis in Angstroem.
:param x: Dispersion of the power law
:type x: np.ndarray
:param amp: Amplitude of the power law (at 2500 A)
:type amp: float
:param slope: Slope of the power law
:type slope: float
:param z: Redshift
:type z: float
:return: Power law model
:rtype: np.ndarray
"""
return amp * (x / (2500. * (z+1.))) ** slope
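
# A minimal usage sketch (the wavelength grid, amplitude, slope and redshift below
# are illustrative values, not taken from the source).
import numpy as np

wave = np.linspace(4000., 9000., 5)   # observed-frame dispersion in Angstroem
flux = power_law_at_2500(wave, amp=1.0, slope=-1.5, z=1.0)
# At x = 2500 * (1 + z) = 5000 Angstroem the model returns exactly amp.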
|
508227f332f652d00c785074c20f9acefbce9258
| 3,645,275
|
def map2alm(
maps,
lmax=None,
mmax=None,
iter=3,
pol=True,
use_weights=False,
datapath=None,
gal_cut=0,
use_pixel_weights=False,
):
"""Computes the alm of a Healpix map. The input maps must all be
in ring ordering.
Parameters
----------
maps : array-like, shape (Npix,) or (n, Npix)
The input map or a list of n input maps. Must be in ring ordering.
lmax : int, scalar, optional
Maximum l of the power spectrum. Default: 3*nside-1
mmax : int, scalar, optional
Maximum m of the alm. Default: lmax
iter : int, scalar, optional
Number of iteration (default: 3)
pol : bool, optional
If True, assumes input maps are TQU. Output will be TEB alm's.
(input must be 1 or 3 maps)
If False, apply spin 0 harmonic transform to each map.
(input can be any number of maps)
If there is only one input map, it has no effect. Default: True.
use_weights: bool, scalar, optional
If True, use the ring weighting. Default: False.
datapath : None or str, optional
If given, the directory where to find the weights data.
gal_cut : float [degrees]
pixels at latitude in [-gal_cut;+gal_cut] are not taken into account
use_pixel_weights: bool, optional
If True, use pixel by pixel weighting, healpy will automatically download the weights, if needed
Returns
-------
alms : array or tuple of array
alm or a tuple of 3 alm (almT, almE, almB) if polarized input.
Notes
-----
The pixels which have the special `UNSEEN` value are replaced by zeros
before spherical harmonic transform. They are converted back to `UNSEEN`
value, so that the input maps are not modified. Each map have its own,
independent mask.
"""
maps = ma_to_array(maps)
info = maptype(maps)
nside = pixelfunc.get_nside(maps)
check_max_nside(nside)
if use_pixel_weights:
if use_weights:
raise RuntimeError("Either use pixel or ring weights")
with data.conf.set_temp("dataurl", DATAURL), data.conf.set_temp(
"remote_timeout", 30
):
pixel_weights_filename = data.get_pkg_data_filename(
"full_weights/healpix_full_weights_nside_%04d.fits" % nside,
package="healpy",
)
else:
pixel_weights_filename = None
if pol or info in (0, 1):
alms = _sphtools.map2alm(
maps,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
else:
# info >= 2 and pol is False : spin 0 spht for each map
alms = [
_sphtools.map2alm(
mm,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
for mm in maps
]
return np.array(alms)
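
# A hedged usage sketch (this appears to be healpy's map2alm; nside and lmax are illustrative).
# import healpy as hp
# import numpy as np
# nside = 64
# m = np.ones(hp.nside2npix(nside))       # a trivial RING-ordered map
# alm = hp.map2alm(m, lmax=2 * nside)     # spherical harmonic coefficients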
|
9312a6c5ee40fe9a3ef3f6057ee5964d200f9732
| 3,645,276
|
def credits():
"""
Credits Page
"""
return render_template("credits.html")
|
00fdc0be4c3abd3df21993b271977208252123df
| 3,645,277
|
def register_view(request):
"""Register a new user."""
if request.method == "POST":
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new User object but don't save it yet.
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data["password"]
)
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
context = {"user_form": user_form}
return render(request, "account/register_done.html", context)
else:
user_form = UserRegistrationForm()
return render(request, "account/register.html", {"user_form": user_form})
|
19fa639603c7ea67a76696e4b88aae376461988c
| 3,645,278
|
from typing import Dict
from typing import Any
from typing import Optional
import requests
import json
def handle_pdf_build(pdf_job_dict: Dict[str, Any], tx_payload, redis_connection) -> str:
"""
A job dict is now setup and remembered in REDIS
so that we can match it when we get a future callback.
The project.json (in the folder above the CDN one) is also updated, e.g., with new commits.
The job is now passed to the tX system by means of a
POST to the tX webhook (which should hopefully respond with a callback).
"""
AppSettings.logger.debug("Webhook.handle_build setting up pdf job dict…")
our_job_id = get_unique_job_id()
pdf_job_dict['job_id'] = our_job_id
pdf_job_dict['output_format'] = 'pdf'
pdf_job_dict['cdn_file'] = f'tx/job/{our_job_id}.zip'
pdf_job_dict['output'] = f"https://{AppSettings.cdn_bucket_name}/{pdf_job_dict['cdn_file']}"
# NOTE: following line removed as stats recording used too much disk space
# pdf_job_dict['user_projects_invoked_string'] = user_projects_invoked_string # Need to save this for reuse
pdf_job_dict['links'] = {
'href': f'{AppSettings.api_url}/tx/job/{our_job_id}',
'rel': 'self',
'method': 'GET'
}
AppSettings.logger.debug(f"pdf_job_dict: {pdf_job_dict}")
# Save the job info in Redis for the callback to use
remember_job(pdf_job_dict, redis_connection)
# Pass the work request onto the tX system
AppSettings.logger.info(f"PDF Job: Post request to tX system @ {tx_post_url} …")
tx_payload['job_id'] = our_job_id
tx_payload['output_format'] = 'pdf'
AppSettings.logger.debug(f"Payload for pdf tX: {tx_payload}")
response: Optional[requests.Response]
try:
response = requests.post(tx_post_url, json=tx_payload)
except requests.exceptions.ConnectionError as e:
AppSettings.logger.critical(f"Callback connection error: {e}")
response = None
if response:
#AppSettings.logger.info(f"response.status_code = {response.status_code}, response.reason = {response.reason}")
#AppSettings.logger.debug(f"response.headers = {response.headers}")
try:
AppSettings.logger.info(f"response.json = {response.json()}")
except json.decoder.JSONDecodeError:
AppSettings.logger.info("No valid response JSON found")
AppSettings.logger.debug(f"response.text = {response.text}")
if response.status_code != 200:
AppSettings.logger.critical(f"Failed to submit job to tX:"
f" {response.status_code}={response.reason}")
else: # no response
error_msg = "Submission of job to tX system got no response"
AppSettings.logger.critical(error_msg)
# So we go into the FAILED queue and monitoring system
raise Exception(error_msg)
return our_job_id
|
2d1258ad32fe13782264332c914793c82777e995
| 3,645,279
|
def menu(request):
"""
    A view that returns menu.html,
    where all menu images are displayed
    in a carousel.
"""
menus = MenuImages.objects.all()
context = {
'menus': menus
}
return render(request, 'menu/menu.html', context)
|
9491d9e1d4084ed78d4aadfd867910e2d0511704
| 3,645,280
|
def wav_process(PATH, i):
"""
    Audio processing: read the file with the given index from the path and process it.
    Args:
        PATH (str): path to the audio files
        i (int): index of the file to read
    Returns:
        float: estimated angle of the sound source (in degrees)
"""
    # read the data
wav, sr = read_wav(PATH, i + 1)
    # reduce noise
wav_rn = reduce_noise(wav)
    # estimate the angles
angle_list = estimate_angle(wav_rn, sr)
    # determine the reference direction
angle_13, angle_24 = angle_list[1], angle_list[4]
theta13p, theta13n = (180 + angle_13) % 360, 180 - angle_13
theta24p, theta24n = (270 + angle_24) % 360, 270 - angle_24
if angle_13 > 15 and angle_13 < 165:
if ((theta24p > theta13p - 10 and theta24p < theta13p + 10) or
(theta24p + 360 > theta13p - 10 and theta24p + 360 < theta13p + 10)
or (theta24n > theta13p - 10 and theta24n < theta13p + 10)
or (theta24n + 360 > theta13p - 10
and theta24n + 360 < theta13p + 10)):
scope_mid = theta13p
else:
scope_mid = theta13n
else:
if ((theta13p > theta24p - 10 and theta13p < theta24p + 10) or
(theta13p + 360 > theta24p - 10 and theta13p + 360 < theta24p + 10)
or (theta13n > theta24p - 10 and theta13n < theta24p + 10)
or (theta13n + 360 > theta24p - 10
and theta13n + 360 < theta24p + 10)):
scope_mid = theta24p
else:
scope_mid = theta24n
angle_base = [135, 180, 225, 225, 270, 315]
processed_angle = []
sum = 0
weights = 0
for i, elem in enumerate(angle_list):
if elem > 15 and elem < 165:
            # weighted combination to compute the final angle
if elem > 65 and elem < 115:
weight = 100
else:
weight = 1
ap = (angle_base[i] + elem + 360) % 360
an = (angle_base[i] - elem + 360) % 360
if ap > scope_mid - 10 and ap < scope_mid + 10:
processed_angle.append(ap)
sum = sum + ap * weight
weights = weights + weight
else:
processed_angle.append(an)
sum = sum + an * weight
weights = weights + weight
return sum / weights
|
ee336928b4b5e221a72ba8b6509555666ff3b763
| 3,645,281
|
def extract_vuln_id(input_string):
"""
Function to extract a vulnerability ID from a message
"""
if 'fp' in input_string.lower():
wordlist = input_string.split()
vuln_id = wordlist[-1]
return vuln_id
else:
return None
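
# A minimal usage sketch (the messages are illustrative):
print(extract_vuln_id("marking FP for vuln 12345"))   # -> "12345"
print(extract_vuln_id("unrelated message"))           # -> None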
|
06673f2b401472185c8a3e6fc373d39c171791db
| 3,645,282
|
import os
def ensure_paths_for_args(args):
"""
Ensure all arguments with paths are absolute & have simplification removed
Just apply os.path.abspath & os.path.expanduser
:param args: the arguments given from argparse
:returns: an updated args
"""
args.seqs_of_interest = os.path.abspath(
os.path.expanduser(args.seqs_of_interest))
args.assembly_dir = os.path.abspath(os.path.expanduser(args.assembly_dir))
if args.output is not None:
args.output = os.path.abspath(os.path.expanduser(args.output))
if args.cons is not None:
args.cons = os.path.abspath(os.path.expanduser(args.cons))
if args.index_file is not None:
args.index_file = os.path.abspath(os.path.expanduser(args.index_file))
if args.existing_data is not None:
args.existing_data = os.path.abspath(os.path.expanduser(args.existing_data))
return args
|
e15b64f2856954bbb7f61d44084d01bb8cdc53ba
| 3,645,283
|
from PIL import Image, ImageChops
def compare_images(img1, img2):
    """Expects paths (strings) to two images; returns an integer representing their pixel difference."""
with Image.open(img1) as img1, Image.open(img2) as img2:
# Calculate a difference image that is the difference between the two images.
diff = ImageChops.difference(img1, img2)
return sum(_unpack_image(diff.getdata())[1])
|
bc94987785a5731e71a1e25daae51179c415eda6
| 3,645,284
|
def bytes_to_nodes(buf):
""" Return a list of ReadNodes corresponding to the bytes in buf.
@param bytes buf: a bytes object
@rtype: list[ReadNode]
>>> bytes_to_nodes(bytes([0, 1, 0, 2]))
[ReadNode(0, 1, 0, 2)]
"""
lst = []
for i in range(0, len(buf), 4):
l_type = buf[i]
l_data = buf[i+1]
r_type = buf[i+2]
r_data = buf[i+3]
lst.append(ReadNode(l_type, l_data, r_type, r_data))
return lst
|
1296b49f5d76605d4408eddf21b76f286dfc5f5b
| 3,645,285
|
import sys
def require_captcha(function, *args, **kwargs):
"""Return a decorator for methods that require captchas."""
raise_captcha_exception = kwargs.pop('raise_captcha_exception', False)
captcha_id = None
# Get a handle to the reddit session
if hasattr(args[0], 'reddit_session'):
reddit_session = args[0].reddit_session
else:
reddit_session = args[0]
while True:
try:
if captcha_id:
captcha_answer = _get_captcha(reddit_session, captcha_id)
# When the method is being decorated, all of its default
# parameters become part of this *args tuple. This means that
# *args currently contains a None where the captcha answer
# needs to go. If we put the captcha in the **kwargs,
# we get a TypeError for having two values of the same param.
func_args = _make_func_args(function)
if 'captcha' in func_args:
captcha_index = func_args.index('captcha')
args = list(args)
args[captcha_index] = captcha_answer
else:
kwargs['captcha'] = captcha_answer
return function(*args, **kwargs)
except errors.InvalidCaptcha as exception:
if raise_captcha_exception or \
not hasattr(sys.stdin, 'closed') or sys.stdin.closed:
raise
captcha_id = exception.response['captcha']
|
e6f427317fd64a156db331d0d85220e09872b5af
| 3,645,286
|
def list_unnecessary_loads(app_label=None):
"""
Scan the project directory tree for template files and process each and
every one of them.
    :param app_label: String; app label supplied by the user
    :returns: bool; whether any issues were found (results are also printed to the console)
"""
if app_label:
app = get_app(app_label)
else:
app = None
dt_engines = get_djangotemplates_engines()
for dt_engine in dt_engines:
has_issues = False
templates = []
# Get the locations of installed packages
pkg_locations = get_package_locations()
# Get template directories located within the project
for directory in dt_engine.template_dirs:
templates += get_templates(directory, pkg_locations, app)
if templates:
for template in templates:
status = process_template(template, dt_engine.engine)
if status:
has_issues = status
if not has_issues:
output_message(reason=3)
else:
output_message(reason=1)
return has_issues
|
d78aeb6132e4f79f4458454f6107f9003db37999
| 3,645,287
|
from typing import Any
def _element(
html_element: str,
html_class: str,
value: Any,
is_visible: bool,
**kwargs,
) -> dict:
"""
Template to return container with information for a <td></td> or <th></th> element.
"""
if "display_value" not in kwargs:
kwargs["display_value"] = value
return {
"type": html_element,
"value": value,
"class": html_class,
"is_visible": is_visible,
**kwargs,
}
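
# A minimal usage sketch: build the dict describing one header cell.
example_cell = _element("th", "col_heading level0 col0", "price", True)
# -> {"type": "th", "value": "price", "class": "col_heading level0 col0",
#     "is_visible": True, "display_value": "price"}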
|
4ce4d2ff9f547470d4a875508c40d3ae2a927ba0
| 3,645,288
|
import os
def create_app(test_config=None):
"""
This method creates a flask app object with a
given configuration.
Args:
test_config (dict): Defaults to None.
Returns:
app (Flask): Flask app object.
"""
app = Flask(__name__)
Bootstrap(app)
# check environment variables to see which config to load
env = os.environ.get("FLASK_ENV", "dev")
if test_config:
app.config.from_mapping(**test_config)
else:
# config dict is from config.py
app.config.from_object(config[env])
# create database for development and testing
if env != "prod":
db_url = app.config["SQLALCHEMY_DATABASE_URI"]
if not database_exists(db_url):
create_database(db_url)
db.init_app(app)
Migrate(app, db)
app.register_blueprint(views.flask_app)
return app
|
a901f37ee573b159a25a8c9c715205cf0b2cd496
| 3,645,289
|
import torch
def lovasz_hinge_loss(pred, target, crop_masks, activation='relu', map2inf=False):
"""
Binary Lovasz hinge loss
    pred: [P] Variable, logits at each prediction (between -inf and +inf)
target: [P] Tensor, binary ground truth labels (0 or 1)
"""
losses = []
for m, p, t in zip(crop_masks, pred, target): # > imgs
num_objs = t.size()[0]
loss = t.new_tensor(0.0)
for i in range(num_objs):
if len(p[i]) > 0:
loss += lovasz_hinge_loss_single(p[i][m[i]].view(-1),
t[i][m[i]].view(-1),
activation=activation,
map2inf=map2inf)
if num_objs > 0:
loss /= num_objs
losses.append(loss)
losses = torch.stack(losses)
return losses
|
c1d7ce49feda1a2ba1116d03de3ba8a5b9ad65a9
| 3,645,290
|
def get_gene_summary(gene):
"""Gets gene summary from a model's gene."""
return {
gene.id: {
"name": gene.name,
"is_functional": gene.functional,
"reactions": [{rxn.id: rxn.name} for rxn in gene.reactions],
"annotation": gene.annotation,
"notes": gene.notes,
}
}
|
dd9cb3f8e9841a558898c67a16a02da1b39479d2
| 3,645,291
|
def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
    :param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
    :type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = "\n".join(
[
" [{}] {}{}".format(
i + 1,
x["name"] if isinstance(x, dict) and "name" in x else x,
" - " + x["desc"] if isinstance(x, dict) and "desc" in x else "",
)
for i, x in enumerate(a_list)
]
)
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input(
"{}\n{}\nPlease enter a choice [Default choice({})]: ".format(msg, options, default)
)
if val == "?" and help_string is not None:
print(help_string)
continue
if not val:
val = "{}".format(default)
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
logger.warning("Valid values are %s", allowed_vals)
|
dc5f077d3710420b9d9b26032ee340c0671d009d
| 3,645,292
|
import numpy as np
def random_permutation_matrix(size):
"""Random permutation matrix.
Parameters
----------
size : int
The dimension of the random permutation matrix.
Returns
-------
random_permutation : array, shape (size, size)
An identity matrix with its rows random shuffled.
"""
identity = np.identity(size)
index = np.arange(0, size)
np.random.shuffle(index)
random_permutation = identity[index]
return random_permutation
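
# A minimal usage sketch: every row and every column of the result contains exactly one 1.
P = random_permutation_matrix(4)
assert (P.sum(axis=0) == 1).all() and (P.sum(axis=1) == 1).all()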
|
0ca4e93218fd647188ac09c4d71a3df1cff3acf7
| 3,645,293
|
from typing import Optional
import collections
def matched(captured: Optional[Capture], groups_count: int) -> MatchedType:
"""
    Construct the matched strings by traversing\
    the given captured structure
    The passed Capture has the last captured char\
    and so the sequence is traversed in reverse
Sub-matches are put in their group index
Repeating sub-matches (i.e: ``(a)*``) are put\
into a nested sequence of their group index
:param captured: The last capture or None
:param groups_count: number of groups
:return: matched strings
:private:
"""
match = collections.defaultdict(lambda: [])
curr_groups = []
while captured:
if captured.char == Symbols.GROUP_END:
curr_groups.append(captured)
if captured.is_repeated:
match[captured.index].append([])
captured = captured.prev
continue
if captured.char == Symbols.GROUP_START:
curr_groups.pop()
captured = captured.prev
continue
for g in curr_groups:
if g.is_repeated:
match[g.index][-1].append(captured.char)
else:
match[g.index].append(captured.char)
captured = captured.prev
assert not curr_groups
return tuple(
_join_reversed(match[g])
if g in match
else None
for g in range(groups_count))
|
0bb7544f9d5ac339e0aed717bc5779deba781dc8
| 3,645,294
|
def tle_fmt_float(num,width=10):
""" Return a left-aligned signed float string, with no leading zero left of the decimal """
digits = (width-2)
ret = "{:<.{DIGITS}f}".format(num,DIGITS=digits)
if ret.startswith("0."):
return " " + ret[1:]
if ret.startswith("-0."):
return "-" + ret[2:]
|
686cb4061e5cf2ad620b85b0e66b96a8cd1c3abf
| 3,645,295
|
def pack(name=None, prefix=None, output=None, format='infer',
arcroot='', dest_prefix=None, verbose=False, force=False,
compress_level=4, n_threads=1, zip_symlinks=False, zip_64=True,
filters=None, ignore_editable_packages=False):
"""Package an existing conda environment into an archive file.
Parameters
----------
name : str, optional
The name of the conda environment to pack.
prefix : str, optional
A path to a conda environment to pack.
output : str, optional
The path of the output file. Defaults to the environment name with a
``.tar.gz`` suffix (e.g. ``my_env.tar.gz``).
format : {'infer', 'zip', 'tar.gz', 'tgz', 'tar.bz2', 'tbz2', 'tar'}, optional
The archival format to use. By default this is inferred by the output
file extension.
arcroot : str, optional
The relative path in the archive to the conda environment.
Defaults to ''.
dest_prefix : str, optional
If present, prefixes will be rewritten to this path before packaging.
In this case the ``conda-unpack`` script will not be generated.
verbose : bool, optional
If True, progress is reported to stdout. Default is False.
force : bool, optional
Whether to overwrite any existing archive at the output path. Default
is False.
compress_level : int, optional
The compression level to use, from 0 to 9. Higher numbers decrease
output file size at the expense of compression time. Ignored for
``format='zip'``. Default is 4.
zip_symlinks : bool, optional
Symbolic links aren't supported by the Zip standard, but are supported
by *many* common Zip implementations. If True, store symbolic links in
the archive, instead of the file referred to by the link. This can
avoid storing multiple copies of the same files. *Note that the
resulting archive may silently fail on decompression if the ``unzip``
implementation doesn't support symlinks*. Default is False. Ignored if
format isn't ``zip``.
n_threads : int, optional
The number of threads to use. Set to -1 to use the number of cpus on
this machine. If a file format doesn't support threaded packaging, this
option will be ignored. Default is 1.
zip_64 : bool, optional
Whether to enable ZIP64 extensions. Default is True.
filters : list, optional
A list of filters to apply to the files. Each filter is a tuple of
``(kind, pattern)``, where ``kind`` is either ``'exclude'`` or
``'include'`` and ``pattern`` is a file pattern. Filters are applied in
the order specified.
ignore_editable_packages : bool, optional
By default conda-pack will error in the presence of editable packages.
Set to True to skip these checks.
Returns
-------
out_path : str
The path to the archived environment.
"""
if name and prefix:
raise CondaPackException("Cannot specify both ``name`` and ``prefix``")
if verbose:
print("Collecting packages...")
if prefix:
env = CondaEnv.from_prefix(prefix,
ignore_editable_packages=ignore_editable_packages)
elif name:
env = CondaEnv.from_name(name, ignore_editable_packages=ignore_editable_packages)
else:
env = CondaEnv.from_default(ignore_editable_packages=ignore_editable_packages)
if filters is not None:
for kind, pattern in filters:
if kind == 'exclude':
env = env.exclude(pattern)
elif kind == 'include':
env = env.include(pattern)
else:
raise CondaPackException("Unknown filter of kind %r" % kind)
return env.pack(output=output, format=format, arcroot=arcroot,
dest_prefix=dest_prefix,
verbose=verbose, force=force,
compress_level=compress_level, n_threads=n_threads,
zip_symlinks=zip_symlinks, zip_64=zip_64)
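
# A hedged usage sketch (assumes a conda environment named "my_env" exists locally;
# the output path and filter are illustrative).
# out_path = pack(name="my_env", output="my_env.tar.gz",
#                 filters=[("exclude", "*.pyc")], verbose=True)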
|
500841ec51c58ec0ff99c4b286c8a235ab887d7b
| 3,645,296
|
import numpy as np
def rasterize(points):
""" Return (array, no_data_value) tuple.
Rasterize the indices of the points in an array at the highest quadtree
resolution. Note that points of larger squares in the quadtree also just
occupy one cell in the resulting array, the rest of the cells get the
no_data_value.
"""
points = np.asarray(points, dtype=float)
x, y = points.transpose()
xs, ys = analyze(x, y)
x1, y2 = x.min(), y.max()
# get indices to land each point index in its own array cell
j = np.int64(np.zeros_like(x) if xs is None else (x - x1) / xs)
i = np.int64(np.zeros_like(y) if ys is None else (y2 - y) / ys)
index = i, j
no_data_value = len(points)
ids = np.arange(no_data_value)
values = np.full((i.max() + 1, j.max() + 1), no_data_value)
values[index] = ids
return values, no_data_value
|
41db3b63a5956aff192585c7c5ce5b6c83f0d6cd
| 3,645,297
|
import numpy as np
def parse_aedge_layout_attrs(aedge, translation=None):
    """
    Parse the graphviz splineType layout attributes of an edge.
    """
if translation is None:
translation = np.array([0, 0])
edge_attrs = {}
apos = aedge.attr['pos']
# logger.info('apos = %r' % (apos,))
end_pt = None
start_pt = None
def safeadd(x, y):
if x is None or y is None:
return None
return x + y
strpos_list = apos.split(' ')
strtup_list = [ea.split(',') for ea in strpos_list]
ctrl_ptstrs = [ea for ea in strtup_list if ea[0] not in 'es']
end_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 'e']
start_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 's']
assert len(end_ptstrs) <= 1
assert len(start_ptstrs) <= 1
if len(end_ptstrs) == 1:
end_pt = np.array([float(f) for f in end_ptstrs[0]])
if len(start_ptstrs) == 1:
start_pt = np.array([float(f) for f in start_ptstrs[0]])
ctrl_pts = np.array([tuple([float(f) for f in ea]) for ea in ctrl_ptstrs])
adata = aedge.attr
edge_attrs['pos'] = apos
edge_attrs['ctrl_pts'] = safeadd(ctrl_pts, translation)
edge_attrs['start_pt'] = safeadd(start_pt, translation)
edge_attrs['end_pt'] = safeadd(end_pt, translation)
edge_attrs['lp'] = safeadd(parse_point(adata.get('lp', None)), translation)
edge_attrs['label'] = adata.get('label', None)
edge_attrs['headlabel'] = adata.get('headlabel', None)
edge_attrs['taillabel'] = adata.get('taillabel', None)
edge_attrs['head_lp'] = safeadd(parse_point(adata.get('head_lp', None)), translation)
edge_attrs['tail_lp'] = safeadd(parse_point(adata.get('tail_lp', None)), translation)
return edge_attrs
|
f086e2267d19710685e3515aeee352066bd983b2
| 3,645,298
|
import importlib
import re
def load_class_by_path(taskpath):
""" Given a taskpath, returns the main task class. """
return getattr(importlib.import_module(re.sub(r"\.[^.]+$", "", taskpath)), re.sub(r"^.*\.", "", taskpath))
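
# A minimal usage sketch (any importable "package.module.ClassName" dotted path works):
OrderedDictClass = load_class_by_path("collections.OrderedDict")
od = OrderedDictClass(a=1, b=2)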
|
a9601dafbc73635d81732a0f3747fd450e393d76
| 3,645,299
|