| content | id |
|---|---|
def step_euler(last, dt, drift, volatility, noise):
"""Approximate SDE in one time step with Euler scheme"""
return last + drift * dt + np.dot(volatility, noise)
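# A brief usage sketch (added for illustration; not part of the original snippet):
# advance a 2-D state one Euler-Maruyama step. The Brownian increment is drawn
# with standard deviation sqrt(dt) so the volatility matrix scales it correctly.
import numpy as np

rng = np.random.default_rng(0)
dt = 0.01
last = np.array([1.0, 0.5])                     # current state
drift = np.array([0.1, -0.2])                   # drift vector
volatility = 0.3 * np.eye(2)                    # diffusion matrix
noise = rng.normal(scale=np.sqrt(dt), size=2)   # Brownian increment ~ N(0, dt)
new_state = step_euler(last, dt, drift, volatility, noise)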
| 5,338,100
|
def retrieve_downloads(config_bundle, cache_dir, show_progress, disable_ssl_verification=False):
"""
Retrieve downloads into the downloads cache.
config_bundle is the config.ConfigBundle to retrieve downloads for.
cache_dir is the pathlib.Path to the downloads cache.
show_progress is a boolean indicating if download progress is printed to the console.
disable_ssl_verification is a boolean indicating if certificate verification
should be disabled for downloads using HTTPS.
Raises FileNotFoundError if the downloads path does not exist.
Raises NotADirectoryError if the downloads path is not a directory.
"""
if not cache_dir.exists():
raise FileNotFoundError(cache_dir)
if not cache_dir.is_dir():
raise NotADirectoryError(cache_dir)
if disable_ssl_verification:
import ssl
# TODO: Remove this or properly implement disabling SSL certificate verification
orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access
ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access
try:
for download_name, download_properties in _downloads_iter(config_bundle):
get_logger().info('Downloading "%s" to "%s" ...', download_name,
download_properties.download_filename)
download_path = cache_dir / download_properties.download_filename
_download_if_needed(download_path, download_properties.url, show_progress)
if download_properties.has_hash_url():
get_logger().info('Downloading hashes for "%s"', download_name)
_, hash_filename, hash_url = download_properties.hashes['hash_url']
_download_if_needed(cache_dir / hash_filename, hash_url, show_progress)
finally:
# Try to reduce damage of hack by reverting original HTTPS context ASAP
if disable_ssl_verification:
ssl._create_default_https_context = orig_https_context
| 5,338,101
|
def KK_RC66_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
R11 = params["R11"]
R12 = params["R12"]
R13 = params["R13"]
R14 = params["R14"]
R15 = params["R15"]
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
R44 = params["R44"]
R45 = params["R45"]
R46 = params["R46"]
R47 = params["R47"]
R48 = params["R48"]
R49 = params["R49"]
R50 = params["R50"]
R51 = params["R51"]
R52 = params["R52"]
R53 = params["R53"]
R54 = params["R54"]
R55 = params["R55"]
R56 = params["R56"]
R57 = params["R57"]
R58 = params["R58"]
R59 = params["R59"]
R60 = params["R60"]
R61 = params["R61"]
R62 = params["R62"]
R63 = params["R63"]
R64 = params["R64"]
R65 = params["R65"]
R66 = params["R66"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j * t_values[22]))
+ (R24 / (1 + w * 1j * t_values[23]))
+ (R25 / (1 + w * 1j * t_values[24]))
+ (R26 / (1 + w * 1j * t_values[25]))
+ (R27 / (1 + w * 1j * t_values[26]))
+ (R28 / (1 + w * 1j * t_values[27]))
+ (R29 / (1 + w * 1j * t_values[28]))
+ (R30 / (1 + w * 1j * t_values[29]))
+ (R31 / (1 + w * 1j * t_values[30]))
+ (R32 / (1 + w * 1j * t_values[31]))
+ (R33 / (1 + w * 1j * t_values[32]))
+ (R34 / (1 + w * 1j * t_values[33]))
+ (R35 / (1 + w * 1j * t_values[34]))
+ (R36 / (1 + w * 1j * t_values[35]))
+ (R37 / (1 + w * 1j * t_values[36]))
+ (R38 / (1 + w * 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
+ (R44 / (1 + w * 1j * t_values[43]))
+ (R45 / (1 + w * 1j * t_values[44]))
+ (R46 / (1 + w * 1j * t_values[45]))
+ (R47 / (1 + w * 1j * t_values[46]))
+ (R48 / (1 + w * 1j * t_values[47]))
+ (R49 / (1 + w * 1j * t_values[48]))
+ (R50 / (1 + w * 1j * t_values[49]))
+ (R51 / (1 + w * 1j * t_values[50]))
+ (R52 / (1 + w * 1j * t_values[51]))
+ (R53 / (1 + w * 1j * t_values[52]))
+ (R54 / (1 + w * 1j * t_values[53]))
+ (R55 / (1 + w * 1j * t_values[54]))
+ (R56 / (1 + w * 1j * t_values[55]))
+ (R57 / (1 + w * 1j * t_values[56]))
+ (R58 / (1 + w * 1j * t_values[57]))
+ (R59 / (1 + w * 1j * t_values[58]))
+ (R60 / (1 + w * 1j * t_values[59]))
+ (R61 / (1 + w * 1j * t_values[60]))
+ (R62 / (1 + w * 1j * t_values[61]))
+ (R63 / (1 + w * 1j * t_values[62]))
+ (R64 / (1 + w * 1j * t_values[63]))
+ (R65 / (1 + w * 1j * t_values[64]))
+ (R66 / (1 + w * 1j * t_values[65]))
)
| 5,338,102
|
def plot_comparsion():
"""
Quick function to plot a comparison of two simulation runs. Hard coding the file names for now...
"""
sgridlow = read_and_reshape_data("sgrid_1e-8.out")
sgridhigh = read_and_reshape_data("sgrid_1e-5.out")
fig, ax = plt.subplots(figsize=(12, 12))
ax.semilogy(sgridlow[-1, :, 1], sgridlow[-1, :, 6], label=r"$\rho = 10^{-8}$ g/cm$^{3}$")
ax.semilogy(sgridhigh[-1, :, 1], sgridhigh[-1, :, 6], label=r"$\rho = 10^{-5}$ g/cm$^{3}$")
ax.set_xlim(sgridlow[-1, :, 1].min(), sgridlow[-1, :, 1].max())
ax.set_xlabel("Height, $z$", fontsize=13)
ax.set_ylabel("Temperature, $T$", fontsize=13)
ax.legend()
fig.suptitle(r"$\rho = 10^{-8}$ g/cm$^{3}$ Vs. $\rho = 10^{-5}$ g/cm$^{3}$")
plt.savefig("temperature_comparison.{}".format(FILE_TYPE))
if plot_show:
plt.show()
else:
plt.close()
return
| 5,338,103
|
def similar_in_manner(manner_1: UnmarkableManner) -> List[Manner]:
"""
If the value is a wildcard value, return
    all possible manner of articulation values, otherwise
return the single corresponding manner of articulation value.
"""
if isinstance(manner_1, MarkedManner):
return manner_1.manner
return manner_states
| 5,338,104
|
def wikify(value):
"""Converts value to wikipedia "style" of URLS, removes non-word characters
and converts spaces to hyphens and leaves case of value.
"""
value = re.sub(r'[^\w\s-]', '', value).strip()
return re.sub(r'[-\s]+', '_', value)
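# Illustrative behaviour (added for clarity; assumes `re` is imported, as wikify requires):
import re

assert wikify("Hello, World!") == "Hello_World"
assert wikify("  spaced - out  title ") == "spaced_out_title"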
| 5,338,105
|
def predict(x, y, parameters):
"""
X -- data set of examples you would like to label
parameters -- parameters of the trained model
p -- predictions for the given dataset X
"""
# X的数量
m = x.shape[1]
# 数据集X的对应的prediction的维度
p = np.zeros((1, m))
# 前向传播
probas, caches = L_model_forward(x, parameters)
# 将0 ~ 1 -- 映射 --> 0, 1
for i in range(0, probas.shape[1]):
if probas[0, i] > 0.5:
p[0, i] = 1
else:
p[0, i] = 0
print("Accuracy: " + str(np.sum((p == y) / m)))
| 5,338,106
|
def test_simple_conv_encoder_next_size(
simple_conv_encoder_image_classification):
"""
Args:
simple_conv_encoder_image_classification (@pytest.fixture): SimpleConvEncoder
Asserts: True if result is a tuple of adequate values.
"""
dim_x = 32
dim_y = 32
k = 4
s = 2
p = 1
output = simple_conv_encoder_image_classification.next_size(
dim_x, dim_y, k, s, p)
expected_value = infer_conv_size(dim_x, k, s, p)
assert isinstance(output, tuple)
assert output[0] == expected_value and output[1] == expected_value
| 5,338,107
|
def assert_array_almost_equal(
x: numpy.ndarray, y: numpy.ndarray, decimal: int, err_msg: Literal["size=51"]
):
"""
usage.scipy: 8
"""
...
| 5,338,108
|
def build_model(name, num_classes, loss='softmax', pretrained=True,
use_gpu=True, dropout_prob=0.0, feature_dim=512, fpn=True, fpn_dim=256,
gap_as_conv=False, input_size=(256, 128), IN_first=False):
"""A function wrapper for building a model.
"""
avai_models = list(__model_factory.keys())
if name not in avai_models:
raise KeyError('Unknown model: {}. Must be one of {}'.format(name, avai_models))
return __model_factory[name](
num_classes=num_classes,
loss=loss,
pretrained=pretrained,
use_gpu=use_gpu,
dropout_prob=dropout_prob,
feature_dim=feature_dim,
fpn=fpn,
fpn_dim=fpn_dim,
gap_as_conv=gap_as_conv,
input_size=input_size,
IN_first=IN_first
)
| 5,338,109
|
def policy_absent(name):
"""
Ensure that the named policy is not present
:param name: The name of the policy to be deleted
:returns: The result of the state execution
:rtype: dict
"""
current_policy = __salt__['mdl_vault.get_policy'](name)
ret = {'name': name,
'comment': '',
'result': False,
'changes': {}}
if not current_policy:
ret['result'] = True
ret['comment'] = ('The {policy_name} policy is not present.'.format(
policy_name=name))
elif __opts__['test']:
ret['result'] = None
if current_policy:
ret['changes']['old'] = current_policy
ret['changes']['new'] = {}
ret['comment'] = ('The {policy_name} policy {suffix}.'.format(
policy_name=name,
suffix='will be deleted' if current_policy else 'is not present'))
else:
try:
__salt__['mdl_vault.delete_policy'](name)
ret['result'] = True
            ret['comment'] = ('The {policy_name} policy was successfully '
                              'deleted.'.format(policy_name=name))
ret['changes']['old'] = current_policy
ret['changes']['new'] = {}
except __utils__['mdl_vault.vault_error']() as e:
log.exception(e)
            ret['comment'] = ('The {policy_name} policy failed to be '
                              'deleted.'.format(policy_name=name))
return ret
| 5,338,110
|
def _tolist(arg):
"""
Assure that *arg* is a list, e.g. if string or None are given.
Parameters
----------
arg :
Argument to make list
Returns
-------
list
list(arg)
Examples
--------
>>> _tolist('string')
['string']
>>> _tolist([1,2,3])
[1, 2, 3]
>>> _tolist(None)
[None]
"""
if isinstance(arg, str):
return [arg]
try:
return list(arg)
except TypeError:
return [arg]
| 5,338,111
|
def calc_Qhs_sys(bpr, tsd):
"""
    Calculate the final heating-system loads per energy carrier.
    """
    # GET SYSTEMS EFFICIENCIES
energy_source = bpr.supply['source_hs']
scale_technology = bpr.supply['scale_hs']
efficiency_average_year = bpr.supply['eff_hs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
tsd['E_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NATURALGAS":
tsd['NG_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "OIL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "COAL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['SOLAR_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "WOOD":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / HEATING')
elif scale_technology == "DISTRICT":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif scale_technology == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / HEATING')
return tsd
| 5,338,112
|
def wait_net_service(server, port, timeout=None):
""" Wait for network service to appear
@param timeout: in seconds, if None or 0 wait forever
    @return: True or False; if timeout is None it may return only True or
    throw an unhandled network exception
"""
import socket
s = socket.socket()
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
s.connect((server, port))
except (socket.timeout, socket.error):
pass
else:
s.close()
return True
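# Usage sketch (added; the host and port below are placeholder values): block for
# up to 30 seconds waiting for a local service to accept TCP connections.
if wait_net_service("127.0.0.1", 8080, timeout=30):
    print("service is up")
else:
    print("gave up waiting after 30 s")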
| 5,338,113
|
def latex_nucleus(nucleus):
"""Creates a isotope symbol string for processing by LaTeX.
Parameters
----------
nucleus : str
        Of the form `'<mass><sym>'`, where `'<mass>'` is the nucleus'
mass number and `'<sym>'` is its chemical symbol. I.e. for
lead-207, `nucleus` would be `'207Pb'`.
Returns
-------
latex_nucleus : str
Of the form ``$^{<mass>}$<sym>`` i.e. given `'207Pb'`, the
return value would be ``$^{207}$Pb``
Raises
------
ValueError
If `nucleus` does not match the regex ``^[0-9]+[a-zA-Z]+$``
"""
    if re.fullmatch(r'\d+[a-zA-Z]+', nucleus):
mass = re.search(r'\d+', nucleus).group()
sym = re.search(r'[a-zA-Z]+', nucleus).group()
return f'$^{{{mass}}}${sym}'
else:
raise ValueError(
f'{cols.R}`nucleus` is invalid. Should match the regex'
f' \\d+[a-zA-Z]+{cols.END}'
)
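# Illustrative examples (added; assumes `re` is imported and that `cols` above
# supplies the ANSI colour codes used only in the error message):
import re

assert latex_nucleus("207Pb") == "$^{207}$Pb"
assert latex_nucleus("13C") == "$^{13}$C"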
| 5,338,114
|
def ConvertToFloat(line, colnam_list):
"""
    Convert some columns (in colnam_list) to float, rounded to 3 decimal places.
:param line: a dictionary from DictReader.
:param colnam_list: float columns
:return: a new dictionary
"""
for name in colnam_list:
line[name] = round(float(line[name]), 3)
return line
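# Usage sketch (added; the column names are illustrative): round selected string
# columns of a csv.DictReader row to three-decimal floats.
row = {"id": "42", "lat": "52.520008", "lon": "13.404954"}
assert ConvertToFloat(row, ["lat", "lon"]) == {"id": "42", "lat": 52.52, "lon": 13.405}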
| 5,338,115
|
def rnn_step(x, prev_h, Wx, Wh, b):
"""
Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
activation function.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data for this timestep, of shape (N, D).
- prev_h: Hidden state from previous timestep, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
"""
next_h = np.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
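# Shape check (added for illustration): N=2 samples, D=3 input features, H=4 hidden units.
import numpy as np

N, D, H = 2, 3, 4
x = np.random.randn(N, D)
prev_h = np.zeros((N, H))
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.zeros(H)
assert rnn_step(x, prev_h, Wx, Wh, b).shape == (N, H)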
| 5,338,116
|
def 世界(链接, 简称=无): #py:World
"""
用来切换到一个指定的世界。
如果当前的世界和参数里所指定的世界不一致,那么引用此函数将使当前的世界切换为指定
的世界,其后的指令将会被忽略。
如果当前的世界和参数里所指定的世界一致,那么此函数将会被忽略,其后的指令将会被执
行。
如果指定的世界没有出现在世界菜单里,那么引用此函数将添加其至菜单。
参数:
链接:有两种选择——出现在世界菜单里的名称,或者在某个网站上的世界的链接。
简称:(可选参数)出现在世界菜单里的名称
例子:
>>> 世界("空白") # 默认包含的世界
>>> 世界("http://reeborg.ca/my_world") # 虚构的连接
# 如果成功,名称“http://reeborg.ca/my_world”将被加入世界菜单
>>> 世界("http://reeborg.ca/my_world", "Hello")
# 如果成功,名称“Hello”而不是链接将被加入世界菜单
"""
if 简称 is None:
RUR._World_(链接)
else:
RUR._World_(链接, 简称)
| 5,338,117
|
def transform_application_assigned_users(json_app_data: str) -> List[str]:
"""
Transform application users data for graph consumption
:param json_app_data: raw json application data
    :return: list of user ids
"""
users: List[str] = []
app_data = json.loads(json_app_data)
for user in app_data:
users.append(user["id"])
return users
| 5,338,118
|
def testapp(app):
"""Create Webtest app."""
return TestApp(app)
| 5,338,119
|
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
| 5,338,120
|
def get_user_signatures(user_id):
"""
Given a user ID, returns the user's signatures.
:param user_id: The user's ID.
:type user_id: string
:return: list of signature data for this user.
:rtype: [dict]
"""
user = get_user_instance()
try:
user.load(user_id)
except DoesNotExist as err:
return {'errors': {'user_id': str(err)}}
signatures = user.get_user_signatures()
return [agr.to_dict() for agr in signatures]
| 5,338,121
|
def post_vehicle_action():
""" Add vehicle
:return:
"""
output = JsonOutput()
try:
if not request.is_json:
raise TypeError('Payload is not json')
payload = request.json
usecases.SetVehicleUsecase(db=db, vehicle=payload).execute()
output.add(status=200, response=json.dumps({'data': request.json}))
except Exception as error:
db.session.rollback()
app.logger.critical(str(error))
output.add(status=400, response=json.dumps({'error': str(error)}))
return output.show()
| 5,338,122
|
def get_day_suffix(day):
"""
Returns the suffix of the day, such as in 1st, 2nd, ...
"""
if day in (1, 21, 31):
return 'st'
    elif day in (2, 22):
        return 'nd'
elif day in (3, 23):
return 'rd'
else:
return 'th'
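# Illustrative examples (added): 11, 12 and 13 all take 'th', as do most other days.
assert get_day_suffix(1) == 'st'
assert get_day_suffix(12) == 'th'
assert get_day_suffix(23) == 'rd'
assert get_day_suffix(30) == 'th'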
| 5,338,123
|
def retain_images(image_dir,xml_file, annotation=''):
"""Deprecated"""
image_in_boxes_dict=return_image_in_boxes_dict(image_dir,xml_file, annotation)
return [img for img in image_in_boxes_dict if image_in_boxes_dict[img]]
| 5,338,124
|
def compute_owa(
metrics: List[Tuple[float, float]],
datasets: Dict[K, DatasetSplit],
metadata: List[MetaData],
) -> float:
"""
Computes the OWA metric from the M4 competition, using a weighted average of the relative
MASE and sMAPE metrics depending on the size of the datasets.
Args:
metrics: The forecast's metrics (MASE and sMAPE).
datasets: The datasets for which the forecasts have been generated, mapped from a hashable
so that computations do not have to be repeated.
metadata: Metadata available for the dataset.
Returns:
The OWA metric value.
"""
assert (
len(metrics) == len(datasets) == len(metadata)
), "The lengths of the provided lists must be equal."
dataset_weights = np.array([len(d.gluonts()) for d in datasets.values()])
dataset_weights = dataset_weights / dataset_weights.sum()
naive_mase = 0
naive_smape = 0
actual_mase = 0
actual_smape = 0
for metric, (dataset_key, split), meta, weight in zip(
metrics, datasets.items(), metadata, dataset_weights
):
cache_file = Path.home() / ".cache" / "naive2" / f"{dataset_key}"
if cache_file.exists():
naive_forecast = QuantileForecasts.load(cache_file)
else:
naive_forecast = _naive_2_forecasts(
split.gluonts(), meta.freq, cast(int, meta.prediction_length)
)
cache_file.parent.mkdir(parents=True, exist_ok=True)
naive_forecast.save(cache_file)
data = split.evaluation()
seasonal_error = naive_error(data.past, get_seasonality(meta.freq))
naive_mase += (
mase(naive_forecast.median, data.future, seasonal_error) * weight
)
naive_smape += smape(naive_forecast.median, data.future) * weight
actual_mase += metric[0] * weight
actual_smape += metric[1] * weight
return 0.5 * (actual_smape / naive_smape + actual_mase / naive_mase)
| 5,338,125
|
def do_tns(catalog):
"""Load TNS metadata."""
session = requests.Session()
task_str = catalog.get_current_task_str()
tns_url = 'https://wis-tns.weizmann.ac.il/'
search_url = ('https://wis-tns.weizmann.ac.il/'+
'search?&discovered_period_value=30&discovered_period_units=years'+
'&unclassified_at=0&classified_sne=0&include_frb=0&name=&name_like=0'+
'&isTNS_AT=all&public=all&ra=&decl=&radius=&coords_unit=arcsec&reporting_groupid%5B%5D=null'+
'&groupid%5B%5D=null&classifier_groupid%5B%5D=null&objtype%5B%5D=27'+
'&at_type%5B%5D=null&date_start%5Bdate%5D=&date_end%5Bdate%5D=&discovery_mag_min=&discovery_mag_max='+
'&internal_name=&discoverer=&classifier=&spectra_count=&redshift_min=&redshift_max=&hostname='+
'&ext_catid=&ra_range_min=&ra_range_max=&decl_range_min=&decl_range_max='+
'&discovery_instrument%5B%5D=null&classification_instrument%5B%5D=null&associated_groups%5B%5D=null'+
'&at_rep_remarks=&class_rep_remarks=&frb_repeat=all&frb_repeater_of_objid=&frb_measured_redshift=0'+
'&frb_dm_range_min=&frb_dm_range_max=&frb_rm_range_min=&frb_rm_range_max=&frb_snr_range_min='+
'&frb_snr_range_max=&frb_flux_range_min=&frb_flux_range_max=&num_page=500&display%5Bredshift%5D=1'+
'&display%5Bhostname%5D=1&display%5Bhost_redshift%5D=1&display%5Bsource_group_name%5D=1'+
'&display%5Bclassifying_source_group_name%5D=1&display%5Bdiscovering_instrument_name%5D=0'+
'&display%5Bclassifing_instrument_name%5D=0&display%5Bprograms_name%5D=0&display%5Binternal_name%5D=1'+
'&display%5BisTNS_AT%5D=0&display%5Bpublic%5D=1&display%5Bend_pop_period%5D=0'+
'&display%5Bspectra_count%5D=1&display%5Bdiscoverymag%5D=1&display%5Bdiscmagfilter%5D=1&display'+
'%5Bdiscoverydate%5D=1&display%5Bdiscoverer%5D=1&display%5Bremarks%5D=0&display%5Bsources%5D=0'+
'&display%5Bbibcode%5D=0&display%5Bext_catalogs%5D=0&format=csv')
csvtxt = catalog.load_url(search_url,
os.path.join(catalog.get_current_task_repo(),
'TNS', 'index.csv'),timeout=220)
data = read(csvtxt, format='csv')
for rrow in pbar(data, task_str):
row = dict((x, str(rrow[x])) for x in rrow.columns)
name = catalog.add_entry(row['Name'])
source = catalog.entries[name].add_source(name='Transient Name Server', url=tns_url)
if int(float(row['Discovery Mag/Flux'])) >= 8:
catalog.entries[name].add_quantity(CATACLYSMIC.MAX_VISUAL_APP_MAG, row['Discovery Mag/Flux'],
source)
catalog.entries[name].add_quantity(CATACLYSMIC.RA, row['RA'], source)
catalog.entries[name].add_quantity(CATACLYSMIC.DEC, row['DEC'], source)
catalog.entries[name].add_quantity(CATACLYSMIC.DISCOVER_DATE, row['Discovery Date (UT)'].replace('-', '/'), source)
catalog.entries[name].add_quantity(CATACLYSMIC.CLAIMED_TYPE, 'Cataclysmic_Variable', source)
catalog.journal_entries()
| 5,338,126
|
def test_local_to_global_coords(
local_box: BoundingBox, embedding_box: BoundingBox, expected_embedded_box: BoundingBox
) -> None:
"""
Testing func: local_to_global_coords returns BoundingBox with global coords correctly
"""
# Act
embedded_box = local_to_global_coords(local_box, embedding_box)
# Assert
assert embedded_box == expected_embedded_box
| 5,338,127
|
def parse(cell, config):
"""Extract connection info and result variable from SQL
Please don't add any more syntax requiring
special parsing.
Instead, add @arguments to SqlMagic.execute.
We're grandfathering the
connection string and `<<` operator in.
"""
result = {"connection": "", "sql": "", "result_var": None}
pieces = cell.split(None, 3)
if not pieces:
return result
result["connection"] = _connection_string(pieces[0], config)
if result["connection"]:
pieces.pop(0)
if len(pieces) > 1 and pieces[1] == "<<":
result["result_var"] = pieces.pop(0)
pieces.pop(0) # discard << operator
result["sql"] = (" ".join(pieces)).strip()
return result
| 5,338,128
|
def createResource(url, user, pWd, resourceName, resourceJson):
"""
    create a new resource based on the provided JSON (resourceJson)
    returns rc=200 (valid) & other rc's from the post
"""
# create a new resource
apiURL = url + "/access/1/catalog/resources/"
header = {"content-type": "application/json"}
print("\tcreating resource: " + resourceName)
newResourceResp = requests.post(
apiURL,
data=json.dumps(resourceJson),
headers=header,
auth=HTTPBasicAuth(user, pWd),
verify=False,
)
print("\trc=" + str(newResourceResp.status_code))
print("\tbody=" + str(newResourceResp.text))
return newResourceResp.status_code
| 5,338,129
|
def generate_invalid_sequence():
"""Generates an invalid sequence of length 10"""
return ''.join(np.random.choice(list(string.ascii_uppercase + string.digits), size=10))
| 5,338,130
|
def transform_bcs_profile(T, axis, Nc):
""" Translates the profile to body cs and then transforms it for rotation.
"""
from input_surface import circle
profile = circle(Nc, radius = 1, flag = 0)
Pb_new = np.zeros((Nc, 3), dtype = float)
ind_p = np.arange(0, 3*Nc, step = 3, dtype = int)
p_new = np.zeros(3*Nc, dtype = float)
p_new[ind_p] = profile[:, 0] + axis[0]
p_new[ind_p + 1] = profile[:, 1] + axis[1]
p_new[ind_p + 2] = axis[2]
P_new = np.dot(T.toarray(), p_new)
Pb_new[:, 0] = P_new[ind_p]
Pb_new[:, 1] = P_new[ind_p + 1]
Pb_new[:, 2] = P_new[ind_p + 2]
return Pb_new
| 5,338,131
|
def count_configuration(config, root=True, num_samples_per_dist=1):
"""Recursively count configuration."""
count = 1
if isinstance(config, dict):
for _, v in sorted(config.items()):
count *= count_configuration(
v, root=False, num_samples_per_dist=num_samples_per_dist)
elif callable(config):
assert num_samples_per_dist > 0, ('callable not allowed in config with '
'num_samples_per_dist < 1')
count *= num_samples_per_dist
elif isinstance(config, list):
if root:
count = ()
for c in config:
count += (count_configuration(
c, root=False, num_samples_per_dist=num_samples_per_dist),)
else:
count *= len(config)
return count
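# Usage sketch (added): a search space with two 3-option lists yields 3 * 3 = 9 configurations.
space = {"lr": [0.1, 0.01, 0.001], "depth": [2, 4, 6]}
assert count_configuration(space) == 9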
| 5,338,132
|
def test_file_getters(config_struct):
"""Test get_* functions with no installed extensions."""
installed_jsons = maninex.get_existing_jsons(config_struct.json_dir)
installed_folders = maninex.get_existing_folders(config_struct.ext_dir)
assert len(installed_jsons) == 0
assert len(installed_folders) == 0
for ext_ref in maninex.get_exts_from_config(config_struct.config):
assert not maninex.is_installed(config_struct,
ext_ref.idstr)
| 5,338,133
|
def transition(state_table):
"""Decorator used to set up methods which cause transitions between states.
The decorator is applied to methods of the context (state machine) class.
Invoking the method may cause a transition to another state. To define
what the transitions are, the nextStates method of the TransitionTable class
is used.
"""
stVarName = state_table.inst_state_name
def wrapper(func):
state_table._addEventHandler(func.__name__)
@wraps(func)
def objCall(self, *args, **kwargs):
state_var = getattr(self, stVarName)
state_var.setXition(func)
rtn = func(self, *args, **kwargs)
state_var.toNextState(self)
return rtn
objCall.wrapping = stVarName
return objCall
return wrapper
| 5,338,134
|
def isfinite(x: Union[ivy.Array, ivy.NativeArray], f: ivy.Framework = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Tests each element x_i of the input array x to determine if finite (i.e., not NaN and not equal to positive
or negative infinity).
:param x: Input array.
:type x: array
:param f: Machine learning framework. Inferred from inputs if None.
:type f: ml_framework, optional
:return: an array containing test results. An element out_i is True if x_i is finite and False otherwise.
The returned array must have a data type of bool.
"""
return _cur_framework(x, f=f).isfinite(x)
| 5,338,135
|
def form_overwrite_file(
PATH: Path, QUESTION: Optional[str] = None, DEFAULT_NO: bool = True
) -> bool:
"""Yes/no form to ask whether file should be overwritten if already existing."""
if QUESTION is None:
QUESTION = "Overwrite {PATH}?"
save = True
if PATH.is_file():
save = form_yes_or_no(QUESTION, DEFAULT_NO=DEFAULT_NO)
return save
| 5,338,136
|
def dist(integer):
"""
Return the distance from center.
"""
if integer == 1:
return 0
c = which_layer(integer)
rows = layer_rows(c)
l = len(rows[0])
mid = (l / 2) - 1
for r in rows:
if integer in r:
list_pos = r.index(integer)
return c + abs(mid - list_pos) - 1
| 5,338,137
|
def get_market_fundamental_by_ticker(date: str, market: str="KOSPI", prev=False) -> DataFrame:
"""특정 일자의 전종목 PER/PBR/배당수익률 조회
Args:
date (str ): 조회 일자 (YYMMDD)
market (str, optional): 조회 시장 (KOSPI/KOSDAQ/KONEX/ALL)
prev (bool, optional): 조회 일자가 휴일일 경우 이전 영업일 혹은 이후 영업일 선택
Returns:
DataFrame:
>> get_market_fundamental_by_ticker("20210104")
BPS PER PBR EPS DIV DPS
티커
095570 6802 4.660156 0.669922 982 6.550781 300
006840 62448 11.648438 0.399902 2168 2.970703 750
027410 15699 17.765625 0.320068 281 2.199219 110
282330 36022 15.062500 3.660156 8763 2.050781 2700
138930 25415 3.380859 0.219971 1647 6.468750 360
"""
if isinstance(date, datetime.datetime):
date = _datetime2string(date)
date = date.replace("-", "")
df = krx.get_market_fundamental_by_ticker(date, market)
holiday = (df[['BPS', 'PER', 'PBR', 'EPS', 'DIV', 'DPS']] == 0).all(axis=None)
if holiday:
target_date = get_nearest_business_day_in_a_week(date=date, prev=prev)
df = krx.get_market_fundamental_by_ticker(target_date, market)
# print(f"The date you entered {date} seems to be a holiday. PYKRX changes the date parameter to {target_date}.")
return df
| 5,338,138
|
def cite():
"""Print the reference"""
cite_data = """
=========== MLA ===========
Bosch, J., Marxer, R., Gomez, E., "Evaluation and Combination of
Pitch Estimation Methods for Melody Extraction in Symphonic
Classical Music", Journal of New Music Research (2016)
========== Bibtex ==========
@article{bosch2016evaluation,
title={Evaluation and combination of pitch estimation methods for melody extraction in symphonic classical music},
author={Bosch, Juan J and Marxer, Ricard and G{\'o}mez, Emilia},
journal={Journal of New Music Research},
volume={45},
number={2},
pages={101--117},
year={2016},
    publisher={Taylor \\& Francis}
    }
    """
print(cite_data)
| 5,338,139
|
def get_all(request):
""" Gets all tags in the db with counts of use """
tags = []
for tag in Tag.objects.all():
tag_data = {
'name': tag.name,
'count': tag.facebookimage_set.distinct().count()
}
if tag_data['count'] > 0:
tags.append(tag_data)
return JsonResponse({'data': tags})
| 5,338,140
|
def test_unsupported_operators():
"""Verify unsupported operators
"""
def unsupported_operator(op, func):
"""Verify error messages on unsupported operators
"""
# Indicate what we're testing in the nose printout
sys.stderr.write(f'\b\b\b\b\b\b: {op} ... ')
a = uut.FixedPoint(random.getrandbits(10))
b = uut.FixedPoint(random.getrandbits(10))
f = 1.0
d = 1
regex = re.escape(op.replace('%', '%%'))
errmsg = f"unsupported operand type\\(s\\) for {regex}: %r and %r"
with nose.tools.assert_raises_regex(TypeError, errmsg % ('FixedPoint', 'FixedPoint')):
func(a, b)
with nose.tools.assert_raises_regex(TypeError, errmsg % ('FixedPoint', 'int')):
func(a, d)
with nose.tools.assert_raises_regex(TypeError, errmsg % ('int', 'FixedPoint')):
func(d, b)
with nose.tools.assert_raises_regex(TypeError, errmsg % ('float', 'FixedPoint')):
func(f, b)
with nose.tools.assert_raises_regex(TypeError, errmsg % ('FixedPoint', 'float')):
func(a, f)
for args in [
('@', operator.matmul),
('@=', operator.imatmul),
('/', operator.truediv),
('/=', operator.itruediv),
('//', operator.floordiv),
('//=', operator.ifloordiv),
('%', operator.mod),
('%=', operator.imod),
('divmod()', divmod),
]:
yield unsupported_operator, *args
| 5,338,141
|
def quadratic_crop(x, bbox, alpha=1.0):
"""bbox is xmin, ymin, xmax, ymax"""
im_h, im_w = x.shape[:2]
bbox = np.array(bbox, dtype=np.float32)
bbox = np.clip(bbox, 0, max(im_h, im_w))
center = 0.5 * (bbox[0] + bbox[2]), 0.5 * (bbox[1] + bbox[3])
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
l = int(alpha * max(w, h))
l = max(l, 2)
required_padding = -1 * min(center[0] - l, center[1] - l, im_w -
(center[0] + l), im_h - (center[1] + l))
required_padding = int(np.ceil(required_padding))
if required_padding > 0:
padding = [
[required_padding, required_padding],
[required_padding, required_padding],
]
padding += [[0, 0]] * (len(x.shape) - 2)
x = np.pad(x, padding, "reflect")
center = center[0] + required_padding, center[1] + required_padding
xmin = int(center[0] - l / 2)
ymin = int(center[1] - l / 2)
return np.array(x[ymin:ymin + l, xmin:xmin + l, ...])
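# Usage sketch (added; the box values are illustrative): crop a square patch around
# a bounding box, with reflect-padding when the enlarged square leaves the image.
import numpy as np

img = np.random.rand(100, 120, 3)                       # H x W x C image
patch = quadratic_crop(img, bbox=[30, 40, 60, 90], alpha=1.2)
assert patch.shape[0] == patch.shape[1]                 # the crop is square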
| 5,338,142
|
def cdlbreakaway(
client,
symbol,
timeframe="6m",
opencol="open",
highcol="high",
lowcol="low",
closecol="close",
):
"""This will return a dataframe of breakaway for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
opencol (string): column to use to calculate
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
val = t.CDLBREAKAWAY(
df[opencol].values.astype(float),
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
)
return pd.DataFrame(
{
opencol: df[opencol].values,
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cdlbreakaway": val,
}
)
| 5,338,143
|
def depends_on(*args):
"""Caches a `Model` parameter based on its dependencies.
Example
-------
>>> @property
>>> @depends_on('x', 'y')
>>> def param(self):
>>> return self.x * self.y
Parameters
----------
args : list of str
List of parameters this parameter depends on.
"""
cache = {}
def _wrapper(fn):
def _fn(self):
key = tuple(getattr(self, arg) for arg in args)
if key not in cache:
cache[key] = fn(self)
return cache[key]
return _fn
return _wrapper
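# Usage sketch (added): cache an expensive derived attribute keyed on (x, y).
class Model:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @property
    @depends_on('x', 'y')
    def param(self):
        return self.x * self.y

assert Model(2, 3).param == 6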
| 5,338,144
|
def l96(x, t, f):
""""This describes the derivative for the non-linear Lorenz 96 Model of arbitrary dimension n.
This will take the state vector x and return the equation for dxdt"""
# shift minus and plus indices
x_m_2 = np.concatenate([x[-2:], x[:-2]])
x_m_1 = np.concatenate([x[-1:], x[:-1]])
x_p_1 = np.append(x[1:], x[0])
dxdt = (x_p_1-x_m_2)*x_m_1 - x + f
return dxdt
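# Usage sketch (added): integrate a 40-variable Lorenz 96 system with SciPy's
# odeint, passing the forcing f=8.0 through the extra-argument tuple.
import numpy as np
from scipy.integrate import odeint

n, f = 40, 8.0
x0 = f * np.ones(n)
x0[0] += 0.01                     # small perturbation to break the symmetric fixed point
t = np.linspace(0.0, 5.0, 501)
trajectory = odeint(l96, x0, t, args=(f,))
assert trajectory.shape == (501, 40)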
| 5,338,145
|
def translation(im0, im1, filter_pcorr=0, odds=1, constraints=None,
reports=None):
"""
Return translation vector to register images.
It tells how to translate the im1 to get im0.
Args:
im0 (2D numpy array): The first (template) image
im1 (2D numpy array): The second (subject) image
filter_pcorr (int): Radius of the minimum spectrum filter
for translation detection, use the filter when detection fails.
Values > 3 are likely not useful.
        constraints (dict or None): Specify preferences for the sought values.
For more detailed documentation, refer to :func:`similarity`.
The only difference is that here, only keys ``tx`` and/or ``ty``
(i.e. both or any of them or none of them) are used.
        odds (float): The greater the odds are, the higher is the preference
            of the angle + 180 over the original angle. Odds of -1 are the same
            as infinity.
The value 1 is neutral, the converse of 2 is 1 / 2 etc.
Returns:
dict: Contains following keys: ``angle``, ``tvec`` (Y, X),
and ``success``.
"""
angle = 0
report_one = report_two = None
if reports is not None and reports.show("translation"):
report_one = reports.copy_empty()
report_two = reports.copy_empty()
# We estimate translation for the original image...
tvec, succ = _translation(im0, im1, filter_pcorr, constraints, report_one)
# ... and for the 180-degrees rotated image (the rotation estimation
# doesn't distinguish rotation of x vs x + 180deg).
tvec2, succ2 = _translation(im0, utils.rot180(im1), filter_pcorr,
constraints, report_two)
pick_rotated = False
if succ2 * odds > succ or odds == -1:
pick_rotated = True
if reports is not None and reports.show("translation"):
reports["t0-orig"] = report_one["amt-orig"]
reports["t0-postproc"] = report_one["amt-postproc"]
reports["t0-success"] = succ
reports["t0-tvec"] = tuple(tvec)
reports["t1-orig"] = report_two["amt-orig"]
reports["t1-postproc"] = report_two["amt-postproc"]
reports["t1-success"] = succ2
reports["t1-tvec"] = tuple(tvec2)
if reports is not None and reports.show("transformed"):
toapp = [
transform_img(utils.rot180(im1), tvec=tvec2, mode="wrap", order=3),
transform_img(im1, tvec=tvec, mode="wrap", order=3),
]
if pick_rotated:
toapp = toapp[::-1]
reports["after_tform"].extend(toapp)
if pick_rotated:
tvec = tvec2
succ = succ2
angle += 180
ret = dict(tvec=tvec, success=succ, angle=angle)
return ret
| 5,338,146
|
def CalculateConjointTriad(proteinsequence):
"""
Calculate the conjoint triad features from protein sequence.
    Usage:
res = CalculateConjointTriad(protein)
Input: protein is a pure protein sequence.
Output is a dict form containing all 343 conjoint triad features.
"""
res = {}
proteinnum = _Str2Num(proteinsequence)
for i in range(8):
for j in range(8):
for k in range(8):
temp = str(i) + str(j) + str(k)
res[temp] = proteinnum.count(temp)
return res
| 5,338,147
|
def shutdown(forza: Forza, threadPool: ThreadPoolExecutor, listener: Listener):
"""shutdown/clean up resources
Args:
forza (Forza): forza
threadPool (ThreadPoolExecutor): thread pool
listener (Listener): keyboard listener
"""
forza.isRunning = False
threadPool.shutdown(wait=False)
listener.stop()
| 5,338,148
|
def _create(
*,
cls: Type[THparams],
data: Dict[str, JSON],
parsed_args: Dict[str, str],
cli_args: Optional[List[str]],
prefix: List[str],
argparse_name_registry: ArgparseNameRegistry,
argparsers: List[argparse.ArgumentParser],
) -> THparams:
"""Helper method to recursively create an instance of the :class:`~yahp.hparams.Hparams`.
Args:
data (Dict[str, JSON]):
A JSON dictionary of values to use to initialize the class.
parsed_args (Dict[str, str]):
Parsed args for this class.
cli_args (Optional[List[str]]):
A list of cli args. This list is modified in-place,
and all used arguments are removed from the list.
Should be None if no cli args are to be used.
prefix (List[str]):
The prefix corresponding to the subset of ``cli_args``
that should be used to instantiate this class.
argparse_name_registry (_ArgparseNameRegistry):
A registry to track CLI argument names.
argparsers (List[argparse.ArgumentParser]):
A list of :class:`~argparse.ArgumentParser` instances,
which is extended in-place.
Returns:
An instance of the class.
"""
kwargs: Dict[str, HparamsField] = {}
deferred_create_calls: Dict[str, Union[_DeferredCreateCall, # singleton field
List[_DeferredCreateCall], # list field
]] = {}
# keep track of missing required fields so we can build a nice error message
missing_required_fields: List[str] = []
cls.validate_keys(list(data.keys()), allow_missing_keys=True)
field_types = get_type_hints(cls)
for f in fields(cls):
if not f.init:
continue
prefix_with_fname = list(prefix) + [f.name]
try:
ftype = HparamsType(field_types[f.name])
full_name = ".".join(prefix_with_fname)
if full_name in parsed_args and parsed_args[full_name] != MISSING:
# use CLI args first
argparse_or_yaml_value = parsed_args[full_name]
elif f.name in data:
# then use YAML
argparse_or_yaml_value = data[f.name]
elif full_name.upper() in os.environ:
# then use environment variables
argparse_or_yaml_value = os.environ[full_name.upper()]
else:
# otherwise, set it as MISSING so the default will be used
argparse_or_yaml_value = MISSING
if not ftype.is_hparams_dataclass:
if argparse_or_yaml_value == MISSING:
if not is_field_required(f):
# if it's a primitive and there's a default value,
# then convert and use it.
# Sometimes primitives will not have correct default values
# (e.g. type is float, but the default is an int)
kwargs[f.name] = ftype.convert(get_default_value(f), full_name)
else:
kwargs[f.name] = ftype.convert(argparse_or_yaml_value, full_name)
else:
if f.name not in cls.hparams_registry:
# concrete, singleton hparams
# list of concrete hparams
# potentially none
if not ftype.is_list:
# concrete, singleton hparams
# potentially none
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# concrete, singleton hparams
sub_yaml = data.get(f.name)
if sub_yaml is None:
sub_yaml = {}
if not isinstance(sub_yaml, dict):
raise ValueError(f"{full_name} must be a dict in the yaml")
deferred_create_calls[f.name] = _DeferredCreateCall(
hparams_cls=ftype.type,
data=sub_yaml,
prefix=prefix_with_fname,
parser_args=retrieve_args(cls=ftype.type,
prefix=prefix_with_fname,
argparse_name_registry=argparse_name_registry),
)
else:
# list of concrete hparams
# potentially none
# concrete lists not added to argparse, so just load the yaml
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# list of concrete hparams
# concrete lists not added to argparse, so just load the yaml
sub_yaml = data.get(f.name)
if sub_yaml is None:
sub_yaml = []
if isinstance(sub_yaml, dict):
_emit_should_be_list_warning(full_name)
sub_yaml = [sub_yaml]
if not isinstance(sub_yaml, list):
raise TypeError(f"{full_name} must be a list in the yaml")
deferred_calls: List[_DeferredCreateCall] = []
for (i, sub_yaml_item) in enumerate(sub_yaml):
if sub_yaml_item is None:
sub_yaml_item = {}
if not isinstance(sub_yaml_item, dict):
raise TypeError(f"{full_name} must be a dict in the yaml")
deferred_calls.append(
_DeferredCreateCall(
hparams_cls=ftype.type,
data=sub_yaml_item,
prefix=prefix_with_fname + [str(i)],
parser_args=None,
))
deferred_create_calls[f.name] = deferred_calls
else:
# abstract, singleton hparams
# list of abstract hparams
# potentially none
if not ftype.is_list:
# abstract, singleton hparams
# potentially none
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# abstract, singleton hparams
# look up type in the registry
# should only have one key in the dict
# argparse_or_yaml_value is a str if argparse, or a dict if yaml
if argparse_or_yaml_value == MISSING:
# use the hparams default
continue
if argparse_or_yaml_value is None:
raise ValueError(f"Field {full_name} is required and cannot be None.")
if isinstance(argparse_or_yaml_value, str):
key = argparse_or_yaml_value
else:
if not isinstance(argparse_or_yaml_value, dict):
raise ValueError(
f"Field {full_name} must be a dict with just one key if specified in the yaml")
try:
key, _ = extract_only_item_from_dict(argparse_or_yaml_value)
except ValueError as e:
raise ValueError(f"Field {full_name} " + e.args[0])
yaml_val = data.get(f.name)
if yaml_val is None:
yaml_val = {}
if not isinstance(yaml_val, dict):
raise ValueError(
f"Field {'.'.join(prefix_with_fname)} must be a dict if specified in the yaml")
yaml_val = yaml_val.get(key)
if yaml_val is None:
yaml_val = {}
if not isinstance(yaml_val, dict):
raise ValueError(
f"Field {'.'.join(prefix_with_fname + [key])} must be a dict if specified in the yaml"
)
deferred_create_calls[f.name] = _DeferredCreateCall(
hparams_cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
data=yaml_val,
parser_args=retrieve_args(cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
argparse_name_registry=argparse_name_registry),
)
else:
# list of abstract hparams
# potentially none
if ftype.is_optional and is_none_like(argparse_or_yaml_value, allow_list=ftype.is_list):
# none
kwargs[f.name] = None
else:
# list of abstract hparams
# argparse_or_yaml_value is a List[str] if argparse, or a List[Dict[str, Hparams]] if yaml
if argparse_or_yaml_value == MISSING:
# use the hparams default
continue
# First get the keys
                            # Argparse has precedence. If there are keys defined in argparse, use only those
# These keys will determine what is loaded
if argparse_or_yaml_value is None:
raise ValueError(f"Field {full_name} is required and cannot be None.")
if isinstance(argparse_or_yaml_value, dict):
_emit_should_be_list_warning(full_name)
argparse_or_yaml_value = [argparse_or_yaml_value]
if not isinstance(argparse_or_yaml_value, list):
raise ValueError(f"Field {full_name} should be a list")
keys: List[str] = []
for item in argparse_or_yaml_value:
if isinstance(item, str):
keys.append(item)
else:
if not isinstance(item, dict):
raise ValueError(f"Field {full_name} should be a list of dicts in the yaml")
key, _ = extract_only_item_from_dict(item)
keys.append(key)
key = argparse_or_yaml_value
# Now, load the values for these keys
yaml_val = data.get(f.name)
if yaml_val is None:
yaml_val = []
if isinstance(yaml_val, dict):
# already emitted the warning, no need to do it again
yaml_val = [yaml_val]
if not isinstance(yaml_val, list):
raise ValueError(
f"Field {'.'.join(prefix_with_fname)} must be a list if specified in the yaml")
# Convert the yaml list to a dict
yaml_dict: Dict[str, Dict[str, JSON]] = {}
for i, yaml_val_entry in enumerate(yaml_val):
if not isinstance(yaml_val_entry, dict):
raise ValueError(
f"Field {'.'.join(list(prefix_with_fname) + [str(i)])} must be a dict if specified in the yaml"
)
k, v = extract_only_item_from_dict(yaml_val_entry)
if not isinstance(v, dict):
raise ValueError(
f"Field {'.'.join(list(prefix_with_fname) + [k])} must be a dict if specified in the yaml"
)
yaml_dict[k] = v
deferred_calls: List[_DeferredCreateCall] = []
for key in keys:
# Use the order of keys
key_yaml = yaml_dict.get(key)
if key_yaml is None:
key_yaml = {}
if not isinstance(key_yaml, dict):
raise ValueError(f"Field {'.'.join(prefix_with_fname + [key])}"
"must be a dict if specified in the yaml")
deferred_calls.append(
_DeferredCreateCall(
hparams_cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
data=key_yaml,
parser_args=retrieve_args(
cls=cls.hparams_registry[f.name][key],
prefix=prefix_with_fname + [key],
argparse_name_registry=argparse_name_registry,
),
))
deferred_create_calls[f.name] = deferred_calls
except _MissingRequiredFieldException as e:
missing_required_fields.extend(e.args)
# continue processing the other fields and gather everything together
if cli_args is None:
for fname, create_calls in deferred_create_calls.items():
sub_hparams = [
_create(
cls=deferred_call.hparams_cls,
data=deferred_call.data,
parsed_args={},
cli_args=None,
prefix=deferred_call.prefix,
argparse_name_registry=argparse_name_registry,
argparsers=argparsers,
) for deferred_call in ensure_tuple(create_calls)
]
if isinstance(create_calls, list):
kwargs[fname] = sub_hparams
else:
kwargs[fname] = sub_hparams[0]
else:
all_args: List[ParserArgument] = []
for fname, create_calls in deferred_create_calls.items():
for create_call in ensure_tuple(create_calls):
if create_call.parser_args is not None:
all_args.extend(create_call.parser_args)
argparse_name_registry.assign_shortnames()
for fname, create_calls in deferred_create_calls.items():
# TODO parse args from
sub_hparams: List[hp.Hparams] = []
for create_call in ensure_tuple(create_calls):
prefix = create_call.prefix
if create_call.parser_args is None:
parsed_arg_dict = {}
else:
parser = argparse.ArgumentParser(add_help=False)
argparsers.append(parser)
group = parser.add_argument_group(title=".".join(prefix),
description=create_call.hparams_cls.__name__)
for args in create_call.parser_args:
for arg in ensure_tuple(args):
arg.add_to_argparse(group)
parsed_arg_namespace, cli_args[:] = parser.parse_known_args(cli_args)
parsed_arg_dict = vars(parsed_arg_namespace)
sub_hparams.append(
_create(
cls=create_call.hparams_cls,
data=create_call.data,
parsed_args=parsed_arg_dict,
cli_args=cli_args,
prefix=prefix,
argparse_name_registry=argparse_name_registry,
argparsers=argparsers,
))
if isinstance(create_calls, list):
kwargs[fname] = sub_hparams
else:
kwargs[fname] = sub_hparams[0]
for f in fields(cls):
if not f.init:
continue
prefix_with_fname = ".".join(list(prefix) + [f.name])
if f.name not in kwargs:
if f.default == MISSING and f.default_factory == MISSING:
missing_required_fields.append(prefix_with_fname)
# else:
# warnings.warn(f"DefaultValueWarning: Using default value for {prefix_with_fname}. "
# "Using default values is not recommended as they may change between versions.")
if len(missing_required_fields) > 0:
# if there are any missing fields from this class, or optional but partially-filled-in subclasses,
        # then propagate back the missing fields
raise _MissingRequiredFieldException(*missing_required_fields)
return cls(**kwargs)
| 5,338,149
|
def extract_url(url):
"""Creates a short version of the URL to work with. Also returns None if its not a valid adress.
Args:
url (str): The long version of the URL to shorten
Returns:
str: The short version of the URL
"""
if url.find("www.amazon.de") != -1:
index = url.find("/dp/")
if index != -1:
index2 = index + 14
url = "https://www.amazon.de" + url[index:index2]
else:
index = url.find("/gp/")
if index != -1:
index2 = index + 22
url = "https://www.amazon.de" + url[index:index2]
else:
url = None
else:
url = None
return url
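# Illustrative examples (added; the product ID below is made up):
assert extract_url("https://www.amazon.de/Some-Product/dp/B000000000/ref=sr_1_1") \
    == "https://www.amazon.de/dp/B000000000"
assert extract_url("https://www.example.com/item/123") is None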
| 5,338,150
|
def match_name(own: str, other: str) -> bool:
"""
compares 2 medic names (respects missing middle names, or abbrev. name parts)
    Args:
        own: the first of the two names to compare
        other: the second of the two names to compare
    Returns: True if both names match
"""
    # a missing name on either side counts as a match
if own is None or other is None:
return True
own = unicodedata.normalize('NFKD', _remove_umlaut(own)).encode('ASCII', 'ignore').decode("utf-8").lower()
other = unicodedata.normalize('NFKD', _remove_umlaut(other)).encode('ASCII', 'ignore').decode("utf-8").lower()
if own == other:
return True
hn_other = parse_name(other)
hn_own = parse_name(own)
def _remove_surname_titles(surnames: List[str]) -> List[str]:
def _remove(s: str) -> str:
for t in _surname_titles:
s = s.replace(t, "")
return s
return list(map(_remove, surnames))
if hn_own is None or hn_other is None:
return False
# remove surname titles like "von" from surnames
hn_other.last_list = _remove_surname_titles(hn_other.last_list)
hn_other.last = " ".join(hn_other.last_list)
hn_own.last_list = _remove_surname_titles(hn_own.last_list)
hn_own.last = " ".join(hn_own.last_list)
    # if the last names don't match, we skip here
own_lasts = " ".join([on.lower() for on in hn_own.last_list])
other_lasts = " ".join([on.lower() for on in hn_other.last_list])
# compound surnames
if "-" in own_lasts or "-" in other_lasts:
own_lasts_splitted = own_lasts.split("-")
other_lasts_splitted = other_lasts.split("-")
matches = 0
for o in own_lasts_splitted:
for ot in other_lasts_splitted:
if o == ot or distance(o, ot) <= 1 and (len(o) >= 5 or len(ot) >= 5):
matches += 1
for o in reversed(own_lasts_splitted):
for ot in other_lasts_splitted:
if o == ot or distance(o, ot) <= 1 and (len(o) >= 5 or len(ot) >= 5):
matches += 1
if matches < 2:
return False
elif own_lasts[0] != other_lasts[0] or (own_lasts != other_lasts and distance(own_lasts, other_lasts) > 1):
return False
def _match_name_list(name: str, other: List[str]):
if name in other:
# full name match
return True
elif name.endswith(".") and name in ["{}.".format(f[0:len(name) - 1]) for f in other]:
# A. name match
return True
elif len(name) == 1 and name in [f[0] for f in other]:
# A name match
return True
return False
def _compare_names(a: List[str], b: List[str]) -> bool:
m_a = list(map(lambda n: _match_name_list(n, b), a))
m_b = list(map(lambda n: _match_name_list(n, a), b))
return m_a.count(True) >= m_a.count(False) or m_b.count(
True) >= m_b.count(False)
# check if the firstnames matches (if one side has no firstname we assume a match
first_name_matches = True if (hn_own.first == "" or hn_other.first == "") else _compare_names(hn_own.first_list,
hn_other.first_list)
own_first_middles = hn_own.first + hn_own.middle
other_first_middles = hn_other.first + hn_other.middle
    # check if the first names + middle names match (if one side has no first name we assume a match)
first_name_matches_fuzzy = own_first_middles.lower() == other_first_middles.lower() or (
own_first_middles.startswith(other_first_middles) or other_first_middles.startswith(own_first_middles))
if first_name_matches is False or first_name_matches_fuzzy is False:
# if the initials dont match, dont match
        if (len(hn_own.first) > 0 and len(hn_other.first) > 0) and hn_own.first[0] != hn_other.first[0]:
return False
# if the names are longer than 5 and start with the same letter we allow tiny typos
l_distance = distance(hn_own.first, hn_other.first)
if l_distance < 2 and (len(hn_other.first) >= 5 or len(hn_own.first) >= 5):
first_name_matches = True
    # if neither has a middle name, it's a match
if len(hn_own.middle_list) == 0 and len(hn_other.middle_list) == 0:
return first_name_matches or first_name_matches_fuzzy
    # if only one side has a middle name, it's a match
if len(hn_own.middle_list) == 0 and len(hn_other.middle_list) > 0 or len(hn_own.middle_list) > 0 and len(
hn_other.middle_list) == 0:
return first_name_matches or first_name_matches_fuzzy
return _compare_names(hn_own.middle_list, hn_other.middle_list)
| 5,338,151
|
def price_setting():
""" Sets prices """
purchasing_price = float(input("enter purchasing price: "))
new_supplier = str(input("First time user(Y/N)?: ")).lower()
if new_supplier not in ['n', 'y']:
return True, {"errorMsg": f"{new_supplier} not a valid response"}, None
if new_supplier == 'y':
days_since_reg = int(input("Enter the days since you registered?: "))
if days_since_reg < 60:
list_price = purchasing_price*2
discount_percent = 0
profit_percent = ((list_price - purchasing_price)
* 100)/purchasing_price
return False, discount_percent, profit_percent
product_reg_days = int(
input("Enter the days you had registered this product?: "))
if product_reg_days < 0:
return True, {
"errorMsg": f"{product_reg_days} is not an acceptable value"
}, None
if product_reg_days > 30:
list_price = purchasing_price*2
discount_percent, profit_percent = decide_discount(
purchasing_price*2, purchasing_price)
return False, discount_percent, profit_percent
list_price = purchasing_price*2
discount_percent = 0
profit_percent = (list_price - purchasing_price)*100/purchasing_price
return False, discount_percent, profit_percent
discount_percent, profit_percent = decide_discount(
purchasing_price*2, purchasing_price)
return False, discount_percent, profit_percent
| 5,338,152
|
def d4_grid():
"""Test functionality of routing when D4 is specified.
The elevation field in this test looks like::
1 2 3 4 5 6 7
1 2 3 0 5 0 7
1 2 3 4 0 0 7
1 2 3 0 5 6 7
1 2 0 0 0 6 7
1 2 3 0 5 6 7
1 2 3 4 5 6 7
"""
mg1 = RasterModelGrid(7, 7, 1.)
mg2 = RasterModelGrid(7, 7, 1.)
z = mg1.node_x.copy() + 1.
lake_nodes = np.array([10, 16, 17, 18, 24, 32, 33, 38, 40])
z[lake_nodes] = 0.
mg1.add_field("node", "topographic__elevation", z, units="-")
mg2.add_field("node", "topographic__elevation", z, units="-")
frD8 = FlowRouter(mg1, method="D8")
frD4 = FlowRouter(mg2, method="D4")
lfD8 = DepressionFinderAndRouter(mg1, routing="D8")
lfD4 = DepressionFinderAndRouter(mg2, routing="D4")
class DansGrid(object):
pass
d4_grid = DansGrid()
d4_grid.mg1 = mg1
d4_grid.mg2 = mg2
d4_grid.z = z
d4_grid.lake_nodes = lake_nodes
d4_grid.frD8 = frD8
d4_grid.frD4 = frD4
d4_grid.lfD8 = lfD8
d4_grid.lfD4 = lfD4
return d4_grid
| 5,338,153
|
def pass_complex_ins(mqc):
"""
The number of PASS complex insertions.
Source: count_variants.py (bcftools view)
"""
k = inspect.currentframe().f_code.co_name
try:
d = next(iter(mqc["multiqc_npm_count_variants"].values()))
v = d["pass_complex_ins"]
v = int(v)
except KeyError:
v = "NA"
return k, v
| 5,338,154
|
def get_jwt():
"""
Get authorization token and validate its signature against the public key
from /.well-known/jwks endpoint
"""
expected_errors = {
KeyError: WRONG_PAYLOAD_STRUCTURE,
AssertionError: JWK_HOST_MISSING,
InvalidSignatureError: WRONG_KEY,
DecodeError: WRONG_JWT_STRUCTURE,
InvalidAudienceError: WRONG_AUDIENCE,
TypeError: KID_NOT_FOUND
}
token = get_auth_token()
try:
jwks_payload = jwt.decode(token, options={'verify_signature': False})
assert 'jwks_host' in jwks_payload
jwks_host = jwks_payload.get('jwks_host')
key = get_public_key(jwks_host, token)
aud = request.url_root
payload = jwt.decode(
token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
)
set_ctr_entities_limit(payload)
return payload['key']
except tuple(expected_errors) as error:
message = expected_errors[error.__class__]
raise AuthorizationError(message)
| 5,338,155
|
def devlocation()->str:
"""
:return: 'local' or 'github
"""
return os.getenv('DEVLOCATION') or 'local'
| 5,338,156
|
def test_base_widget_1():
""" Assert widget is None """
from pypom_form.widgets import BaseWidget
widget = BaseWidget()
assert widget.field is None
| 5,338,157
|
def wrap_refresh(change):
"""Query the databroker with user supplied text."""
try:
query = eval("dict({})".format(db_search_widget.value))
headers = db(**query)
except NameError:
headers = []
db_search_widget.value += " -- is an invalid search"
scan_id_dict = get_scan_id_dict(headers)
select_scan_id_widget.options = scan_id_dict
| 5,338,158
|
def convert_categorical(df, col_old, conversion, col_new=None):
"""Convet categories"""
if col_new is None:
col_new = col_old
orig_values = df[col_old].values
good_rows = np.isin(orig_values, list(conversion))
df = df.iloc[good_rows]
orig_values = df[col_old].values
cat_values = np.zeros(len(df), dtype=int)
for src, dest in conversion.items():
cat_values[orig_values == src] = dest
df.loc[:, col_new] = cat_values
return df
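# Hypothetical usage sketch for convert_categorical(): rows whose value is not a
# key in the mapping are dropped, and the kept rows get an integer code column.
# The DataFrame, column names and mapping below are invented for illustration.
import pandas as pd

df_demo = pd.DataFrame({'grade': ['A', 'B', 'C', 'A', 'D'], 'score': [90, 80, 70, 95, 40]})
df_demo = convert_categorical(df_demo, 'grade', {'A': 0, 'B': 1, 'C': 2}, col_new='grade_code')
# df_demo now has 4 rows (the 'D' row is dropped) and an integer 'grade_code' column.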
| 5,338,159
|
def _get_pipeline_configs(force=False):
"""
Connects to Shotgun and retrieves information about all projects
and all pipeline configurations in Shotgun. Adds this to the disk cache.
If a cache already exists, this is used instead of talking to Shotgun.
To force a re-cache, set the force flag to True.
Returns a complex data structure with the following fields
local_storages:
- id
- code
- windows_path
- mac_path
- linux_path
pipeline_configurations:
- id
- code
- windows_path
- linux_path
- mac_path
- project
- project.Project.tank_name
:param force: set this to true to force a cache refresh
:returns: dictionary with keys local_storages and pipeline_configurations.
"""
CACHE_KEY = "paths"
    if not force:
# try to load cache first
# if that doesn't work, fall back on shotgun
cache = _load_lookup_cache()
if cache and cache.get(CACHE_KEY):
# cache hit!
return cache.get(CACHE_KEY)
# ok, so either we are force recomputing the cache or the cache wasn't there
sg = shotgun.get_sg_connection()
# get all local storages for this site
local_storages = sg.find("LocalStorage",
[],
["id", "code", "windows_path", "mac_path", "linux_path"])
# get all pipeline configurations (and their associated projects) for this site
pipeline_configs = sg.find("PipelineConfiguration",
[["project.Project.tank_name", "is_not", None]],
["id",
"code",
"windows_path",
"linux_path",
"mac_path",
"project",
"project.Project.tank_name"])
# cache this data
data = {"local_storages": local_storages, "pipeline_configurations": pipeline_configs}
_add_to_lookup_cache(CACHE_KEY, data)
return data
| 5,338,160
|
def downgrade():
"""Migrations for the downgrade."""
op.execute("UPDATE db_dbnode SET type = 'code.Code.' WHERE type = 'data.code.Code.';")
| 5,338,161
|
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
    parser = argparse.ArgumentParser(description='Combine output from multiple files that share a common column '
                                                 'name, printing only data for common column values.')
    parser.add_argument("-d", "--delimiter", help="The delimiter separating the file names in each row of the "
                                                  "compare_file_list. The default delimiter is '{}'.".format(DEF_DELIM),
default=DEF_DELIM)
parser.add_argument("-f", "--compare_file_list", help="The location of the file that lists the files to be "
"combined. Each row should contain a list of files to be "
"combined by aligning on the col_name. "
"The default file name is {}, located in the "
"directory where the program as run.".format(DEF_CMP_FILE),
default=DEF_CMP_FILE, type=file_rows_to_list)
parser.add_argument("-l", "--output_location", help="The location (directory) for output files. The default is the "
"directory from which the program was called.",
default=None)
parser.add_argument("-n", "--col_name", help="The common column name in the files used as the key to combine "
"files. The default file name is {}.".format(DEF_ALIGN_COL_NAME),
default=DEF_ALIGN_COL_NAME)
parser.add_argument("-o", "--out_name", help="The output file name. The default is {}.".format(DEF_OUT_FILE),
default=DEF_OUT_FILE)
parser.add_argument("-s", "--sep_out", help="A flag to specify a separate output files should be created for "
"the aligned files from each row of the compare_file_list. If this "
"is specified, the out_name will be used as a suffix. The base name "
"will be based on the common part of the names of the files to be "
"combined. If there is no common string, the output files will be "
"numbered based on their row number in the compare_file_list. Separate "
"output files will also be created if the column names from files on "
"different lines to not match.",
action='store_true')
args = None
try:
args = parser.parse_args(argv)
except IOError as e:
warning("Problems reading file:", e)
parser.print_help()
return args, IO_ERROR
except (KeyError, SystemExit) as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning(e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET
| 5,338,162
|
def info(parentwindow, message, buttons, *,
title=None, defaultbutton=None):
"""Display an information message."""
return _message('info', parentwindow, message, title, buttons,
defaultbutton)
| 5,338,163
|
def add_user(args):
"""
Process arguments and ask user for other needed parameters in order
to add info to DB
:param args: returned object from argparse.parse_args
:return: exit code (0 on success, 1 on failure)
"""
logger = fsurfer.log.get_logger()
if args.username is None:
username = get_input("Username")
else:
username = args.username
username = username.strip()
password = get_input("password", echo=False)
if args.first_name is None:
first_name = get_input("First name")
else:
first_name = args.first_name
if args.last_name is None:
last_name = get_input("Last name")
else:
last_name = args.last_name
if args.email is None:
email = get_input("Email")
else:
email = args.email
if args.phone is None:
phone = get_input("Phone")
else:
phone = args.phone
if args.institution is None:
institution = get_input("Institution")
else:
institution = args.institution
salt = hashlib.sha256(str(time.time())).hexdigest()
password = hashlib.sha256(salt + password).hexdigest()
user_insert = "INSERT INTO freesurfer_interface.users(username," \
" first_name," \
" last_name," \
" email," \
" institution," \
" phone," \
" password," \
" salt) " \
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
try:
conn = fsurfer.helpers.get_db_client()
with conn.cursor() as cursor:
logger.info("Adding {0} to database".format(username))
cursor.execute(user_insert, (username,
first_name,
last_name,
email,
institution,
phone,
password,
salt))
if cursor.rowcount != 1:
sys.stderr.write("{0}".format(cursor.statusmessage))
logger.error("Encountered error while adding" +
"user {0}: {1}".format(username, cursor.statusmessage))
return 1
logger.info("User {0} added".format(username))
conn.commit()
conn.close()
return 0
except Exception as e:
sys.stderr.write("Got exception: {0}".format(e))
logger.exception("Got exception: {0}".format(e))
return 1
| 5,338,164
|
def wait_for_active_cluster_commands(active_command):
"""
Wait until Cloudera Manager finishes running cluster active_command
    :param active_command: Description of what should be running; this just waits while any task is detected running
:return:
"""
view = 'summary'
wait_status = '[*'
done = '0'
while done == '0':
sys.stdout.write('\r%s - Waiting: %s' % (active_command, wait_status))
try:
api_response = cloudera_manager_api.list_active_commands(view=view)
if not api_response.items:
if debug == 'True':
pprint(api_response)
done = '1'
sys.stdout.write(']\n')
break
else:
sys.stdout.flush()
time.sleep(10)
wait_status = wait_status + '*'
except ApiException as e:
print('Exception waiting for active commands: {}'.format(e))
| 5,338,165
|
def gru(xs, lengths, init_hidden, params):
"""RNN with GRU. Based on https://github.com/google/jax/pull/2298"""
def apply_fun_single(state, inputs):
i, x = inputs
inp_update = jnp.matmul(x, params["update_in"])
hidden_update = jnp.dot(state, params["update_weight"])
update_gate = nn.sigmoid(inp_update + hidden_update)
reset_gate = nn.sigmoid(
jnp.matmul(x, params["reset_in"]) + jnp.dot(state, params["reset_weight"])
)
output_gate = update_gate * state + (1 - update_gate) * jnp.tanh(
jnp.matmul(x, params["out_in"])
+ jnp.dot(reset_gate * state, params["out_weight"])
)
hidden = jnp.where((i < lengths)[:, None], output_gate, jnp.zeros_like(state))
return hidden, hidden
init_hidden = jnp.broadcast_to(init_hidden, (xs.shape[1], init_hidden.shape[1]))
return jax.lax.scan(apply_fun_single, init_hidden, (jnp.arange(xs.shape[0]), xs))
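# Minimal usage sketch for gru() above. The parameter names come from the function
# body; their shapes and the random initialization are assumptions made purely for
# illustration (input_dim -> hidden for the *_in matrices, hidden -> hidden for the
# *_weight matrices). Inputs are time-major: xs has shape (T, batch, input_dim).
import jax
import jax.numpy as jnp

T, batch, n_in, n_hid = 5, 3, 4, 8
keys = jax.random.split(jax.random.PRNGKey(0), 7)
demo_params = {
    "update_in": jax.random.normal(keys[0], (n_in, n_hid)),
    "update_weight": jax.random.normal(keys[1], (n_hid, n_hid)),
    "reset_in": jax.random.normal(keys[2], (n_in, n_hid)),
    "reset_weight": jax.random.normal(keys[3], (n_hid, n_hid)),
    "out_in": jax.random.normal(keys[4], (n_in, n_hid)),
    "out_weight": jax.random.normal(keys[5], (n_hid, n_hid)),
}
demo_xs = jax.random.normal(keys[6], (T, batch, n_in))
demo_lengths = jnp.array([5, 3, 2])          # valid timesteps per sequence
demo_init = jnp.zeros((1, n_hid))            # broadcast to (batch, n_hid) inside gru()
final_state, all_states = gru(demo_xs, demo_lengths, demo_init, demo_params)
# final_state: (batch, n_hid); all_states: (T, batch, n_hid)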
| 5,338,166
|
def get_strings_in_flattened_sequence(p):
"""
Traverses nested sequence and for each element, returns first string encountered
"""
if p is None:
return []
#
# string is returned as list of single string
#
if isinstance(p, path_str_type):
return [p]
#
# Get all strings flattened into list
#
return get_strings_in_flattened_sequence_aux(p)
| 5,338,167
|
def convert():
"""Convert conda packages to other platforms."""
os_name = {
'darwin': 'osx',
'win32': 'win',
'linux': 'linux'
}[sys.platform]
dirname = '{}-{}'.format(os_name, platform.architecture()[0][:2])
files = glob.glob('build/{}/*.tar.bz2'.format(dirname))
for filename in files:
convert_cmd = "conda convert {} -p all -o build/".format(filename)
print(convert_cmd)
check_call(shlex.split(convert_cmd))
| 5,338,168
|
def kansuji2arabic(string, sep=False):
"""漢数字をアラビア数字に変換"""
def _transvalue(sj, re_obj=re_kunit, transdic=TRANSUNIT):
unit = 1
result = 0
for piece in reversed(re_obj.findall(sj)):
if piece in transdic:
if unit > 1:
result += unit
unit = transdic[piece]
else:
val = int(piece) if piece.isdecimal() else _transvalue(piece)
result += val * unit
unit = 1
if unit > 1:
result += unit
return result
transuji = string.translate(tt_ksuji)
for suji in sorted(set(re_suji.findall(transuji)), key=lambda s: len(s),
reverse=True):
if not suji.isdecimal():
arabic = _transvalue(suji, re_manshin, TRANSMANS)
arabic = '{:,}'.format(arabic) if sep else str(arabic)
transuji = transuji.replace(suji, arabic)
return transuji
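# Illustrative calls (as comments, since the module-level translation tables and
# regexes are defined elsewhere). Assuming they cover the usual digits and units
# (十, 百, 千, 万, 億, ...), one would expect something like:
#   kansuji2arabic('千二百三十四円')           -> '1234円'
#   kansuji2arabic('三億五千万円', sep=True)   -> '350,000,000円'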
| 5,338,169
|
def get_image(member_status=None, most_recent=None, name=None, owner=None, properties=None, region=None, size_max=None, size_min=None, sort_direction=None, sort_key=None, tag=None, visibility=None):
"""
Use this data source to get the ID of an available OpenStack image.
"""
__args__ = dict()
__args__['memberStatus'] = member_status
__args__['mostRecent'] = most_recent
__args__['name'] = name
__args__['owner'] = owner
__args__['properties'] = properties
__args__['region'] = region
__args__['sizeMax'] = size_max
__args__['sizeMin'] = size_min
__args__['sortDirection'] = sort_direction
__args__['sortKey'] = sort_key
__args__['tag'] = tag
__args__['visibility'] = visibility
__ret__ = pulumi.runtime.invoke('openstack:images/getImage:getImage', __args__)
return GetImageResult(
checksum=__ret__.get('checksum'),
container_format=__ret__.get('containerFormat'),
disk_format=__ret__.get('diskFormat'),
file=__ret__.get('file'),
metadata=__ret__.get('metadata'),
min_disk_gb=__ret__.get('minDiskGb'),
min_ram_mb=__ret__.get('minRamMb'),
protected=__ret__.get('protected'),
region=__ret__.get('region'),
schema=__ret__.get('schema'),
size_bytes=__ret__.get('sizeBytes'),
updated_at=__ret__.get('updatedAt'),
id=__ret__.get('id'))
| 5,338,170
|
def sort_course_dicts(courses):
""" Sorts course dictionaries
@courses: iterable object containing dictionaries representing courses.
Each course must have a course_number and abbreviation key
@return: returns a new list containing the given courses, in naturally sorted order.
"""
detailed_courses = [{
"course": course,
"numeric_course_number": int(extract_numeric_component(course["course_number"])),
"prefix": extract_prefix(course["course_number"]),
"suffix": extract_suffix(course["course_number"])
} for course in courses]
detailed_courses.sort(key=lambda course: course["suffix"])
detailed_courses.sort(key=lambda course: course["prefix"])
detailed_courses.sort(key=lambda course: course["numeric_course_number"])
detailed_courses.sort(key=lambda course: course["course"]["abbreviation"])
return [course_detail["course"] for course_detail in detailed_courses]
| 5,338,171
|
def read_folder(filepath):
"""
Reads multiple image files from a folder and returns the resulting stack.
To find the images in the right order, a regex is used which will search
for files with the following pattern:
[prefix]_p[Nr][suffix]. The start number doesn't need to be 0.
The files are sorted with a natural sort, meaning that files like
0002, 1, 004, 3 will be sorted as 1, 0002, 3, 004.
    The following regex is used to find the measurements:
".*_+p[0-9]+_?.*\.(tif{1,2}|jpe*g|nii|h5|png)"
Supported file formats for the image file equal the supported formats of
SLIX.imread.
Args:
filepath: Path to folder
Returns:
numpy.array: Image with shape [x, y, z] where [x, y] is the size
of a single image and z specifies the number of measurements
"""
files_in_folder = glob.glob(filepath + '/*')
matching_files = []
for file in files_in_folder:
if re.match(_fileregex, file) is not None:
matching_files.append(file)
matching_files.sort(key=__natural_sort_filenames_key)
image = None
# Check if files contain the needed regex for our measurements
for file in matching_files:
measurement_image = imread(file)
if image is None:
image = measurement_image
elif len(image.shape) == 2:
image = numpy.stack((image, measurement_image), axis=-1)
else:
image = numpy.concatenate((image,
measurement_image
[:, :, numpy.newaxis]), axis=-1)
return image
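# Hedged usage sketch: the folder path below is hypothetical and must contain
# files matching the [prefix]_p[Nr][suffix] pattern described in the docstring.
#   stack = read_folder('/data/measurement_01')
#   stack.shape  # -> (x, y, number_of_measurements)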
| 5,338,172
|
def zenodo_fetch_resource_helper(zenodo_project, resource_id, is_record=False, is_file=False):
"""
Takes a Zenodo deposition/record and builds a Zenodo PresQT resource.
Parameters
----------
zenodo_project : dict
The requested Zenodo project.
is_record : boolean
Flag for if the resource is a published record
is_file : boolean
Flag for if the resource is a file
Returns
-------
PresQT Zenodo Resource (dict).
"""
identifier = None
if is_file is False:
if is_record is True:
kind_name = zenodo_project['metadata']['resource_type']['type']
date_modified = zenodo_project['updated']
identifier = zenodo_project['doi']
else:
kind_name = zenodo_project['metadata']['upload_type']
date_modified = zenodo_project['modified']
kind = 'container'
title = zenodo_project['metadata']['title']
hashes = {}
extra = {}
for key, value in zenodo_project['metadata'].items():
if key != 'doi':
extra[key] = value
from presqt.targets.zenodo.utilities.helpers.get_zenodo_children import zenodo_get_children
children = zenodo_get_children(zenodo_project, resource_id, is_record)
else:
kind = 'item'
kind_name = 'file'
title = zenodo_project['key']
date_modified = zenodo_project['updated']
hashes = {'md5': zenodo_project['checksum'].partition(':')[2]}
extra = {}
children = []
return {
"kind": kind,
"kind_name": kind_name,
"id": resource_id,
"identifier": identifier,
"title": title,
"date_created": zenodo_project['created'],
"date_modified": date_modified,
"hashes": hashes,
"extra": extra,
"children": children}
| 5,338,173
|
def add_role_menu(request):
"""菜单授权"""
menu_nums = request.POST.get("node_id_json")
role_id = request.POST.get("role_id")
role_obj = auth_db.Role.objects.get(id=role_id)
menu_nums = json.loads(menu_nums)
role_obj.menu.clear()
for i in menu_nums:
menu_obj = auth_db.Menus.objects.get(menu_num=i)
role_obj.menu.add(menu_obj)
data = "授权已更新,重新登录即生效!"
return HttpResponse(data)
| 5,338,174
|
def repo_path():
"""
    Small helper to resolve the location of doctest_files back in the repository.
:return: the absolute path to the root of the repository.
"""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
| 5,338,175
|
def setup():
"""Setup a brand new pilot config for a new index.
NOTE: Paths with tilde '~' DO NOT WORK. These cause problems for resolving
paths that look like /~/foo/bar, which sometimes translate as ~/foo/bar
instead. These are disabled to prevent that from happening.
"""
PROJECT_QUERIES = {
'projects_endpoint': {
'prompt': 'Set a Globus UUID where your data should be stored.',
'default': '',
'help': 'visit "https://app.globus.org/file-manager/collections" '
'to find Globus endpoint to store your data.',
'validation': [input_validation.validate_is_uuid,
input_validation.validate_is_globus_endpoint],
},
'projects_base_path': {
'prompt': 'Pick a base path.',
'default': '/',
'help': 'All data will be saved under this directory',
'validation': [input_validation.validate_no_spaces,
input_validation.validate_absolute_path,
input_validation.validate_no_tilde],
},
'projects_group': {
'prompt': 'Pick a Globus Group to secure Globus Search records',
'default': 'public',
'help': 'The group determines who can view records in search. '
'People not in this group will not see records in Globus '
'Search. "public" allows anyone to see these records.',
'validation': [input_validation.validate_is_valid_globus_group],
},
}
pc = commands.get_pilot_client()
projects = pc.project.load_all().keys()
if projects:
click.secho(
f'Index is already setup with the following projects: {projects}. '
f'Please delete them before setting up your index.', fg='red')
return
order = ['projects_endpoint', 'projects_base_path', 'projects_group']
iv = input_validation.InputValidator(queries=PROJECT_QUERIES, order=order)
new_ctx = iv.ask_all()
pc.context.update_context(new_ctx)
pc.context.push()
click.secho('Your index has been setup successfully. Now see '
'`pilot project add`.', fg='green')
return
| 5,338,176
|
def oauth_url(auth_base, country, language):
"""Construct the URL for users to log in (in a browser) to start an
authenticated session.
"""
url = urljoin(auth_base, 'login/sign_in')
query = urlencode({
'country': country,
'language': language,
'svcCode': SVC_CODE,
'authSvr': 'oauth2',
'client_id': CLIENT_ID,
'division': 'ha',
'grant_type': 'password',
})
return '{}?{}'.format(url, query)
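# Hedged example (as a comment): the gateway base URL below is purely
# illustrative, and SVC_CODE / CLIENT_ID are module-level constants defined elsewhere.
#   oauth_url('https://us.m.lgaccount.com/', 'US', 'en-US')
#   # -> 'https://us.m.lgaccount.com/login/sign_in?country=US&language=en-US&svcCode=...'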
| 5,338,177
|
def teammsg(self: Client, message: str) -> str:
"""Sends a team message."""
return self.run('teammsg', message)
| 5,338,178
|
def config_section(section):
"""
This configures a specific section of the configuration file
"""
# get a handle on the configuration reader
config_reader = get_config_reader()
# Find the group that this section belongs to.
# When we're in a namespaced section, we'll be in the group that the namespace belongs to.
current_group = None
is_child = False
for group in configuration_groups:
if section == configuration_groups[group]["section"]:
current_group = group
break
else:
# Check to see if our section is a child of the group
if configuration_groups[group]["type"] == 'multi':
# We want to know if the namespace of that group matches.
namespace = configuration_groups[group]["namespace"]
if section.startswith(namespace+"."):
current_group = group
is_child = True
break
# If this search returned nobody, this section cannot be configured through the web interface.
    if current_group is None:
abort(404)
config_options_raw = {}
if config_reader.has_section(section):
config_options_raw = dict(config_reader.items(section))
cgroupdict = configuration_groups[current_group]
sidebar = get_sidebar()
# we're going to filter the raw items handed to the template from the
# configuration files. This makes it harder for the generic template to
# mess something up unintentionally.
config_options_cooked = {}
for config_key in config_options_raw.keys():
if config_key == "type" or config_key == "ui_label":
continue
else:
config_options_cooked[config_key] = config_options_raw[config_key]
    # now, config_options_cooked has our filtered keys.
    # TODO: Make the above easier to extend
    # walk through templates and render them in order of likelihood
# there are a set of templates that need to be considered here:
# in order of specificity,
#
# if we are looking at a wireless interface (interface.wlan0),
# we want to have each of the following templates considered:
#
# * cfg_interface_wlan0.html
# * cfg_interface_wireless.html
# * cfg_interface.html
# * cfg_generic.html
#
# for some types, we need the sequence to be a little different
# such as in the case of services.
#
# * cfg_service_ssh.html
# * cfg_service.html
# * cfg_generic.html
#
# We do this by replacing any .'s with _ first, then using that.
# The namespace of the selected group is then checked:
#
# cfg_{namespace}_{type}
# cfg_{namespace}
# cfg_generic
#
#
# This means that the best way of handling this is to use the
# following hierarchy:
#
# cfg_{section}.html -- the most specific
# cfg_{namespace}_{type}.html -- Not as specific but works for interface.
# cfg_{namespace}.html -- A nice fallback for services.
# cfg_generic.html -- Fallback for all others.
#
    # this means that service.ssh will first try the template
    # cfg_service_ssh.html
# Then will use `cfg_service.html` (which should exist.) then
# as a worst case scenario will fall back on `cfg_generic.html`
#
templates = []
# check if we were requested to show key-value stores.
#
if 'notemplate' in request.args and request.args['notemplate'] == 'true':
templates = ['cfg_generic.html']
else:
templates.append('cfg_{0}.html'.format(section.replace('.','_')))
if 'type' in config_options_raw:
# This only works on some sections as a section is responsible for
# declaring its type.
# This means we need to check. This also is only really relevant to the
# multi section type of group, but this does make things simple.
templates.append(
'cfg_{0}_{1}.html'.format(
cgroupdict['namespace'],
config_options_raw['type']
)
)
if 'namespace' in cgroupdict:
templates.append('cfg_{0}.html'.format(cgroupdict['namespace']))
templates.append('cfg_generic.html')
try:
return render_template(list(map(lambda l:"config/"+l, templates)),
sidebar=sidebar,
current_group=current_group,
current_section=section,
values=config_options_cooked,
title="Admin: {0} ({1})".format(cgroupdict['short'],section)
)
except Exception as e:
if app.debug:
raise e
abort(500)
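# Standalone sketch (not part of the app) of the template lookup order described
# in the comments above, for a hypothetical namespaced section such as
# 'interface.wlan0' of type 'wireless'.
def _template_candidates(section, namespace=None, section_type=None):
    names = ['cfg_{0}.html'.format(section.replace('.', '_'))]
    if namespace and section_type:
        names.append('cfg_{0}_{1}.html'.format(namespace, section_type))
    if namespace:
        names.append('cfg_{0}.html'.format(namespace))
    names.append('cfg_generic.html')
    return names

# _template_candidates('interface.wlan0', 'interface', 'wireless')
# -> ['cfg_interface_wlan0.html', 'cfg_interface_wireless.html',
#     'cfg_interface.html', 'cfg_generic.html']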
| 5,338,179
|
def h_html_footnote(e, doc):
"""Handle footnotes with bigfoot"""
if not isinstance(e, pf.Note) or doc.format != "html":
return None
htmlref = rf'<sup id="fnref:{doc.footnotecounter}"><a href="#fn:{doc.footnotecounter}" rel="footnote">{doc.footnotecounter}</a></sup>'
htmlcontent_before = rf'<li class="footnote" id="fn:{doc.footnotecounter}"><p>'
htmlcontent_after = rf'<a href="#fnref:{doc.footnotecounter}" title="return to article"> ↩</a><p></li>'
doc.footnotecounter += 1
conts = pf.Div(*e.content)
doc.footnotecontents += (
[pf.RawBlock(htmlcontent_before, format="html")]
+ [conts]
+ [pf.RawBlock(htmlcontent_after, format="html")]
)
return pf.RawInline(htmlref, format="html")
| 5,338,180
|
def plot_image(image):
"""
:param image: the image to be plotted in a 3-D matrix format
:return: None
"""
plt.imshow(image)
plt.show()
| 5,338,181
|
def segment(x,u1,u2):
""" given a figure x, create a new figure spanning the specified interval in the original figure
"""
if not (isgoodnum(u1) and isgoodnum(u2)) or close(u1,u2) or u1<0 or u2 < 0 or u1 > 1 or u2 > 1:
raise ValueError('bad parameter arguments passed to segment: '+str(u1)+', '+str(u2))
if ispoint(x):
return deepcopy(x)
elif isline(x):
return segmentline(x,u1,u2)
elif isarc(x):
return segmentarc(x,u1,u2)
elif ispoly(x):
return segmentpoly(x,u1,u2)
elif isgeomlist(x):
return segmentgeomlist(x,u1,u2)
else:
raise ValueError("inappropriate figure type for segment(): "+str(x))
| 5,338,182
|
def worker():
"""Print usage of GPU
"""
if SHOW_GPU_USAGE_TIME == 0:
return
while True:
process = psutil.Process(os.getpid())
print("\n Gen RAM Free:" + humanize.naturalsize(psutil.virtual_memory().available),
"I Proc size:" + humanize.naturalsize(process.memory_info().rss))
print("GPU RAM Free:{0:.0f}MB, Used:{1:.0f}MB, Util:{2:3.0f}%, Total:{3:.0f}MB, Load:{4:.0f}%".format(gpu.memoryFree, gpu.memoryUsed,
gpu.memoryUtil*100, gpu.memoryTotal, gpu.load))
time.sleep(SHOW_GPU_USAGE_TIME)
| 5,338,183
|
def compress_files(file_names):
"""
Given a list of files, compress all of them into a single file.
    Keeps the existing directory structure intact.
"""
archive_file_name = 'archive.zip'
print(f'{len(file_names)} files found. Compressing the files...')
cwd = os.getcwd()
with ZipFile(archive_file_name, 'w') as zip_file:
for path in file_names:
zip_file.write(path, path.replace(cwd, ''))
print(
f'All {len(file_names)} files were successfully compressed into {archive_file_name}')
return archive_file_name
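# Hypothetical usage (as comments): the paths should be absolute and live under
# os.getcwd(), since path.replace(cwd, '') is what yields the archive-relative names.
#   files = [os.path.join(os.getcwd(), 'reports', 'a.csv'),
#            os.path.join(os.getcwd(), 'reports', 'b.csv')]
#   compress_files(files)   # writes and returns 'archive.zip'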
| 5,338,184
|
def box_from_anchor_and_target(anchors, regressed_targets):
"""
Get bounding box from anchor and target through transformation provided in the paper.
:param anchors: Nx4 anchor boxes
:param regressed_targets: Nx4 regression targets
    :return: Nx4 tensor of decoded boxes
"""
boxes_v = anchors[:, 2] * regressed_targets[:, 0] / 10.0 + anchors[:, 0]
boxes_u = anchors[:, 3] * regressed_targets[:, 1] / 10.0 + anchors[:, 1]
boxes_h = anchors[:, 2] * \
tf.clip_by_value(tf.exp(regressed_targets[:, 2] / 5.0), 1e-4, 1e4)
boxes_w = anchors[:, 3] * \
tf.clip_by_value(tf.exp(regressed_targets[:, 3] / 5.0), 1e-4, 1e4)
return tf.stack([boxes_v,
boxes_u,
boxes_h,
boxes_w], axis=1)
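# Minimal sketch with made-up numbers; the [v, u, h, w] column ordering is
# inferred from the indexing in the body above and is an assumption.
import tensorflow as tf

demo_anchors = tf.constant([[50.0, 60.0, 20.0, 30.0]])
demo_targets = tf.constant([[1.0, -2.0, 0.5, 0.0]])
demo_boxes = box_from_anchor_and_target(demo_anchors, demo_targets)
# demo_boxes -> shape (1, 4) tensor: [[52.0, 54.0, 20*exp(0.1), 30.0]]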
| 5,338,185
|
def block_device_mapping_update(context, bdm_id, values, legacy=True):
"""Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values, legacy)
| 5,338,186
|
async def run():
"""
Bot setup
"""
async with aiohttp.ClientSession() as session:
token = env_file.get()
bot.add_cog(DevCommands(bot))
bot.add_cog(GeneralCommands(bot, session))
bot.add_cog(LinksCommands(bot))
if 'DBL_TOKEN' in token:
bot.add_cog(TopGG(bot, token['DBL_TOKEN']))
if 'WEBHOOK_URL' in token:
logs.webhook_url = token['WEBHOOK_URL']
if 'WEBHOOK_ERROR_SUPERVISOR_ID' in token:
logs.error_supervisor = token['WEBHOOK_ERROR_SUPERVISOR_ID']
await bot.start(token['BOT_TOKEN'])
| 5,338,187
|
def append_expression_of_remove_child(*, child: DisplayObject) -> None:
"""
Append expression of remove_child (removeElement).
Parameters
----------
child : DisplayObject
Child object to remove.
"""
import apysc as ap
with ap.DebugInfo(
callable_=append_expression_of_remove_child, locals_=locals(),
module_name=__name__):
from apysc._expression import expression_variables_util
from apysc._expression import var_names
parent_name: str = expression_variables_util.get_next_variable_name(
type_name=var_names.PARENT)
child_name: str = child.variable_name
expression: str = (
f'var {parent_name} = {child_name}.parent();'
f'\nif ({parent_name}) {{'
f'\n {parent_name}.removeElement({child_name});'
'\n}'
)
ap.append_js_expression(expression=expression)
| 5,338,188
|
def test_check_versions_negative_binary_not_found():
"""Test check_versions - negative case, binary not found."""
with mock.patch("shutil.which", return_value=None):
with pytest.raises(
AEAException,
match="'go' is required by the libp2p connection, but it is not installed, or it is not accessible from the system path.",
):
check_versions()
| 5,338,189
|
def consumer(address,callback,message_type):
"""
    Creates a consumer bound to the given address to pull messages.
The callback is invoked for every reply received.
Args:
- address: the address to bind the PULL socket to.
    - callback: the callback to invoke for every message. Must accept one argument: the message
- message_type: the type of message to receive
"""
return Consumer(address,callback,message_type)
| 5,338,190
|
def _parse_continuous_records(prepared_page, section_dict):
"""Handle parsing a continuous list of records."""
# import pdb; pdb.set_trace()
columns = 6
start = prepared_page.index('Date and time')
for i, column in enumerate(prepared_page[start:start + columns]):
column_index = start + i
values = prepared_page[column_index + columns::columns]
if column in section_dict:
section_dict[column] = section_dict[column] + values
else:
section_dict[column] = values
return section_dict
| 5,338,191
|
def power_law_at_2500(x, amp, slope, z):
""" Power law model anchored at 2500 AA
This model is defined for a spectral dispersion axis in Angstroem.
:param x: Dispersion of the power law
:type x: np.ndarray
:param amp: Amplitude of the power law (at 2500 A)
:type amp: float
:param slope: Slope of the power law
:type slope: float
:param z: Redshift
:type z: float
:return: Power law model
:rtype: np.ndarray
"""
return amp * (x / (2500. * (z+1.))) ** slope
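# Quick numerical sanity check: at the anchor wavelength 2500 * (1 + z) the model
# returns exactly `amp`, independent of the slope.
import numpy as np

demo_wave = np.array([5000.0, 7000.0, 9000.0])   # observed-frame dispersion, Angstroem
demo_flux = power_law_at_2500(demo_wave, amp=1.0, slope=-1.5, z=1.0)
# demo_flux[0] == 1.0; the rest follow (x / 5000.)**-1.5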
| 5,338,192
|
def map2alm(
maps,
lmax=None,
mmax=None,
iter=3,
pol=True,
use_weights=False,
datapath=None,
gal_cut=0,
use_pixel_weights=False,
):
"""Computes the alm of a Healpix map. The input maps must all be
in ring ordering.
Parameters
----------
maps : array-like, shape (Npix,) or (n, Npix)
The input map or a list of n input maps. Must be in ring ordering.
lmax : int, scalar, optional
Maximum l of the power spectrum. Default: 3*nside-1
mmax : int, scalar, optional
Maximum m of the alm. Default: lmax
iter : int, scalar, optional
Number of iteration (default: 3)
pol : bool, optional
If True, assumes input maps are TQU. Output will be TEB alm's.
(input must be 1 or 3 maps)
If False, apply spin 0 harmonic transform to each map.
(input can be any number of maps)
If there is only one input map, it has no effect. Default: True.
use_weights: bool, scalar, optional
If True, use the ring weighting. Default: False.
datapath : None or str, optional
If given, the directory where to find the weights data.
gal_cut : float [degrees]
pixels at latitude in [-gal_cut;+gal_cut] are not taken into account
use_pixel_weights: bool, optional
If True, use pixel by pixel weighting, healpy will automatically download the weights, if needed
Returns
-------
alms : array or tuple of array
alm or a tuple of 3 alm (almT, almE, almB) if polarized input.
Notes
-----
The pixels which have the special `UNSEEN` value are replaced by zeros
before spherical harmonic transform. They are converted back to `UNSEEN`
value, so that the input maps are not modified. Each map have its own,
independent mask.
"""
maps = ma_to_array(maps)
info = maptype(maps)
nside = pixelfunc.get_nside(maps)
check_max_nside(nside)
if use_pixel_weights:
if use_weights:
raise RuntimeError("Either use pixel or ring weights")
with data.conf.set_temp("dataurl", DATAURL), data.conf.set_temp(
"remote_timeout", 30
):
pixel_weights_filename = data.get_pkg_data_filename(
"full_weights/healpix_full_weights_nside_%04d.fits" % nside,
package="healpy",
)
else:
pixel_weights_filename = None
if pol or info in (0, 1):
alms = _sphtools.map2alm(
maps,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
else:
# info >= 2 and pol is False : spin 0 spht for each map
alms = [
_sphtools.map2alm(
mm,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
for mm in maps
]
return np.array(alms)
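# Hedged usage sketch via healpy's public entry point (this function appears to be
# healpy's own map2alm); the synthetic map below is just white noise.
import numpy as np
import healpy as hp

demo_nside = 32
demo_map = np.random.standard_normal(hp.nside2npix(demo_nside))
demo_alm = hp.map2alm(demo_map, lmax=2 * demo_nside, pol=False)
# demo_alm has (lmax + 1) * (lmax + 2) // 2 complex coefficients (mmax == lmax).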
| 5,338,193
|
def credits():
"""
Credits Page
"""
return render_template("credits.html")
| 5,338,194
|
def register_view(request):
"""Register a new user."""
if request.method == "POST":
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new User object but don't save it yet.
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data["password"]
)
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
context = {"user_form": user_form}
return render(request, "account/register_done.html", context)
else:
user_form = UserRegistrationForm()
return render(request, "account/register.html", {"user_form": user_form})
| 5,338,195
|
def show_available_models():
"""Displays available models
"""
print(list(__model_factory.keys()))
| 5,338,196
|
def deleteone(indextodelete, openfile=open):
"""
Delete a task from the todofile
"""
index_existant = False
with openfile(todofilefromconfig(), "r") as todofile:
lines = todofile.readlines()
with openfile(todofilefromconfig(), "w") as todofile:
for line in lines:
# if the current row doesn't contain the index it is kept
if not re.findall("^" + indextodelete + " ", line):
todofile.write(line)
# else it is deleted by not being copied
else:
printinfo(
"task deleted from the todolist - "
+ line.rstrip("\n")
)
index_existant = True
if not index_existant:
printwarning(
"no task is deleted from the todolist, not existing index - "
+ indextodelete
)
| 5,338,197
|
def handle_pdf_build(pdf_job_dict: Dict[str, Any], tx_payload, redis_connection) -> str:
"""
    A job dict is now set up and remembered in Redis
so that we can match it when we get a future callback.
The project.json (in the folder above the CDN one) is also updated, e.g., with new commits.
The job is now passed to the tX system by means of a
POST to the tX webhook (which should hopefully respond with a callback).
"""
AppSettings.logger.debug("Webhook.handle_build setting up pdf job dict…")
our_job_id = get_unique_job_id()
pdf_job_dict['job_id'] = our_job_id
pdf_job_dict['output_format'] = 'pdf'
pdf_job_dict['cdn_file'] = f'tx/job/{our_job_id}.zip'
pdf_job_dict['output'] = f"https://{AppSettings.cdn_bucket_name}/{pdf_job_dict['cdn_file']}"
# NOTE: following line removed as stats recording used too much disk space
# pdf_job_dict['user_projects_invoked_string'] = user_projects_invoked_string # Need to save this for reuse
pdf_job_dict['links'] = {
'href': f'{AppSettings.api_url}/tx/job/{our_job_id}',
'rel': 'self',
'method': 'GET'
}
AppSettings.logger.debug(f"pdf_job_dict: {pdf_job_dict}")
# Save the job info in Redis for the callback to use
remember_job(pdf_job_dict, redis_connection)
# Pass the work request onto the tX system
AppSettings.logger.info(f"PDF Job: Post request to tX system @ {tx_post_url} …")
tx_payload['job_id'] = our_job_id
tx_payload['output_format'] = 'pdf'
AppSettings.logger.debug(f"Payload for pdf tX: {tx_payload}")
response: Optional[requests.Response]
try:
response = requests.post(tx_post_url, json=tx_payload)
except requests.exceptions.ConnectionError as e:
AppSettings.logger.critical(f"Callback connection error: {e}")
response = None
if response:
#AppSettings.logger.info(f"response.status_code = {response.status_code}, response.reason = {response.reason}")
#AppSettings.logger.debug(f"response.headers = {response.headers}")
try:
AppSettings.logger.info(f"response.json = {response.json()}")
except json.decoder.JSONDecodeError:
AppSettings.logger.info("No valid response JSON found")
AppSettings.logger.debug(f"response.text = {response.text}")
if response.status_code != 200:
AppSettings.logger.critical(f"Failed to submit job to tX:"
f" {response.status_code}={response.reason}")
else: # no response
error_msg = "Submission of job to tX system got no response"
AppSettings.logger.critical(error_msg)
# So we go into the FAILED queue and monitoring system
raise Exception(error_msg)
return our_job_id
| 5,338,198
|
def test_missing_file_raises_access_denied(basic_auth_db: BasicAuthDb, basic_auth_db_path: Path):
"""
In the default setup the sys-admin may not have created a blank db file.
Correct behaviour here is to behave as if the file exists and is empty.
We want an AuthenticationFailedException NOT any OSError (FileNotFound).
"""
basic_auth_db_path.unlink(missing_ok=False)
with pytest.raises(AuthenticationFailedException):
basic_auth_db.authenticate(TEST_USER_NAME, TEST_PASSWORD)
| 5,338,199
|