content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def createMode():
    """Required to initialize the module. RV will call this function to create your mode."""
    mode = AudioForSequence()
    return mode
def pre_process(cpp_line):
    """Pre-process a C++ source line.

    Tabs are turned into single spaces; newlines and semicolons are removed.
    """
    # One C-level pass instead of three chained .replace() calls.
    table = str.maketrans({'\t': ' ', '\n': '', ';': ''})
    return cpp_line.translate(table)
def actions_db(action_id):
    """
    replaces the actual db call
    """
    if action_id == 'not found':
        return None
    # Fields shared by every fake record.
    common = {
        'name': 'dag_it',
        'parameters': None,
        'dag_execution_date': '2017-09-06 14:10:08.528402',
        'user': 'robot1',
        'timestamp': '2017-09-06 14:10:08.528402',
        'context_marker': '8-4-4-4-12a',
    }
    if action_id == 'state error':
        return {'id': 'state error', 'dag_id': 'state error', **common}
    return {'id': '59bb330a-9e64-49be-a586-d253bb67d443', 'dag_id': 'did2', **common}
import os
def _safe_clear_dirflow(path):
"""
Safely check that the path contains ONLY folders of png files,
if any other structure, will simply ERROR out.
Parameters
----------
path: str
string to the path to the folders of data images to be used
"""
print("Clearing {}...".format(path))
assert os.path.isdir(path), "Didn't pass a folder to be cleaned"
list_dir = [f for f in os.listdir(path) if not f.startswith('.')]
for folder in list_dir:
cat_folder = os.path.join(path, folder)
assert os.path.isdir(cat_folder), \
"Dir contains Non-Folder File!"
cat_folder_item = [f for f in os.listdir(cat_folder)
if not f.startswith('.')]
for file in cat_folder_item:
# For every file, confirm is PNG or error.
# DONT DELETE YET, IN CASE OF ERRORS!
assert ".png" in file, "Folder has Non PNG Contents!"
# If we got though that with no error, then now we can delete!
# for folder in os.listdir(the_path):
# cat_folder = os.path.join(the_path, folder)
# for file in os.listdir(cat_folder):
# os.remove(os.path.join(cat_folder, file))
# os.rmdir(cat_folder)
# os.rmdir(the_path)
return True | af92454143fee21c497e1dca2c268c9cb915dbd2 | 27,503 |
def parse(argv):
    """
    Parse command line options. Returns the name of the command
    and any additional data returned by the command parser.
    May raise CommandParsingError if there are problems.

    BUGFIX: the original used Python-2-only ``except X, e`` syntax,
    which is a SyntaxError on Python 3; converted to ``except X as e``.
    """
    # Find the command
    if len(argv) < 1:
        # No arguments at all: fall back to the default (None) command.
        full = None
        cmd = command_dict[None]
    else:
        try:
            full = complete(argv[0])
        except KeyError as e:
            raise CommandParsingError(str(e))
        if full is not None and full in command_dict:
            cmd = command_dict[full]
            argv = argv[1:]
        else:
            # Unknown first token: treat the whole argv as args to the
            # default command; a getopt failure means the token was bogus.
            cmd = command_dict[None]
            try:
                return None, cmd.parse(*getopt.gnu_getopt(argv, cmd.short_opts, cmd.long_opts))
            except getopt.GetoptError:
                raise CommandParsingError("unrecognised command '%s'" % argv[0])
    # Process arguments
    try:
        return full, cmd.parse(*getopt.gnu_getopt(argv, cmd.short_opts, cmd.long_opts))
    except getopt.GetoptError as e:
        raise CommandParsingError(str(e))
def _get_answer_spans(text, ref_answer):
    """
    Based on Rouge-L Score to get the best answer spans.
    :param text: list of tokens in text
    :param ref_answer: the human's answer, also tokenized
    :returns max_spans: list of two numbers, marks the start and end position with the max score
    """
    best_score = -1.
    best_span = [0, len(text) - 1]
    for start, token in enumerate(text):
        # Only consider spans that begin on a token shared with the reference.
        if token not in ref_answer:
            continue
        # Walk the end pointer backwards so longer spans are scored first.
        for end in range(len(text) - 1, start - 1, -1):
            score = recall_score(text[start: end + 1], ref_answer)  # rouge_score is too slow
            if score > best_score:
                best_score = score
                best_span = [start, end]
        # Early exit once a near-perfect span has been found.
        if best_score > 0.9:
            return best_span
    # Warning: the end-pointed token is included in the fake answer.
    return best_span
from functools import cmp_to_key
def order(list, cmp=None, key=None, reverse=False):
    """ Returns a list of indices in the order as when the given list is sorted.
    For example: ["c","a","b"] => [1, 2, 0]
    This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
    """
    # Build an index-level comparator matching the requested cmp/key combo.
    if cmp and key:
        def compare(i, j):
            return cmp(key(list[i]), key(list[j]))
    elif cmp:
        def compare(i, j):
            return cmp(list[i], list[j])
    elif key:
        def compare(i, j):
            return int(key(list[i]) >= key(list[j])) * 2 - 1
    else:
        def compare(i, j):
            return int(list[i] >= list[j]) * 2 - 1
    indices = range(len(list))
    return sorted(indices, key=cmp_to_key(compare), reverse=reverse)
def get_maximum_batch_id(path: Pathy) -> int:
    """
    Get the last batch ID. Works with GCS, AWS, and local.
    Args:
        path: The path folder to look in.
              Begin with 'gs://' for GCS. Begin with 's3://' for AWS S3.
              Supports wildcards *, **, ?, and [..].
    Returns: The maximum batch id of data in `path`. If `path` exists but contains no files
        then returns -1.
    Raises FileNotFoundError if `path` does not exist.
    """
    _LOG.debug(f"Looking for maximum batch id in {path}")
    filesystem = get_filesystem(path)
    # Only the parent directory is checked for existence; `path` itself may
    # contain glob wildcards that never "exist" literally.
    if not filesystem.exists(path.parent):
        msg = f"{path.parent} does not exist"
        _LOG.warning(msg)
        raise FileNotFoundError(msg)
    filenames = filesystem.glob(path)
    # If there are no files, return -1 (an empty-but-existing path is not an error).
    if len(filenames) == 0:
        _LOG.debug(f"Did not find any files in {path}")
        return -1
    # Now that filenames have leading zeros (like 000001.nc), we can use lexographical sorting
    # to find the last filename, instead of having to convert all filenames to int.
    filenames = np.sort(filenames)
    last_filename = filenames[-1]
    last_filename = Pathy(last_filename)
    last_filename_stem = last_filename.stem
    # Assumes every matched filename stem is a zero-padded integer — TODO confirm.
    maximum_batch_id = int(last_filename_stem)
    _LOG.debug(f"Found maximum of batch it of {maximum_batch_id} in {path}")
    return maximum_batch_id
import requests
def curl_post(method, txParameters=None, RPCaddress=None, ifPrint=False):
    """
    call Ethereum RPC functions
    """
    payload = {"jsonrpc": "2.0", "method": method, "id": 1}
    if txParameters:
        payload["params"] = [txParameters]
    response = requests.post(RPCaddress,
                             json=payload,
                             headers={'Content-type': 'application/json'})
    response_json = response.json()
    if ifPrint:
        print('raw json response: {}'.format(response_json))
    # An "error" member in the JSON-RPC response means the method failed.
    if "error" in response_json:
        raise MethodNotExistentError()
    return response_json['result']
def intRoexpwt2(g1, g2, p, w, t):
    """ Integral of the roexpwt filter Oxenham & Shera (2003) equation (3)
    Parameters
    ----------
    g1, g2 - Limits of the integral in normalized terms (eg.: g1=0.1,g2=0.35)
    p - Slope parameter
    t - Factor by which second slope is shallower than first
    w - relative weights of slopes (determines where 2nd starts to dominate)
    Returns
    -------
    I - Integral of the function
    """
    # quad returns (value, abserr); only the value is needed here.
    result, _abserr = quad(roexpwt, g1, g2, args=(p, w, t))
    return result
def create_connection(query):
    """
    Decorator that executes the SQL produced by the wrapped function.

    :param query: a function returning a query string when called
    :return: wrapper that runs the query and returns all fetched rows
    """
    def wrapper(*args, **kargs):
        config = Config()
        connection = pymysql.connect(host=config.nijo_db_host,
                                     user=config.nijo_db_user,
                                     password=config.nijo_db_pass,
                                     db=config.nijo_db_name,
                                     charset='utf8',
                                     cursorclass=pymysql.cursors.DictCursor)
        # BUGFIX: the connection was leaked if execute()/fetchall() raised;
        # try/finally guarantees it is closed on every path.
        try:
            with connection.cursor() as cursor:
                cursor.execute(query(*args, **kargs))
                rows = cursor.fetchall()
            connection.commit()
        finally:
            connection.close()
        return rows
    return wrapper
def numentries(arrays):
    """
    Counts the number of entries in a typical arrays from a ROOT file,
    by looking at the length of the first key.

    :param arrays: mapping from branch name to a numpy-like array
    :return: size of the first dimension of the first branch's array
    """
    # next(iter(...)) grabs the first value without materializing all keys.
    first_array = next(iter(arrays.values()))
    return first_array.shape[0]
from typing import Callable
import functools
def build_jax_solve_eval_fwd(fenics_templates: FenicsVariable) -> Callable:
    """Return `f(*args) = build_jax_solve_eval(*args)(ofunc(*args))`. This is forward mode AD.
    Given the FEniCS-side function ofunc(*args), return the function
    `f(*args) = build_jax_solve_eval(*args)(ofunc(*args))` with
    the JVP of `f`, where:
    `*args` are all arguments to `ofunc`.
    Args:
        ofunc: The FEniCS-side function to be wrapped.
    Returns:
        `f(args) = build_jax_solve_eval(*args)(ofunc(*args))`
    """
    def decorator(fenics_function: Callable) -> Callable:
        # Public entry point: binding the primitive lets JAX trace/transform it.
        @functools.wraps(fenics_function)
        def jax_solve_eval(*args):
            return jax_solve_eval_p.bind(*args)
        # Register a custom JAX primitive whose impl runs the FEniCS solve;
        # solve_eval returns a tuple and element [0] is the primal value.
        jax_solve_eval_p = Primitive("jax_solve_eval")
        jax_solve_eval_p.def_impl(
            lambda *args: solve_eval(fenics_function, fenics_templates, *args)[0]
        )
        # NOTE(review): abstract eval actually executes the solve to discover
        # the output shape — presumably acceptable for this use; confirm cost.
        jax_solve_eval_p.def_abstract_eval(
            lambda *args: jax.abstract_arrays.make_shaped_array(
                solve_eval(fenics_function, fenics_templates, *args)[0]
            )
        )
        def jax_solve_eval_batch(vector_arg_values, batch_axes):
            # Batching rule: falls back to a Python-level map over the batch.
            assert len(set(batch_axes)) == 1  # assert that all batch axes are same
            assert (
                batch_axes[0] == 0
            )  # assert that batch axis is zero, need to rewrite for a general case?
            res = list(map(jax_solve_eval, *vector_arg_values))
            res = np.asarray(res)
            return res, batch_axes[0]
        jax.interpreters.batching.primitive_batchers[jax_solve_eval_p] = jax_solve_eval_batch
        # @trace("jvp_jax_solve_eval")
        def jvp_jax_solve_eval(ps, ts):
            # Forward-mode rule: delegates to its own primitive below.
            return jvp_jax_solve_eval_p.bind(ps, ts)
        jvp_jax_solve_eval_p = Primitive("jvp_jax_solve_eval")
        jvp_jax_solve_eval_p.multiple_results = True
        jvp_jax_solve_eval_p.def_impl(
            lambda ps, ts: jvp_solve_eval(fenics_function, fenics_templates, ps, ts)
        )
        jax.interpreters.ad.primitive_jvps[jax_solve_eval_p] = jvp_jax_solve_eval
        # TODO: JAX Tracer goes inside fenics wrappers and zero array is returned
        # because fenics numpy conversion works only for concrete arrays
        vjp_jax_solve_eval_p = Primitive("vjp_jax_solve_eval")
        vjp_jax_solve_eval_p.def_impl(
            lambda ct, *args: vjp_solve_eval(fenics_function, fenics_templates, *args)[
                1
            ](ct)
        )
        # Registered as the transpose rule so reverse-mode AD can use it.
        jax.interpreters.ad.primitive_transposes[
            jax_solve_eval_p
        ] = vjp_jax_solve_eval_p
        return jax_solve_eval
    return decorator
import torch
def gen_gcam_target(imgs, model, target_layer='layer4', target_index=None, classes=get_imagenet_classes(), device='cuda', prep=True):
    """
    Visualize model responses given multiple images.

    Generates Grad-CAM masks for `imgs` against the class indices in
    `target_index`, taken at `target_layer` of `model`.

    NOTE(review): the default `classes=get_imagenet_classes()` is evaluated
    once at import time, and `classes` is never used in this body — verify
    whether it can be dropped.
    NOTE(review): assumes `target_index` has one entry per image — confirm.
    """
    # Get model and forward pass
    gcam, probs, ids, images = gen_model_forward(imgs, model, device=device, prep=prep, type='gcam')
    # Backprop from the requested class ids rather than the top prediction.
    ids_ = torch.LongTensor([[x] for x in target_index]).to(device)
    gcam.backward(ids=ids_)
    regions = gcam.generate(target_layer=target_layer)
    masks=[]
    for j in range(len(images)):
        mask = save_gradcam(
            gcam=regions[j, 0]
        )
        masks += [mask]
    # Single image in → single mask out (not a one-element list).
    if len(masks) == 1:
        return masks[0]
    return masks
def get_insert_query(table_name):
    """Build a SQL query to insert a RDF triple into a PostgreSQL dataset"""
    # NOTE: table_name is interpolated directly (identifiers cannot be bound
    # parameters); the caller must ensure it is trusted.
    return (
        f"INSERT INTO {table_name} (subject,predicate,object) "
        "VALUES (%s,%s,%s) "
        "ON CONFLICT (subject,predicate,object) DO NOTHING"
    )
import torch
def q_mult(q1, q2):
    """Quaternion multiplication (Hamilton product) of (w, x, y, z) tensors."""
    w1, x1, y1, z1 = q1[0], q1[1], q1[2], q1[3]
    w2, x2, y2, z2 = q2[0], q2[1], q2[2], q2[3]
    return torch.stack((
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        x1 * w2 + w1 * x2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ))
def is_object_group(group):
    """True if the group's object name is not one of the static names."""
    # `x not in y` is the idiomatic (and PEP 8 preferred) form of `not x in y`.
    return group.name.value not in (IMAGE, EXPERIMENT, OBJECT_RELATIONSHIPS)
import time
def plugin_poll(handle):
    """ Extracts data from the sensor and returns it in a JSON document as a Python dict.
    Available for poll mode only.
    Args:
        handle: handle returned by the plugin initialisation call
    Returns:
        returns a sensor reading in a JSON document, as a Python dict, if it is available
        None - If no reading is available
    Raises:
        Exception
    """
    # NOTE(review): _restart_config is declared global but not referenced in
    # this body — presumably used by _plugin_restart; confirm.
    global _handle, _restart_config
    bluetooth_adr = _handle['bluetoothAddress']['value']
    tag = _handle['tag']
    # '%M' in the asset prefix is substituted with the tag's MAC address.
    asset_prefix = '{}'.format(_handle['assetNamePrefix']['value']).replace('%M', bluetooth_adr)
    try:
        if not tag.is_connected:
            raise RuntimeError("SensorTagCC2650 {} not connected".format(bluetooth_adr))
        time_stamp = utils.local_timestamp()
        data = list()
        # In this method, cannot use "handle" as it might have changed due to restart. Hence use "_handle".
        if _handle['temperatureSensor']['value'] == 'true':
            # Temperature is sampled several times; only the last reading is kept.
            count = 0
            while count < SensorTagCC2650.reading_iterations:
                object_temp_celsius, ambient_temp_celsius = tag.hex_temp_to_celsius(
                    tag.char_read_hnd(_handle['characteristics']['temperature']['data']['handle'], "temperature"))
                time.sleep(0.5)  # wait for a while
                count = count + 1
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['temperatureSensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"object": object_temp_celsius, 'ambient': ambient_temp_celsius}
            })
        if _handle['luminanceSensor']['value'] == 'true':
            lux_luminance = tag.hex_lux_to_lux(
                tag.char_read_hnd(_handle['characteristics']['luminance']['data']['handle'], "luminance"))
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['luminanceSensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"lux": lux_luminance}
            })
        if _handle['humiditySensor']['value'] == 'true':
            rel_humidity, rel_temperature = tag.hex_humidity_to_rel_humidity(
                tag.char_read_hnd(_handle['characteristics']['humidity']['data']['handle'], "humidity"))
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['humiditySensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"humidity": rel_humidity, "temperature": rel_temperature}
            })
        if _handle['pressureSensor']['value'] == 'true':
            bar_pressure = tag.hex_pressure_to_pressure(
                tag.char_read_hnd(_handle['characteristics']['pressure']['data']['handle'], "pressure"))
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['pressureSensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"pressure": bar_pressure}
            })
        if _handle['movementSensor']['value'] == 'true':
            # One characteristic read yields gyro, accelerometer and
            # magnetometer values; each is emitted as its own asset.
            gyro_x, gyro_y, gyro_z, acc_x, acc_y, acc_z, mag_x, mag_y, mag_z, acc_range = tag.hex_movement_to_movement(
                tag.char_read_hnd(_handle['characteristics']['movement']['data']['handle'], "movement"))
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['gyroscopeSensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"x": gyro_x, "y": gyro_y, "z": gyro_z}
            })
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['accelerometerSensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"x": acc_x, "y": acc_y, "z": acc_z}
            })
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['magnetometerSensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"x": mag_x, "y": mag_y, "z": mag_z}
            })
        if _handle['batteryData']['value'] == 'true':
            battery_level = tag.get_battery_level(
                tag.char_read_hnd(_handle['characteristics']['battery']['data']['handle'], "battery"))
            data.append({
                'asset': '{}{}'.format(asset_prefix, _handle['batterySensorName']['value']),
                'timestamp': time_stamp,
                'readings': {"percentage": battery_level}
            })
    except (Exception, RuntimeError, pexpect.exceptions.TIMEOUT) as ex:
        # Any failure triggers a plugin restart before re-raising.
        _plugin_restart(bluetooth_adr)
        raise ex
    return data
def autodetect_camera() -> str:
    """Auto-detect camera using gphoto2.

    Returns:
        string returned from gphoto2.
    """
    _logger.debug('Auto detecting camera.')
    result = execute('gphoto2 --auto-detect')
    # A non-zero exit status means gphoto2 failed; surface its stderr.
    if not result.returncode:
        return result.stdout.decode()
    raise ValueError(result.stderr)
def info(token, customerid=None):
    """ Returns the info for your account
    :type token: string
    :param token: Your NodePing API token
    :type customerid: string
    :param customerid: Optional subaccount ID for your account
    :return: Return contents from the NodePing query
    :rtype: dict
    """
    url = "{0}accounts".format(API_URL)
    query_url = _utils.create_url(token, url, customerid)
    return _query_nodeping_api.get(query_url)
import math
def overlay(*fields: SampledField or Tensor) -> Tensor:
    """
    Specify that multiple fields should be drawn on top of one another in the same figure.
    The fields will be plotted in the order they are given, i.e. the last field on top.
    ```python
    vis.plot(vis.overlay(heatmap, points, velocity))
    ```
    Args:
        *fields: `SampledField` or `Tensor` instances
    Returns:
        Plottable object
    """
    # NOTE(review): `math` here must be phi.math (provides layout/channel),
    # not the stdlib math module — confirm the module's real import.
    stacked = math.layout(fields, math.channel('overlay'))
    return stacked
def load_identifier(value):
    """Return True when *value* is the literal string "y", else False."""
    # Direct comparison replaces the if/return-True/return-False pattern.
    return value == "y"
def IKinSpaceConstrained(screw_list, ee_home, ee_goal, theta_list,
    position_tolerance, rotation_tolerance, joint_mins, joint_maxs, max_iterations):
    """
    Calculates IK to a certain goal within joint rotation constraints.

    Iterative Newton-Raphson IK: repeatedly maps the twist error through the
    pseudo-inverse of the space Jacobian, clamping each joint to its limits.
    Args:
        screw_list: screw list
        ee_home: home end effector position
        ee_goal: Goal Position
        theta_list: Initial thetas
        position_tolerance: Positional tolerance
        rotation_tolerance: Rotational tolerance
        joint_mins: joint minimum rotations
        joint_maxs: joint maximum rotations
        max_iterations: Maximum Iterations before failure
    Returns:
        ndarray: joint configuration
        boolean: success
    """
    # Initial error twist between current FK pose and the goal.
    ee_current = FKinSpace(ee_home, screw_list, theta_list)
    error_vec = np.dot(Adjoint(ee_current),
                       se3ToVec(MatrixLog6(np.dot(TransInv(ee_current), ee_goal))))
    #print(mhp.MatrixLog6(np.dot(mhp.TransInv(ee_current), ee_goal)), "Test")
    # Error twist layout: [0:3] angular part, [3:6] linear part — TODO confirm
    # against the FKinSpace/se3ToVec conventions.
    error_bool = (np.linalg.norm(error_vec[0:3]) > position_tolerance or
                  np.linalg.norm(error_vec[3:6]) > rotation_tolerance)
    #if np.isnan(error_vec).any():
    #    error_bool = True
    i = 0
    while error_bool and i < max_iterations:
        jacobian_space = JacobianSpace(screw_list, theta_list)
        inverse_jacobian_space = np.linalg.pinv(jacobian_space)
        new_theta = np.dot(inverse_jacobian_space, error_vec)
        theta_list = theta_list + new_theta
        # Clamp each joint into its allowed range before the next iteration.
        for j in range(len(theta_list)):
            if theta_list[j] < joint_mins[j]:
                theta_list[j] = joint_mins[j]
            if theta_list[j] > joint_maxs[j]:
                theta_list[j] = joint_maxs[j];
        i = i + 1
        # Recompute the error twist for the updated configuration.
        ee_current = FKinSpace(ee_home, screw_list, theta_list)
        error_vec = np.dot(Adjoint(ee_current),
                           se3ToVec(MatrixLog6(np.dot(TransInv(ee_current), ee_goal))))
        error_bool = (np.linalg.norm(error_vec[0:3]) > position_tolerance or
                      np.linalg.norm(error_vec[3:6]) > rotation_tolerance)
        #if np.isnan(error_vec).any():
        #    error_bool = True
    success = not error_bool
    return theta_list, success
def add_hidden_range(*args):
    """
    add_hidden_range(ea1, ea2, description, header, footer, color) -> bool
    Mark a range of addresses as hidden. The range will be created in the
    invisible state with the default color
    @param ea1: linear address of start of the address range (C++: ea_t)
    @param ea2: linear address of end of the address range (C++: ea_t)
    @param description: range parameters (C++: const char *)
    @param header: range parameters (C++: const char *)
    @param footer: range parameters (C++: const char *)
    @param color (C++: bgcolor_t)
    @return: success
    """
    # Thin SWIG-generated wrapper: all arguments pass straight through to
    # the native IDA implementation.
    return _ida_bytes.add_hidden_range(*args)
import json
def _fetch_certs(request, certs_url):
    """Fetches certificates.
    Google-style cerificate endpoints return JSON in the format of
    ``{'key id': 'x509 certificate'}``.
    Args:
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        certs_url (str): The certificate endpoint URL.
    Returns:
        Mapping[str, str]: A mapping of public key ID to x.509 certificate
            data.
    """
    response = request(certs_url, method='GET')
    # Anything other than HTTP 200 is treated as a transport failure.
    if response.status != http_client.OK:
        message = 'Could not fetch certificates at {}'.format(certs_url)
        raise exceptions.TransportError(message)
    return json.loads(response.data.decode('utf-8'))
def get_mock_personalization_dict():
    """Get a dict of personalization mock."""
    # Built as a single literal; key set and values match the original.
    return {
        'to_list': [To("test1@example.com", "Example User"),
                    To("test2@example.com", "Example User")],
        'cc_list': [To("test3@example.com", "Example User"),
                    To("test4@example.com", "Example User")],
        'bcc_list': [To("test5@example.com"),
                     To("test6@example.com")],
        'subject': ("Hello World from the Personalized "
                    "SendGrid Python Library"),
        'headers': [Header("X-Test", "test"),
                    Header("X-Mock", "true")],
        'substitutions': [Substitution("%name%", "Example User"),
                          Substitution("%city%", "Denver")],
        'custom_args': [CustomArg("user_id", "343"),
                        CustomArg("type", "marketing")],
        'send_at': 1443636843,
    }
def load_python_bindings(python_input):
    """
    Custom key bindings.

    Builds and returns the prompt_toolkit KeyBindings used by the ptpython
    REPL represented by `python_input`.
    """
    bindings = KeyBindings()
    sidebar_visible = Condition(lambda: python_input.show_sidebar)
    handle = bindings.add
    @handle("c-l")
    def _(event):
        """
        Clear whole screen and render again -- also when the sidebar is visible.
        """
        event.app.renderer.clear()
    @handle("c-z")
    def _(event):
        """
        Suspend.
        """
        if python_input.enable_system_bindings:
            event.app.suspend_to_background()
    # Delete word before cursor, but use all Python symbols as separators
    # (WORD=False).
    handle("c-w")(get_by_name("backward-kill-word"))
    @handle("f2")
    def _(event):
        """
        Show/hide sidebar.
        """
        python_input.show_sidebar = not python_input.show_sidebar
        if python_input.show_sidebar:
            event.app.layout.focus(python_input.ptpython_layout.sidebar)
        else:
            event.app.layout.focus_last()
    @handle("f3")
    def _(event):
        """
        Select from the history.
        """
        python_input.enter_history()
    @handle("f4")
    def _(event):
        """
        Toggle between Vi and Emacs mode.
        """
        python_input.vi_mode = not python_input.vi_mode
    @handle("f6")
    def _(event):
        """
        Enable/Disable paste mode.
        """
        python_input.paste_mode = not python_input.paste_mode
    @handle(
        "tab", filter=~sidebar_visible & ~has_selection & tab_should_insert_whitespace
    )
    def _(event):
        """
        When tab should insert whitespace, do that instead of completion.
        """
        event.app.current_buffer.insert_text("    ")
    @Condition
    def is_multiline():
        # True when the default buffer currently holds multi-line Python.
        return document_is_multiline_python(python_input.default_buffer.document)
    @handle(
        "enter",
        filter=~sidebar_visible
        & ~has_selection
        & (vi_insert_mode | emacs_insert_mode)
        & has_focus(DEFAULT_BUFFER)
        & ~is_multiline,
    )
    @handle(Keys.Escape, Keys.Enter, filter=~sidebar_visible & emacs_mode)
    def _(event):
        """
        Accept input (for single line input).
        """
        b = event.current_buffer
        if b.validate():
            # When the cursor is at the end, and we have an empty line:
            # drop the empty lines, but return the value.
            b.document = Document(
                text=b.text.rstrip(), cursor_position=len(b.text.rstrip())
            )
            b.validate_and_handle()
    @handle(
        "enter",
        filter=~sidebar_visible
        & ~has_selection
        & (vi_insert_mode | emacs_insert_mode)
        & has_focus(DEFAULT_BUFFER)
        & is_multiline,
    )
    def _(event):
        """
        Behaviour of the Enter key.
        Auto indent after newline/Enter.
        (When not in Vi navigaton mode, and when multiline is enabled.)
        """
        b = event.current_buffer
        # accept_input_on_enter == None means "never accept on Enter alone".
        empty_lines_required = python_input.accept_input_on_enter or 10000
        def at_the_end(b):
            """we consider the cursor at the end when there is no text after
            the cursor, or only whitespace."""
            text = b.document.text_after_cursor
            return text == "" or (text.isspace() and not "\n" in text)
        if python_input.paste_mode:
            # In paste mode, always insert text.
            b.insert_text("\n")
        elif at_the_end(b) and b.document.text.replace(" ", "").endswith(
            "\n" * (empty_lines_required - 1)
        ):
            # When the cursor is at the end, and we have an empty line:
            # drop the empty lines, but return the value.
            if b.validate():
                b.document = Document(
                    text=b.text.rstrip(), cursor_position=len(b.text.rstrip())
                )
                b.validate_and_handle()
        else:
            auto_newline(b)
    @handle(
        "c-d",
        filter=~sidebar_visible
        & has_focus(python_input.default_buffer)
        & Condition(
            lambda:
            # The current buffer is empty.
            not get_app().current_buffer.text
        ),
    )
    def _(event):
        """
        Override Control-D exit, to ask for confirmation.
        """
        if python_input.confirm_exit:
            # Show exit confirmation and focus it (focusing is important for
            # making sure the default buffer key bindings are not active).
            python_input.show_exit_confirmation = True
            python_input.app.layout.focus(
                python_input.ptpython_layout.exit_confirmation
            )
        else:
            event.app.exit(exception=EOFError)
    @handle("c-c", filter=has_focus(python_input.default_buffer))
    def _(event):
        " Abort when Control-C has been pressed. "
        event.app.exit(exception=KeyboardInterrupt, style="class:aborting")
    return bindings
def batch_retrieve_pipeline_s3(pipeline_upload):
    """ Data is returned in the form (chunk_object, file_data). """
    study = Study.objects.get(id=pipeline_upload.study_id)
    file_data = s3_retrieve(pipeline_upload.s3_path,
                            study.object_id,
                            raw_path=True)
    return pipeline_upload, file_data
import re
import os
def indent(instr, nspaces=4, ntabs=0, flatten=False):
    """Indent a string a given number of spaces or tabstops.

    indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.

    Parameters
    ----------
    instr : str
        The string to be indented.
    nspaces : int (default: 4)
        The number of spaces to be indented.
    ntabs : int (default: 0)
        The number of tabs to be indented.
    flatten : bool (default: False)
        Whether to scrub existing indentation. If True, all lines will be
        aligned to the same indentation. If False, existing indentation will
        be strictly increased.

    Returns
    -------
    str indented by ntabs and nspaces, or None if instr was None.
    """
    if instr is None:
        return
    prefix = '\t' * ntabs + ' ' * nspaces
    # flatten replaces any existing leading whitespace; otherwise the prefix
    # is simply inserted at each line start.
    pattern = re.compile(r'^\s*' if flatten else r'^', re.MULTILINE)
    result = pattern.sub(prefix, instr)
    # Strip the prefix added after a trailing line separator.
    if result.endswith(os.linesep + prefix):
        return result[:-len(prefix)]
    return result
def gat(gw,
        feature,
        hidden_size,
        activation,
        name,
        num_heads=8,
        feat_drop=0.6,
        attn_drop=0.6,
        is_test=False):
    """Implementation of graph attention networks (GAT)
    This is an implementation of the paper GRAPH ATTENTION NETWORKS
    (https://arxiv.org/abs/1710.10903).
    Args:
        gw: Graph wrapper object (:code:`StaticGraphWrapper` or :code:`GraphWrapper`)
        feature: A tensor with shape (num_nodes, feature_size).
        hidden_size: The hidden size for gat.
        activation: The activation for the output.
        name: Gat layer names.
        num_heads: The head number in gat.
        feat_drop: Dropout rate for feature.
        attn_drop: Dropout rate for attention.
        is_test: Whether in test phrase.
    Return:
        A tensor with shape (num_nodes, hidden_size * num_heads)
    """
    def send_attention(src_feat, dst_feat, edge_feat):
        # Per-edge attention logit: sum of source and destination scores,
        # passed through LeakyReLU as in the GAT paper (eq. 3).
        output = src_feat["left_a"] + dst_feat["right_a"]
        output = fluid.layers.leaky_relu(
            output, alpha=0.2)  # (num_edges, num_heads)
        return {"alpha": output, "h": src_feat["h"]}
    def reduce_attention(msg):
        # Softmax the logits over each node's incoming edges, then take the
        # attention-weighted sum of neighbor features per head.
        alpha = msg["alpha"]  # lod-tensor (batch_size, seq_len, num_heads)
        h = msg["h"]
        alpha = paddle_helper.sequence_softmax(alpha)
        old_h = h
        h = fluid.layers.reshape(h, [-1, num_heads, hidden_size])
        alpha = fluid.layers.reshape(alpha, [-1, num_heads, 1])
        if attn_drop > 1e-15:
            alpha = fluid.layers.dropout(
                alpha,
                dropout_prob=attn_drop,
                is_test=is_test,
                dropout_implementation="upscale_in_train")
        h = h * alpha
        h = fluid.layers.reshape(h, [-1, num_heads * hidden_size])
        # Restore the LoD info lost by reshape before pooling per node.
        h = fluid.layers.lod_reset(h, old_h)
        return fluid.layers.sequence_pool(h, "sum")
    if feat_drop > 1e-15:
        feature = fluid.layers.dropout(
            feature,
            dropout_prob=feat_drop,
            is_test=is_test,
            dropout_implementation='upscale_in_train')
    # Shared linear projection, one slice of hidden_size per head.
    ft = fluid.layers.fc(feature,
                         hidden_size * num_heads,
                         bias_attr=False,
                         param_attr=fluid.ParamAttr(name=name + '_weight'))
    # Learnable attention vectors for the source ("left") and destination
    # ("right") halves of the concatenated attention in the paper.
    left_a = fluid.layers.create_parameter(
        shape=[num_heads, hidden_size],
        dtype='float32',
        name=name + '_gat_l_A')
    right_a = fluid.layers.create_parameter(
        shape=[num_heads, hidden_size],
        dtype='float32',
        name=name + '_gat_r_A')
    reshape_ft = fluid.layers.reshape(ft, [-1, num_heads, hidden_size])
    left_a_value = fluid.layers.reduce_sum(reshape_ft * left_a, -1)
    right_a_value = fluid.layers.reduce_sum(reshape_ft * right_a, -1)
    # Message passing: send per-edge logits, then aggregate per node.
    msg = gw.send(
        send_attention,
        nfeat_list=[("h", ft), ("left_a", left_a_value),
                    ("right_a", right_a_value)])
    output = gw.recv(msg, reduce_attention)
    # NOTE(review): bias has stop_gradient = True, so it stays at its
    # initial value and is never trained — confirm this is intentional.
    bias = fluid.layers.create_parameter(
        shape=[hidden_size * num_heads],
        dtype='float32',
        is_bias=True,
        name=name + '_bias')
    bias.stop_gradient = True
    output = fluid.layers.elementwise_add(output, bias, act=activation)
    return output
def _number(string):
"""
Extracts an int from a string.
Returns a 0 if None or an empty string was passed.
"""
if not string:
return 0
else:
try:
return int(string)
except ValueError:
return float(string) | d14a7a04f33f36efd995b74bc794fce2c5f4be97 | 27,530 |
def get_email(sciper):
    """
    Return email of user
    """
    attribute = 'mail'
    pattern = '(uniqueIdentifier={})'.format(sciper)
    response = LDAP_search(pattern_search=pattern, attribute=attribute)
    try:
        return get_attribute(response, attribute)
    except Exception:
        # No 'mail' attribute in the LDAP response for this sciper.
        raise EpflLdapException("No email address corresponds to sciper {}".format(sciper))
import math
def convert_size(size):
    """ Size should be in bytes.
    Return a tuple (float_or_int_val, str_unit) """
    if size == 0:
        return (0, "B")
    KILOBYTE = 1024
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Largest power of 1024 that fits into `size` selects the unit.
    exponent = int(math.floor(math.log(size, KILOBYTE)))
    scaled = round(size / KILOBYTE ** exponent, 2)
    return (scaled, units[exponent])
def _backward(gamma, mask):
    """Backward recurrence of the linear chain crf.

    Follows the Viterbi backpointers in `gamma` from the last timestep to
    the first to recover the best tag sequence.
    NOTE(review): assumes gamma holds per-step backpointer indices of shape
    (B, T, N) — confirm against the producing forward pass.
    """
    gamma = K.cast(gamma, 'int32')  # (B, T, N)
    def _backward_step(gamma_t, states):
        # print('len(states)=', len(states))
        # print(type(states))
        # y_tm1 = K.squeeze(states[0], 0)
        # Look up this step's tag from the previous step's chosen tag.
        y_tm1 = states[0]
        y_t = batch_gather(gamma_t, y_tm1)
        # return y_t, [K.expand_dims(y_t, 0)]
        # return K.expand_dims(y_t, 0), [K.expand_dims(y_t, 0)]
        return y_t, [y_t]
    # initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)] # (1, B)
    initial_states = [K.zeros_like(gamma[:, 0, 0])]  # (1, B)
    # Scan right-to-left, then flip the result back to left-to-right order.
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)
    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y
def t3err(viserr, N=7):
    """ provided visibilities, this put these into triple product

    Propagates per-baseline visibility errors into triple-product (closure)
    errors by adding the three baseline errors of each triangle in
    quadrature. Returns an array of length C(N, 3).
    NOTE(review): assumes populate_symmamparray builds a symmetric NxN
    baseline-error matrix from `viserr` — confirm.
    """
    amparray = populate_symmamparray(viserr, N=N)
    t3viserr = np.zeros(int(comb(N, 3)))
    nn = 0
    # Enumerate all index triangles (kk, ii+kk+1, jj+ii+kk+2) with kk<ii<jj.
    for kk in range(N - 2):
        for ii in range(N - kk - 2):
            for jj in range(N - kk - ii - 2):
                # Quadrature sum of the triangle's three baseline errors.
                t3viserr[nn + jj] = np.sqrt(amparray[kk, ii + kk + 1]**2 \
                       + amparray[ii + kk + 1, jj + ii + kk + 2]**2 \
                       + amparray[jj + ii + kk + 2, kk]**2)
            nn = nn + jj + 1
    return t3viserr
import torch
from typing import Tuple
def compute_average_ml_vae(z0_mean: torch.Tensor, z0_var: torch.Tensor, z1_mean: torch.Tensor, z1_var: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Compute the product of the encoder distributions.
    - Ada-ML-VAE Averaging function
    (✓) Visual inspection against reference implementation:
        https://github.com/google-research/disentanglement_lib (MLVae.model_fn)
    """
    # Precisions: a diagonal covariance inverts element-wise (E^-1 = 1 / E).
    # https://proofwiki.org/wiki/Inverse_of_Diagonal_Matrix
    z0_precision = z0_var.reciprocal()
    z1_precision = z1_var.reciprocal()
    # Averaged variance: 2 / (E0^-1 + E1^-1)
    # (disentanglement_lib: ave_var = 2 * z0_var * z1_var / (z0_var + z1_var))
    ave_var = 2 * (z0_precision + z1_precision).reciprocal()
    # Precision-weighted mean: u = (u0 E0^-1 + u1 E1^-1) * E / 2
    ave_mean = (z0_mean * z0_precision + z1_mean * z1_precision) * ave_var * 0.5
    return ave_mean, ave_var
def safe_sign_and_autofill_transaction(
    transaction: Transaction, wallet: Wallet, client: Client
) -> Transaction:
    """
    Signs a transaction locally, without trusting external rippled nodes.
    Autofills relevant fields before signing.

    Args:
        transaction: the transaction to be signed.
        wallet: the wallet with which to sign the transaction.
        client: a network client.

    Returns:
        The signed transaction.
    """
    # Autofill first (fee, sequence, ...), then sign the completed payload.
    autofilled = _autofill_transaction(transaction, client)
    return safe_sign_transaction(autofilled, wallet)
def getMetricValue(glyph, attr):
    """
    Get the metric value for an attribute, resolving to the angled
    variant of the attribute when the font requires it.
    """
    resolvedAttr = getAngledAttrIfNecessary(glyph.font, attr)
    return getattr(glyph, resolvedAttr)
from typing import Optional
from typing import Union
def temporal_train_test_split(
    y: ACCEPTED_Y_TYPES,
    X: Optional[pd.DataFrame] = None,
    test_size: Optional[Union[int, float]] = None,
    train_size: Optional[Union[int, float]] = None,
    fh: Optional[FORECASTING_HORIZON_TYPES] = None,
) -> SPLIT_TYPE:
    """Split arrays or matrices into sequential train and test subsets.

    Creates train/test splits over endogenous arrays and optional exogenous
    arrays. This is a wrapper of scikit-learn's ``train_test_split`` that
    does not shuffle the data.

    Parameters
    ----------
    y : pd.Series
        Target series
    X : pd.DataFrame, optional (default=None)
        Exogenous data
    test_size : float, int or None, optional (default=None)
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        relative number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.25.
    train_size : float, int, or None, (default=None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the relative number of train samples. If None,
        the value is automatically set to the complement of the test size.
    fh : ForecastingHorizon
        If given, the split is derived from the horizon instead of sizes.

    Returns
    -------
    splitting : tuple, length=2 * len(arrays)
        List containing train-test split of `y` and `X` if given.

    References
    ----------
    ..[1] adapted from https://github.com/alkaline-ml/pmdarima/
    """
    if fh is not None:
        if test_size is not None or train_size is not None:
            raise ValueError(
                "If `fh` is given, `test_size` and `train_size` cannot "
                "also be specified."
            )
        return _split_by_fh(y, fh, X=X)

    pd_format = isinstance(y, (pd.Series, pd.DataFrame))
    if pd_format and isinstance(y.index, pd.MultiIndex):
        ys = get_time_index(y)
        # Group across all index levels except the (last) time level.
        yi_grp = y.index.names[0:-1]
        # Split the time index only, without shuffling.
        yret = _train_test_split(
            ys,
            shuffle=False,
            stratify=None,
            test_size=test_size,
            train_size=train_size,
        )
        # Map the split time values back to row positions. Use sets for
        # O(1) membership tests instead of the original O(n) list scans,
        # which made this step quadratic.
        ysl = ys.to_list()
        train_times = set(yret[0].to_list())
        test_times = set(yret[1].to_list())
        p1 = [ix for ix, item in enumerate(ysl) if item in train_times]
        p2 = [ix for ix, item in enumerate(ysl) if item in test_times]
        # Subset each group by the identified positional indices.
        y_train = y.groupby(yi_grp, as_index=False).nth(p1)
        y_test = y.groupby(yi_grp, as_index=False).nth(p2)
        if X is not None:
            X_train = X.groupby(yi_grp, as_index=False).nth(p1)
            X_test = X.groupby(yi_grp, as_index=False).nth(p2)
            return y_train, y_test, X_train, X_test
        return y_train, y_test

    series = (y,) if X is None else (y, X)
    return _train_test_split(
        *series,
        shuffle=False,
        stratify=None,
        test_size=test_size,
        train_size=train_size,
    )
import re
def split_range_str(range_str):
    """
    Split an HTTP Range request string into its unit, start and end.

    :param range_str: Range request string, e.g. ``"bytes=0-499"``
    :return: tuple of (unit, start, end) where start/end are ints or None,
             or None if the string is not a valid range expression
    """
    match = re.fullmatch(r'([a-z]+)=(\d+)?-(\d+)?', range_str)
    if match is None:
        # Invalid syntax — the original also checked len(groups()) != 3,
        # but the pattern always yields exactly 3 groups on a match.
        return None
    unit, start, end = match.groups()
    # Unmatched optional groups come back as None; convert digits to int.
    start = int(start) if start is not None else None
    end = int(end) if end is not None else None
    return unit, start, end
from typing import Union
def get_valid_extent(array: Union[np.ndarray, np.ma.masked_array]) -> tuple:
    """
    Return (rowmin, rowmax, colmin, colmax): the first/last row/column of
    `array` that contain valid pixels.
    """
    # Boolean arrays are treated directly as validity masks; anything else
    # goes through get_mask() and is inverted so True marks valid data.
    if array.dtype == 'bool':
        valid_mask = array
    else:
        valid_mask = ~get_mask(array)
    rows_nonzero = np.flatnonzero(np.count_nonzero(valid_mask, axis=1))
    cols_nonzero = np.flatnonzero(np.count_nonzero(valid_mask, axis=0))
    return rows_nonzero[0], rows_nonzero[-1], cols_nonzero[0], cols_nonzero[-1]
import os
def get_downloaded_dataset_ids(preprocess=Preprocess.RAW):
    """Get all dataset ids in the corresponding preprocessed data folder.

    Filenames are expected to be ``<int id><OLD_PICKLE_EXT>``; ids are
    returned in filename-sorted order.
    """
    dataset_filenames = os.listdir(get_folder(preprocess))
    dataset_filenames.sort()
    ids = []
    for filename in dataset_filenames:
        # Strip the extension as a literal suffix. The original used
        # str.rstrip(OLD_PICKLE_EXT), which removes a trailing *character
        # set* rather than the suffix and can over-strip filenames.
        if filename.endswith(OLD_PICKLE_EXT):
            filename = filename[: -len(OLD_PICKLE_EXT)]
        ids.append(int(filename))
    return ids
def epsmu2nz(eps, mu):#{{{
    """ Accepts permittivity and permeability, returns effective index of refraction and impedance"""
    # Refractive index, with the branch picked by the sign of its imaginary
    # part. NOTE(review): for a purely real index np.sign(0) == 0 zeroes it
    # out entirely — presumably only lossy media are expected; confirm.
    refr_index = np.sqrt(eps * mu)
    refr_index = refr_index * np.sign(refr_index.imag)
    impedance = np.sqrt(mu / eps)
    return refr_index, impedance
from typing import List
import re
def tokenize_numbers(text_array: List[str]) -> List[str]:
    """
    Splits large comma-separated numbers and floating point values.
    This is done by replacing commas with ' @,@ ' and dots with ' @.@ '.

    Args:
        text_array: An already tokenized text as list

    Returns:
        A list of strings with tokenized numbers

    Example::
        >>> tokenize_numbers(["$", "5,000", "1.73", "m"])
        ["$", "5", "@,@", "000", "1", "@.@", "73", "m"]
    """
    # MATCH_NUMBERS is a (pattern, replacement) pair; unpack it once here
    # instead of on every loop iteration as the original did.
    pattern, replacement = MATCH_NUMBERS
    tokenized = []
    for token in text_array:
        # The substitution inserts spaces, so split to yield separate tokens.
        tokenized.extend(re.sub(pattern, replacement, token).split())
    return tokenized
def main(
    datapath,
    kwdpath,
    in_annot,
    note_types,
    batch,
):
    """
    Select notes for an annotation batch, convert them to CoNLL format and save them in folders per annotator. The overview of the batch is saved as a pickled DataFrame.

    Parameters
    ----------
    datapath: Path
        path to raw data main folder
    kwdpath: Path
        path to the xlsx keywords file
    in_annot: list
        list of paths to batch pkl's that are currently in annotation and haven't been processed yet (these notes are excluded from the selection)
    note_types: {list, None}
        list of note types to select; if None, all note types are selected
    batch: str
        name of the batch

    Returns
    -------
    None
    """
    # load raw data
    print("Loading raw data...")
    all_2017 = pd.read_pickle(datapath / '2017_raw/processed.pkl')
    all_2018 = pd.read_pickle(datapath / '2018_raw/processed.pkl')
    all_2020 = pd.read_pickle(datapath / '2020_raw/processed.pkl')
    cov_2020 = pd.read_pickle(datapath / '2020_raw/ICD_U07.1/notes_[U07.1]_2020_q1_q2_q3.pkl')
    non_cov_2020 = remove_on_multikeys(all_2020, cov_2020, ['MDN', 'NotitieID'])
    data = {'2017': all_2017, '2018': all_2018, 'cov_2020': cov_2020, 'non_cov_2020': non_cov_2020}
    # annotated to exclude
    print("Loading annotated and 'in annotation'...")
    annotated = pd.read_csv(datapath / 'annotated_notes_ids.csv', dtype={'MDN': str, 'NotitieID': str})
    in_annotation = pd.concat([pd.read_pickle(f) for f in in_annot])
    # Series.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat (already used above) is the drop-in replacement.
    exclude = pd.concat([annotated.NotitieID, in_annotation.NotitieID])
    # exclude annotated and sample / select specific note types
    def exclude_annotated_and_sample(df, annotated, n_sample=50000, random_state=45):
        # Drop already-annotated notes, then cap the pool at n_sample rows.
        print(f"Before exclusion: {len(df)=}")
        df = df.loc[~df.NotitieID.isin(annotated)].copy()
        print(f"After exclusion: {len(df)=}")
        if len(df) > n_sample:
            df = df.sample(n_sample, random_state=random_state)
            print(f"After sampling: {len(df)=}")
        return df
    def exclude_annotated_and_select_type(df, annotated, note_types):
        # Drop already-annotated notes, then keep only the requested types.
        print(f"Before exclusion: {len(df)=}")
        df = df.loc[~df.NotitieID.isin(annotated)].copy()
        print(f"After exclusion: {len(df)=}")
        df = df.query(f"Typenotitie == {note_types}")
        print(f"After type selection: {len(df)=}")
        return df
    if note_types is None:
        for source, df in data.items():
            print(f"{source}:")
            data[source] = exclude_annotated_and_sample(df, exclude)
    else:
        for source, df in data.items():
            print(f"{source}:")
            data[source] = exclude_annotated_and_select_type(df, exclude, note_types=note_types)
    # keywords search
    keywords = pd.read_excel(kwdpath)
    keywords['regex'] = keywords.apply(lambda row: get_regex(row.keyword, row.regex_template_id), axis=1)
    reg_dict = get_reg_dict(keywords)
    print("Looking for keyword matches...")
    for source, df in data.items():
        data[source] = find_keywords(df, reg_dict)
    # select notes
    print("Selecting notes for the batch...")
    batch_args = BATCH_SETTINGS[batch]
    df = select_notes(data, **batch_args)
    tab = df.pivot_table(
        index=['annotator'],
        columns=['source', 'samp_meth'],
        values='NotitieID',
        aggfunc='count',
        margins=True,
        margins_name='Total',
    ).to_string()
    print(f"Batch overview:\n{tab}")
    # save batch info df
    pklpath = PATHS.getpath('data_to_inception_conll') / f"{batch}.pkl"
    df.to_pickle(pklpath)
    print(f"Batch df is saved: {pklpath}")
    # convert to conll and save in folder per annotator
    conllpath = PATHS.getpath('data_to_inception_conll')
    nlp = spacy.load('nl_core_news_sm')
    annotators = BATCH_SETTINGS[batch]["annotators"]
    for annotator in annotators:
        outdir = conllpath / batch / annotator
        outdir.mkdir(exist_ok=True, parents=True)
        print(f"Converting notes to CoNLL and saving in {outdir}")
        annot = df.query("annotator == @annotator")
        annot.apply(row_to_conllfile, axis=1, nlp=nlp, outdir=outdir, batch=batch)
    print("Done!")
def mask_to_bbox(mask):
    """ Convert mask to bounding box (x, y, w, h).

    Args:
        mask (np.ndarray): mask with 0 and 255.

    Returns:
        List: [x, y, w, h] of the largest-area contour's bounding rect,
        or [0, 0, 0, 0] when the mask yields no contours.
    """
    mask = mask.astype(np.uint8)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    rects = [cv2.boundingRect(contour) for contour in contours]
    if not rects:
        return [0, 0, 0, 0]
    # max() keeps the first rectangle with the largest area, matching the
    # strict ">" comparison of the original manual scan.
    best = max(rects, key=lambda r: r[2] * r[3])
    return list(best)
def find_divisor(n, limit=1000000):
    """
    Use a prime sieve to find the first (smallest) prime divisor of ``n``.

    :param n: number to factor
    :param limit: sieve upper bound
    :return: smallest prime divisor of ``n`` found within ``limit``
    :raises ValueError: if no prime below ``limit`` divides ``n``
        (ValueError is a subclass of Exception, so callers catching the
        original generic Exception still work)
    """
    for prime in get_primes(limit):
        if n % prime == 0:
            return prime
    raise ValueError(f"No divisors found in range {limit}")
def sort_all(batch, lens):
    """ Sort all fields by descending order of lens, and return the original indices. """
    if batch == [[]]:
        return [[]], []
    # Order positions by length descending; ties are broken by the higher
    # original index first — identical to a full reverse sort on
    # (length, index, fields...), where unique indices shield the fields
    # from ever being compared.
    order = sorted(range(len(lens)), key=lambda i: (lens[i], i), reverse=True)
    sorted_batch = [[field[i] for i in order] for field in batch]
    return sorted_batch, order
def bh(p, fdr):
    """ From vector of p-values and desired false discovery rate,
    returns indices of significant p-values with Benjamini-Hochberg correction.

    Implements the BH *step-up* procedure: find the largest rank k such
    that p_(k) <= k/m * fdr and reject all hypotheses of rank <= k
    (Benjamini & Hochberg, 1995). The original loop broke at the first
    failing rank, which under-rejects whenever a later rank passes again.

    Returns indices into ``p`` in increasing order of p-value.
    """
    p = np.asarray(p)
    m = p.shape[0]
    if m == 0:
        return np.array([], dtype=int)
    order = np.argsort(p)
    # Rank-wise rejection thresholds: k/m * fdr for k = 1..m.
    thresholds = np.arange(1, m + 1) / float(m) * fdr
    passing = np.nonzero(p[order] <= thresholds)[0]
    if passing.size == 0:
        return np.array([], dtype=int)
    # Reject everything up to and including the largest passing rank.
    k_max = passing[-1]
    return np.asarray(order[: k_max + 1], dtype=int)
import sys
import os
import asyncio
def runner(fun, *args):
    """
    Generic asyncio.run() equivalent for Python >= 3.5.
    """
    if sys.version_info >= (3, 7):
        # Pre-3.8 Windows needs the proactor loop selected explicitly;
        # it became the default policy in 3.8.
        if os.name == "nt" and sys.version_info < (3, 8):
            asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
        return asyncio.run(fun(*args))
    # Legacy path (3.5/3.6): build and drive an event loop by hand.
    loop = asyncio.ProactorEventLoop() if os.name == "nt" else asyncio.new_event_loop()
    asyncio.get_child_watcher().attach_loop(loop)
    outcome = loop.run_until_complete(fun(*args))
    loop.close()
    return outcome
from datetime import datetime
def guess_if_last(lmsg):
    """Guesses if message is the last one in a group.

    Returns True when the message's date (the part of the ISO timestamp
    before 'T') falls within the last day, False otherwise.
    """
    day_str, _, _ = lmsg['datetime'].partition('T')
    msg_day = datetime.datetime.strptime(day_str, '%Y-%m-%d')
    cutoff = datetime.datetime.today() - datetime.timedelta(days=1)
    if msg_day >= cutoff:
        return True
    return False
import time
def refine_by_split(featIds, n, m, topo_rules, grid_layer, progress_bar = None, labelIter = None ) :
    """
    Description
    ----------
    Split input_features in grid_layer and check their topology
    Parameters
    ----------
    featIds : ids of features from grid_layer to be refined
    n : number of split for selected cells in the horizontal direction
    m : number of split for selected cells in the vertical direction
    topo_rules : topological rules for the propagation of refinement
    grid_layer : grid layer to be refined
    progress_bar : progress bar in dialog
    labelIter : iteration label in dialog
    Returns
    -------
    Nothing, just grid_layer is updated
    Examples
    --------
    >>>
    """
    start_time = time.time()
    # -- Procedure for regular structured grids (MODFLOW , n_max = 1)
    # With nmax == 1 the grid must stay structured, so a cell split forces
    # the split of its entire row and/or column.
    if topo_rules['nmax'] == 1 :
        # build feature dictionary
        all_features = {feature.id(): feature for feature in grid_layer.getFeatures()}
        # init fix dictionary
        rowFixDict = { 'id': [] , 'n':[], 'm':[] }
        colFixDict = { 'id': [] , 'n':[], 'm':[] }
        # Initialize spatial index
        grid_layerIndex = QgsSpatialIndex(grid_layer.getFeatures())
        # get bbox of grid layer
        grid_bbox = grid_layer.extent()
        # iterate over initial feature set
        # -- cells that have to be split horizontally
        if n > 1 :
            for featId in featIds :
                # only consider featId if current row has not been considered before
                if featId not in rowFixDict['id'] :
                    # build bounding box over row
                    # (stretch x to the full grid extent; shrink y by TOLERANCE
                    # so neighboring rows are not picked up by the intersection)
                    bbox = all_features[featId].geometry().boundingBox()
                    bbox.setXMinimum( grid_bbox.xMinimum() )
                    bbox.setXMaximum( grid_bbox.xMaximum() )
                    bbox.setYMinimum( bbox.yMinimum() + TOLERANCE )
                    bbox.setYMaximum( bbox.yMaximum() - TOLERANCE )
                    # get features in current row
                    rowFeatIds = grid_layerIndex.intersects( bbox )
                    # update fix_dict with features in current row
                    this_fix_dict = { 'id':rowFeatIds , 'n':[n]*len(rowFeatIds), 'm':[1]*len(rowFeatIds) }
                    # NOTE(review): result bound to misspelled name `rowFixtDict`
                    # and never read; this only works if update_fix_dict mutates
                    # its first argument in place — TODO confirm.
                    rowFixtDict = update_fix_dict(rowFixDict,this_fix_dict)
        # -- cells that have to be split along columns
        if m > 1 :
            for featId in featIds :
                # only consider featId if current row has not been considered before
                if featId not in colFixDict['id'] :
                    # build bounding box over column
                    # (stretch y to the full grid extent; shrink x by TOLERANCE)
                    bbox = all_features[featId].geometry().boundingBox()
                    bbox.setXMinimum( bbox.xMinimum() + TOLERANCE )
                    bbox.setXMaximum( bbox.xMaximum() - TOLERANCE )
                    bbox.setYMinimum( grid_bbox.yMinimum() )
                    bbox.setYMaximum( grid_bbox.yMaximum() )
                    # get features in current column
                    colFeatIds = grid_layerIndex.intersects( bbox )
                    # update fix_dict with features in current column
                    this_fix_dict = { 'id':colFeatIds , 'n':[1]*len(colFeatIds), 'm':[m]*len(colFeatIds) }
                    # NOTE(review): same misspelling as above (`colFixtDict`);
                    # relies on in-place mutation of colFixDict — TODO confirm.
                    colFixtDict = update_fix_dict(colFixDict,this_fix_dict)
        # NOTE(review): .copy() is shallow — fix_dict shares the inner lists
        # with rowFixDict, so update_fix_dict may mutate both. TODO confirm.
        fix_dict = rowFixDict.copy()
        fix_dict = update_fix_dict(fix_dict,colFixDict)
        newFeatIds = split_cells(fix_dict, grid_layer)
        #print("OPTIM OVER %s sec" % (time.time() - start_time))
        # NOTE(review): `return()` returns an empty tuple, not None.
        return()
    # -- Refinement procedure for nested grids
    # Iteratively split the requested cells, then propagate splits to
    # neighbors until the topology rules are satisfied.
    # init iteration counter
    itCount = 0
    # init fix dict
    fix_dict = { 'id': featIds , 'n':[n]*len(featIds), 'm':[m]*len(featIds) }
    # Continue until input_features is empty
    while len(fix_dict['id']) > 0:
        # Split input_features
        newFeatIds = split_cells(fix_dict, grid_layer)
        # Get all the features
        all_features = {feature.id(): feature for feature in grid_layer.getFeatures()}
        # Initialize spatial index
        grid_layerIndex = QgsSpatialIndex()
        # Fill spatial Index
        for feat in all_features.values():
            grid_layerIndex.insertFeature(feat)
        # re-initialize the list of features to be fixed
        fix_dict = { 'id':[] , 'n':[], 'm':[] }
        # Initialize progress bar
        if progress_bar is not None :
            progress_bar.setRange(0,100)
            progress_bar.setValue(0)
        count = 0
        countMax = len(newFeatIds)
        countUpdate = countMax * 0.05 # update each 5%
        # Iterate over newFeatures to check topology
        for newFeatId in newFeatIds:
            # Get the neighbors of newFeatId that must be fixed
            this_fix_dict = check_topo( newFeatId, n, m, topo_rules, all_features, grid_layer, grid_layerIndex)
            # Update fix_dict with this_fix_dict
            fix_dict = update_fix_dict(fix_dict,this_fix_dict)
            # update counter
            count += 1
            # update progress_bar
            if int( np.fmod( count, countUpdate ) ) == 0:
                prog = int( count / countMax * 100 )
                if progress_bar is not None :
                    progress_bar.setValue(prog)
                QApplication.processEvents()
        if progress_bar is not None :
            progress_bar.setValue(100)
        # Update iteration counter
        itCount+=1
        # NOTE(review): `unicode` is a Python-2 builtin; this line raises
        # NameError on Python 3 — TODO confirm target QGIS/Python version.
        if labelIter is not None :
            labelIter.setText(unicode(itCount))
def read_custom_enzyme(infile):
    """
    Create a list of custom RNase cleaving sites from an input file.

    Lines containing '*' are kept (trailing whitespace stripped) unless
    they start with '#', which marks a comment.
    """
    sites = []
    with open(infile.rstrip(), 'r') as handle:
        for raw_line in handle:
            if '*' in raw_line and not raw_line.startswith('#'):
                sites.append(raw_line.rstrip())
    return sites
def all_features(movie_id):
    """Returns the concatenation of visual and audio features for a movie.

    The visual and audio streams may have different frame counts; both are
    truncated to the shorter of the two before concatenation.
    """
    visual = all_visual_features(movie_id)
    audio = all_audio_features(movie_id)
    n_frames = min(visual.shape[0], audio.shape[0])
    return np.concatenate((visual[:n_frames, :], audio[:n_frames, :]), axis=1)
def resnet34(pretrained=False, shift='TSM',num_segments = 8, flow_estimation=0,**kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, loads weights pre-trained on ImageNet
        shift (str): temporal module type; only 'TSM' is supported
        num_segments (int): number of temporal segments per clip
        flow_estimation (int): whether to enable the flow-estimation branch

    Raises:
        ValueError: for an unsupported ``shift`` value (the original fell
        through to a confusing NameError on ``model``).
    """
    if shift != 'TSM':
        raise ValueError(
            "Unsupported shift type: %r (only 'TSM' is supported)" % (shift,))
    model = ResNet(BasicBlock, BasicBlock, [3, 4, 6, 3], num_segments=num_segments,
                   flow_estimation=flow_estimation, **kwargs)
    if pretrained:
        pretrained_dict = model_zoo.load_url(model_urls['resnet34'])
        new_state_dict = model.state_dict()
        # Copy only pretrained weights whose names exist in this
        # (TSM-modified) architecture; extra keys are silently skipped.
        for k, v in pretrained_dict.items():
            if k in new_state_dict:
                new_state_dict.update({k: v})
        model.load_state_dict(new_state_dict)
    return model
def normalize_data_v3(data, cal_zero_points=np.arange(-4, -2, 1),
cal_one_points=np.arange(-2, 0, 1), **kw):
"""
Normalizes data according to calibration points
Inputs:
data (numpy array) : 1D dataset that has to be normalized
cal_zero_points (range) : range specifying what indices in 'data'
correspond to zero
cal_one_points (range) : range specifying what indices in 'data'
correspond to one
"""
# Extract zero and one coordinates
I_zero = np.mean(data[cal_zero_points])
I_one = np.mean(data[cal_one_points])
# Translate the date
trans_data = data - I_zero
# Normalize the data
one_zero_dist = I_one - I_zero
normalized_data = trans_data / one_zero_dist
return normalized_data | 3c9017abb58745964cd936db21d79a6df4896b4e | 27,555 |
def read(path, encoding="utf-8"):
    """Read string from text file.

    Opens in binary mode and decodes explicitly, so the platform default
    encoding never interferes.
    """
    with open(path, "rb") as f:
        raw = f.read()
    return raw.decode(encoding)
def min_equals_max(min, max):
    """
    Return True if the minimum value equals the maximum value.
    Return False otherwise, or when either bound is undefined (None).
    """
    if min is None or max is None:
        return False
    return min == max
def loadDataSet(fileName):
    """Load a dataset of tab-separated floats.

    Each row holds the feature values followed by the target value
    (class label) in the last column.

    Returns:
        dataMat  : list of feature rows (list of lists of float)
        labelMat : list of target values (class labels) as floats
    """
    # Count feature columns from the first line: total columns minus the
    # trailing target column.
    with open(fileName) as fr:
        numFeat = len(fr.readline().split('\t')) - 1
    dataMat = []
    labelMat = []
    # Re-open so the first line is parsed as data too (same behavior as the
    # original); context managers close both handles, which the original
    # code leaked.
    with open(fileName) as fr:
        for line in fr:
            # Strip surrounding whitespace, then split on tabs.
            curLine = line.strip().split('\t')
            # Feature columns form one row vector.
            dataMat.append([float(curLine[i]) for i in range(numFeat)])
            # Last column is the target variable.
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
def infer_app_url(headers: dict, register_path: str) -> str:
"""
ref: github.com/aws/chalice#485
:return: The Chalice Application URL
"""
host: str = headers["host"]
scheme: str = headers.get("x-forwarded-proto", "http")
app_url: str = f"{scheme}://{host}{register_path}"
return app_url | f3c0d1a19c8a78a0fd2a8663e2cb86427ff8f61b | 27,559 |
def get_index_where_most_spikes_in_unit_list(unit_list):
    """Returns the position in the list of units that has the most spikes.

    :param unit_list: list of unit dictionaries
    :return: index
    :rtype: int
    """
    spike_counts = Recordings.count_spikes_in_unit_list(unit_list)
    return np.argmax(spike_counts)
def ocr_boxes(
        img,
        boxes,
        halve=False,
        resize=False,
        blur=True,
        sharpen=False,
        erode=False,
        dilate=False,
        lang='eng',
        config=None):
    """
    Detect strings in multiple related boxes and concatenate the results.

    Parameters
    ----------
    halve : bool, optional
        Scan half of the rectangle
    resize : bool, optional
        Resize the box before scanning
    blur : bool, optional
        Blur the box for better results
    sharpen : bool, optional
        Sharpen the image
    erode : bool, optional
        Make the text thinner
    dilate : bool, optional
        Make the text thicker
    lang : str, optional
        Language for the Tesseract engine
    config: str, optional
        Configuration string for the 'config' parameter of 'pytesseract'

    Returns
    -------
    str
        Space-joined concatenation of the strings detected in each box,
        or the sentinel 'None' when no box produced usable text.

    See Also
    --------
    ocr_box
    """
    string_list = []
    for box in boxes:
        coordinates = box[1]
        box_string = ocr_box(
            img,
            coordinates,
            halve=halve,
            resize=resize,
            blur=blur,
            sharpen=sharpen,
            erode=erode,
            dilate=dilate,
            lang=lang,
            config=config)
        # Skip sentinel values emitted by ocr_box for unusable boxes.
        if box_string not in ('None', 'N/A'):
            string_list.append(box_string)
    # Explicit emptiness check replaces the original bare
    # try/except-around-indexing idiom, which could mask real errors.
    if string_list:
        return ' '.join(string_list)
    return 'None'
def multiply_inv_gaussians(mus, lambdas):
    """Multiplies a series of Gaussians given as mean vectors and precision matrices.

    mus: list of means with shape [n, d]
    lambdas: list of precision matrices with shape [n, d, d]

    Returns the mean vector, covariance matrix, and precision matrix of the
    product distribution.
    """
    assert len(mus) == len(lambdas)
    batch_size = int(mus[0].shape[0])
    dim = int(lambdas[0].shape[-1])
    # Batched identity matrix, one per batch element.
    eye_batched = tf.reshape(tf.tile(tf.eye(dim), [batch_size, 1]), [-1, dim, dim])
    # Product precision: sum of component precisions plus the identity.
    precision_new = tf.reduce_sum(lambdas, axis=0) + eye_batched
    # Sum of precision-weighted means over all component Gaussians.
    weighted_mu_sum = tf.reduce_sum(
        [tf.einsum("bij, bj -> bi", lam, mu) for lam, mu in zip(lambdas, mus)],
        axis=0)
    sigma_new = tf.linalg.inv(precision_new)
    mu_new = tf.einsum("bij, bj -> bi", sigma_new, weighted_mu_sum)
    return mu_new, sigma_new, precision_new
import random
def mutationShuffle(individual, indpb):
    """
    Inputs : Individual route
             Probability of mutation between (0, 1)
    Outputs : One-tuple with the individual, mutated in place according
              to the probability
    """
    length = len(individual)
    for pos in range(length):
        # One random draw per position; mutate only below the threshold.
        # (RNG call order is identical to the original implementation.)
        if random.random() < indpb:
            partner = random.randint(0, length - 2)
            # Never swap an element with itself: shift the partner past pos.
            if partner >= pos:
                partner += 1
            individual[pos], individual[partner] = \
                individual[partner], individual[pos]
    return individual,
def best_shoot_angle(agent_coord: tuple, opponents: list):
    """ Tries to shoot, if it fail, kicks to goal randomly """
    player_coord = np.array(agent_coord)
    # Three reference points spanning the goal mouth.
    goal_limits = [np.array([0.9, -0.2]), np.array([0.9, 0]),
                   np.array([0.9, 0.2])]
    best_angles = []
    for goal_limit in goal_limits:
        # Tightest angle any opponent leaves toward this goal point.
        angles = [get_angle(goalie=np.array([op.x_pos, op.y_pos]),
                            player=player_coord,
                            point=goal_limit)
                  for op in opponents]
        best_angles.append(min(angles))
    # Widest of the per-target tightest angles.
    return max(best_angles)
import json
import os
import subprocess
def adjust(*args, stdin=None):
    """run 'adjust' with the current directory set to the dir where this module is found.

    Return a tuple (exitcode, parsed_stdout); the second item will be None
    if stdout was empty. An exception is raised if the process cannot be
    run or parsing stdout as json fails. A non-zero exit code from the
    process does not trigger an exception.
    """
    if stdin is not None and not isinstance(stdin, bytes):
        stdin = json.dumps(stdin).encode("UTF-8")
    mydir = os.path.dirname(os.path.abspath(__file__))
    # Run the child with cwd=mydir instead of chdir()-ing the whole
    # process: "./adjust" is resolved relative to `cwd`, and the parent's
    # working directory is no longer mutated (safer with threads).
    r = subprocess.run(["./adjust"] + list(args), input=stdin,
                       stdout=subprocess.PIPE, check=False, cwd=mydir)
    # on success, parse the output from the subprocess (if not empty)
    if r.stdout:
        # take only the last line, if there are many (this discards any 'progress' lines)
        stdout = r.stdout.strip().split(b"\n")[-1]
        return r.returncode, json.loads(stdout.decode("UTF-8"))
    return r.returncode, None
import os
def read_STAR_Logfinalout(Log_final_out):
    """
    Parse mapping statistics from a STAR ``Log.final.out`` file.

    Log_final_out -- path to a Log.final.out file, or to a directory
                     containing one.

    Returns a dict with keys: input, length, uniq_num, mult_num,
    toomany_num, unmap.
    """
    if os.path.isdir(Log_final_out):
        Log_final_out = Log_final_out.rstrip('/')
        dirfiles = os.listdir(Log_final_out)
        if 'Log.final.out' in dirfiles:
            Log_final_out += '/Log.final.out'
        else:
            raise RuntimeError("No Log.final.out file in directory")
    # Map STAR's human-readable labels to the output dict keys.
    field_map = {
        'Number of input reads': 'input',
        'Average input read length': 'length',
        'Uniquely mapped reads number': 'uniq_num',
        'Number of reads mapped to multiple loci': 'mult_num',
        'Number of reads mapped to too many loci': 'toomany_num',
    }
    Mapping = {}
    # Use a context manager so the handle is closed (the original leaked it).
    with open(Log_final_out) as handle:
        for line in handle:
            if '|' not in line:
                continue
            left, right = line.split('|')
            key = field_map.get(left.strip())
            if key is not None:
                Mapping[key] = int(right.strip())
    # Remaining reads are unmapped.
    unmapped = Mapping['input'] - Mapping['uniq_num'] - Mapping['mult_num'] - Mapping['toomany_num']
    Mapping['unmap'] = unmapped
    return Mapping
from typing import Optional
def _parse_line(lineno: int, line: str) -> Optional[UciLine]:  # pylint: disable=unsubscriptable-object
    """Parse a line, raising UciParseError if it is not valid."""
    match = _LINE_REGEX.match(line)
    if not match:
        raise UciParseError("Error on line %d: unrecognized line type" % lineno)
    if match[4] == "#":
        return _parse_comment(lineno, match[3], match[5])
    keyword = match[8]
    if keyword:
        # Dispatch table replaces the original elif chain; unknown
        # keywords fall through to None, exactly as before.
        handlers = {
            "package": _parse_package,
            "config": _parse_config,
            "option": _parse_option,
            "list": _parse_list,
        }
        handler = handlers.get(keyword)
        if handler is not None:
            return handler(lineno, match[10])
    return None
from django.contrib.auth import login
from django.contrib.auth import authenticate
from django.contrib.auth import login
def login(request, template_name="lfs/checkout/login.html"):
    """Displays a form to login or register/login the user within the check out
    process.

    The form's post request goes to lfs.customer.views.login where all the logic
    happens - see there for more.

    NOTE(review): this view is itself named ``login`` while the body calls
    ``login(request, ...)`` — whether that resolves to Django's
    ``django.contrib.auth.login`` or recursively to this view depends on the
    module-level import order, which is not visible here. TODO confirm.
    """
    # If the user is already authenticate we don't want to show this view at all
    # NOTE(review): is_authenticated() as a *callable* is pre-Django-1.10 API.
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse("lfs_checkout"))
    shop = lfs.core.utils.get_default_shop(request)
    # If only anonymous checkout allowed we don't want to show this view at all.
    if shop.checkout_type == CHECKOUT_TYPE_ANON:
        return HttpResponseRedirect(reverse("lfs_checkout"))
    # Using Djangos default AuthenticationForm
    login_form = AuthenticationForm()
    login_form.fields["username"].label = _(u"E-Mail")
    register_form = RegisterForm()
    if request.POST.get("action") == "login":
        # Re-bind the form with the submitted credentials and validate.
        login_form = AuthenticationForm(data=request.POST)
        login_form.fields["username"].label = _(u"E-Mail")
        if login_form.is_valid():
            login(request, login_form.get_user())
            return lfs.core.utils.set_message_cookie(reverse("lfs_checkout"),
                msg=_(u"You have been logged in."))
    elif request.POST.get("action") == "register":
        register_form = RegisterForm(data=request.POST)
        if register_form.is_valid():
            email = register_form.data.get("email")
            password = register_form.data.get("password_1")
            # Create user (the e-mail address doubles as the username)
            user = User.objects.create_user(
                username=email, email=email, password=password)
            # Notify listeners that a new customer was created
            lfs.core.signals.customer_added.send(user)
            # Log in user
            user = authenticate(username=email, password=password)
            login(request, user)
            return lfs.core.utils.set_message_cookie(reverse("lfs_checkout"),
                msg=_(u"You have been registered and logged in."))
    # GET request or invalid form: render the combined login/register page.
    return render_to_response(template_name, RequestContext(request, {
        "login_form": login_form,
        "register_form": register_form,
        "anonymous_checkout": shop.checkout_type != CHECKOUT_TYPE_AUTH,
    }))
import inspect
def _is_valid_concrete_plugin_class(attr):
    """
    :type attr: Any
    :rtype: bool
    """
    # Guard clauses mirror the original short-circuit: secret_type is only
    # touched once we know attr is a BasePlugin subclass.
    if not inspect.isclass(attr):
        return False
    if not issubclass(attr, BasePlugin):
        return False
    # Heuristic to determine abstract classes
    return not isinstance(attr.secret_type, abstractproperty)
import os
def connect(production=False):
    """Connect to MTurk and return a boto3 MTurk client."""
    # Load API keys into the environment
    reseval.load.api_keys()
    # Pick the sandbox or live endpoint
    endpoint = URL['production' if production else 'development']
    session = boto3.Session(
        aws_access_key_id=os.environ['AWSAccessKeyId'],
        aws_secret_access_key=os.environ['AWSSecretKey'])
    # Connect to MTurk
    return session.client(
        'mturk',
        region_name='us-east-1',
        endpoint_url=endpoint)
def arrQuartiles(arr, arrMap=None, method=1, key=None, median=None):
    """
    Find quartiles. Also supports dicts.
    This function know about this quartile-methods:
       1. Method by Moore and McCabe's, also used in TI-85 calculator.
       2. Classical method, also known as "Tukey's hinges". In common cases it use values from original set, not create new.
       3. Mean between method[1] and method[2].
    :param int method: Set method for find quartiles.
    :Example:
       >>> arrQuartiles([1, 5, 6, 7, 9, 12, 15, 19, 20], method=1)
       (5.5, 9, 17.0)
       >>> arrQuartiles([1, 5, 6, 7, 9, 12, 15, 19, 20], method=2)
       (6, 9, 15)
       >>> arrQuartiles([1, 5, 6, 7, 9, 12, 15, 19, 20], method=3)
       (5.75, 9, 16.0)
       >>> arrQuartiles([1, 1, 3, 5, 7, 9, 10, 14, 18], method=1)
       (2.0, 7, 12.0)
       >>> arrQuartiles([1, 1, 3, 5, 7, 9, 10, 14, 18], method=2)
       (3, 7, 10)
       >>> arrQuartiles([1, 1, 3, 5, 7, 9, 10, 14, 18], method=3)
       (2.5, 7, 11.0)
    """
    if method not in (1, 2, 3):
        raise ValueError('Unknown method: %s'%method)
    # Degenerate inputs: empty returns zeros, single element returns a
    # synthetic (0, value, value+1) triple.
    if not arr: return (0, 0, 0)
    elif len(arr)==1:
        #? что лучше отдавать  (NOTE: original comment — "what is better to return?")
        # NOTE(review): `arr.values()[0]` is Python-2 only; on Python 3,
        # dict.values() is a view and is not subscriptable — TODO confirm
        # the target interpreter version.
        if isDict(arr):
            r=key(arr.values()[0]) if isFunction(key) else arr.values()[0]
        else:
            r=key(arr[0]) if isFunction(key) else arr[0]
        return (0, r, r+1)
    # Build the sorted index map and overall median lazily if not supplied.
    if not arrMap:
        arrMap=arrCreateIndexMap(arr, key=key)
    if median is None:
        median=arrMedian(arr, arrMap, key=key)
    def getHalve(isLow=True, includeM=False):
        # Collect the lower (or upper) half of the values, optionally
        # including elements equal to the median (method 2 semantics).
        tArr=[]
        for i in arrMap:
            v=key(arr[i]) if isFunction(key) else arr[i]
            if isLow and (v<=median if includeM else v<median): tArr.append(v)
            elif not isLow and (v>=median if includeM else v>median): tArr.append(v)
        # tArr is already sorted (arrMap is an index map), so the identity
        # range works as its index map.
        tArrMap=range(len(tArr))
        return tArr, tArrMap
    if method in (1, 2):  #methods "Moore and McCabe's" and "Tukey's hinges"
        # Method 2 differs only in that median-equal values join both halves.
        tHalveL, tHalveL_arrMap=getHalve(True, method==2)
        tHalveH, tHalveH_arrMap=getHalve(False, method==2)
        qL=arrMedian(tHalveL, tHalveL_arrMap)
        qH=arrMedian(tHalveH, tHalveH_arrMap)
    elif method==3:  #mean between method[1] and method[2]
        tHalveL1, tHalveL1_arrMap=getHalve(True, False)
        tHalveH1, tHalveH1_arrMap=getHalve(False, False)
        qL1=arrMedian(tHalveL1, tHalveL1_arrMap)
        qH1=arrMedian(tHalveH1, tHalveH1_arrMap)
        tHalveL2, tHalveL2_arrMap=getHalve(True, True)
        tHalveH2, tHalveH2_arrMap=getHalve(False, True)
        qL2=arrMedian(tHalveL2, tHalveL2_arrMap)
        qH2=arrMedian(tHalveH2, tHalveH2_arrMap)
        # Average the two estimates element-wise.
        qL=(qL1+qL2)/2.0
        qH=(qH1+qH2)/2.0
    return qL, median, qH
def check_response_status_code(url, response, print_format):
    """
    Check the HTTP response for a URL, print a colored console marker and
    return a one-line summary.

    Args:
        url (str) : url text.
        response : request response object for the url (must expose
            ``status_code``).
        print_format (str) : printf-style format with two slots
            (url, colored marker) used for console output.

    Returns:
        str: a summary line, check mark for 200 and cross otherwise,
        including the numeric status code and the url.
    """
    succeeded = response.status_code == 200
    marker = colored(".", "green") if succeeded else colored("x", "red")
    print(print_format % (url, marker))
    symbol = "✓" if succeeded else "✘"
    return f"{symbol} [{response.status_code}] {url}"
def make_state_manager(config):
    """
    Build the StateManager described by a configuration dictionary.

    Parameters
    ----------
    config : dict or None
        Parameters for this StateManager. The optional "type" key selects
        the manager class (popped from the dict); all remaining keys are
        forwarded to its constructor.

    Returns
    -------
    state_manager : StateManager
        The StateManager to be used by the Controller.
    """
    registry = {
        "hierarchical": HierarchicalStateManager
    }
    options = {} if config is None else config
    # HierarchicalStateManager is the default when no type is given.
    manager_name = options.pop("type", "hierarchical")
    return registry[manager_name](**options)
from datetime import datetime
def get_books_information(url: str, headers: dict = None) -> list:
    """
    Create list that contains dicts of cleaned data from google book API.

    Parameters
    ----------
    url: str
        link to resources (default: link to volumes with q=war, target of recrutment task)
    headers: dict
        dict with params for requests.get method. It should contain key and value matched for given url.

    Returns
    -------
    books_list: list
        List containing dict with chosen values (keyed by the last path
        segment of each wanted field, plus "_id" and "publishedYear").
    """
    url_json = get_books_json(url, headers)
    items_list = url_json["items"]
    list_of_wanted_params = ["id", "volumeInfo_authors", "volumeInfo_title", "volumeInfo_publishedDate", "volumeInfo_categories", "volumeInfo_averageRating", "volumeInfo_ratingsCount", "volumeInfo_imageLinks_thumbnail"]
    parameters_list = []
    for item in items_list:
        # Flatten the nested volume structure so nested keys become
        # underscore-joined top-level keys (e.g. "volumeInfo_title").
        flatten_df = pd.json_normalize(item, sep='_')
        flatten_dict = flatten_df.to_dict(orient='records')[0]
        # Keep only the wanted fields, keyed by their last path segment.
        book_params_dict = {key.rsplit("_", 1)[-1]: flatten_dict.get(key) for key in list_of_wanted_params}
        book_params_dict["_id"] = book_params_dict.pop("id")
        # publishedDate may be "YYYY", "YYYY-MM" or "YYYY-MM-DD"; the year is
        # always the leading component.  BUG FIX: the previous code called
        # datetime.datetime.strptime on the *class* imported above, which
        # raised AttributeError for every date containing a dash.
        book_date = book_params_dict['publishedDate']
        book_params_dict['publishedYear'] = int(book_date.split("-")[0])
        parameters_list.append(book_params_dict)
    return parameters_list
from typing import Dict
from typing import Type
import pkg_resources
def get_agents() -> Dict[str, Type[Agent]]:
    """Discover registered agents.

    Returns:
        Dictionary mapping each "agents" entry-point name to the Agent
        class loaded from that entry point.
    """
    return {
        entry_point.name: entry_point.load()
        for entry_point in pkg_resources.iter_entry_points("agents")
    }
from typing import MutableMapping
from typing import Any
from typing import Set
import yaml
def rewrite_schemadef(document: MutableMapping[str, Any]) -> Set[str]:
    """Dump the schemadefs to their own file.

    Each named type in ``document["types"]`` whose name embeds a file
    ("file/typename") is appended to that file and replaced in-place by a
    ``$import`` entry; duplicate imports are then collapsed.

    Returns:
        The set of external files referenced via ``$import``.
    """
    for entry in document["types"]:
        if "$import" in entry:
            # Already an import -- just normalise its path.
            rewrite_import(entry)
        elif "name" in entry and "/" in entry["name"]:
            # Split "file/typename": the definition moves out to its own file.
            entry_file, entry["name"] = entry["name"].split("/")
            for field in entry["fields"]:
                # Field names look like "file/type/field"; keep the last part.
                field["name"] = field["name"].split("/")[2]
                rewrite_types(field, entry_file, True)
            # entry_file[1:] strips a leading marker character to get the
            # on-disk path -- presumably a '#'; TODO confirm with callers.
            # NOTE(review): yaml.main.dump with RoundTripDumper suggests
            # `yaml` is ruamel.yaml here, not PyYAML -- verify the import.
            with open(entry_file[1:], "a", encoding="utf-8") as entry_handle:
                yaml.main.dump(
                    [entry], entry_handle, Dumper=yaml.dumper.RoundTripDumper
                )
            # Replace the inline definition with an import reference.
            entry["$import"] = entry_file[1:]
            del entry["name"]
            del entry["type"]
            del entry["fields"]
    seen_imports = set()
    def seen_import(entry: MutableMapping[str, Any]) -> bool:
        # Keep an entry unless it is a $import we have already seen.
        if "$import" in entry:
            external_file = entry["$import"]
            if external_file not in seen_imports:
                seen_imports.add(external_file)
                return True
            return False
        return True
    types = document["types"]
    # Drop duplicate $import entries in-place.
    document["types"][:] = [entry for entry in types if seen_import(entry)]
    return seen_imports
def decor(decoration, reverse=False):
    """
    Return given decoration part.

    :param decoration: decoration's name
    :type decoration: str
    :param reverse: true if second tail of decoration wanted
    :type reverse: bool
    :return: decor's tail
    """
    if not isinstance(decoration, str):
        raise artError(DECORATION_TYPE_ERROR)
    resolved = indirect_decoration(decoration)
    # The decoration map stores [head, ..., tail]; reverse picks the tail.
    index = -1 if reverse is True else 0
    return DECORATIONS_MAP[resolved][index]
def group_data(data, degree=3, hash=hash):
    """
    Group all columns of ``data`` into all combinations of ``degree`` columns.

    Each selected column tuple is reduced to one hashed feature per row.

    Args:
        data (numpy.array): 2-D array of shape (n_samples, n_features).
        degree (int): number of columns combined per new feature.
        hash (callable): hashing function applied to each row tuple.

    Returns:
        numpy.array: array of shape (n_samples, n_new_features).
    """
    new_data = []
    m, n = data.shape
    for indicies in combinations(range(n), degree):
        # Column pairs (5, 7) and (2, 3) are skipped -- presumably known
        # redundant/leaky feature pairs for this dataset; TODO confirm.
        if 5 in indicies and 7 in indicies:
            # BUG FIX: was a Python 2 print statement; now py2/py3 compatible.
            print("feature Xd")
        elif 2 in indicies and 3 in indicies:
            print("feature Xd")
        else:
            new_data.append([hash(tuple(v)) for v in data[:, indicies]])
    return array(new_data).T
def get_dataset(dataset_name: str, *args, **kwargs):
    """Get the Dataset instancing lambda from the dictionary and return its evaluation.

    This way, a Dataset object is only instanced when this function is
    evaluated.

    Arguments:
        dataset_name:
            The name of the Dataset to be instanced. Must be a key in the
            `DATASETS` dictionary.

    Returns:
        The corresponding Dataset object.

    Raises:
        KeyError: if `dataset_name` is not registered in `DATASETS`
            (re-raised with a friendlier message, original as cause).
    """
    try:
        return DATASETS[dataset_name](*args, **kwargs)
    except KeyError as e:
        # FIX: chain the original exception and correct the message grammar.
        raise type(e)("{} is not registered as a Dataset.".format(dataset_name)) from e
def _numeric_handler_factory(charset, transition, assertion, illegal_before_underscore, parse_func,
                             illegal_at_end=(None,), ion_type=None, append_first_if_not=None, first_char=None):
    """Generates a handler co-routine which tokenizes a numeric component (a token or sub-token).
    Args:
        charset (sequence): Set of ordinals of legal characters for this numeric component.
        transition (callable): Called upon termination of this component (i.e. when a character not in ``charset`` is
            found). Accepts the previous character ordinal, the current character ordinal, the current context, and the
            previous transition. Returns a Transition if the component ends legally; otherwise, raises an error.
        assertion (callable): Accepts the first character's ordinal and the current context. Returns True if this is
            a legal start to the component.
        illegal_before_underscore (sequence): Set of ordinals of illegal characters to precede an underscore for this
            component.
        parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a
            thunk that lazily parses the token.
        illegal_at_end (Optional[sequence]): Set of ordinals of characters that may not legally end the value.
        ion_type (Optional[IonType]): The type of the value if it were to end on this component.
        append_first_if_not (Optional[int]): The ordinal of a character that should not be appended to the token if
            it occurs first in this component (e.g. an underscore in many cases).
        first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that
            occurs first in this component. This is useful for preparing the token for parsing in the case where a
            particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value
            should be replaced with 'e' for compatibility with python's Decimal type).
    """
    @coroutine
    def numeric_handler(c, ctx):
        # First character must be a legal start for this component.
        assert assertion(c, ctx)
        if ion_type is not None:
            ctx.set_ion_type(ion_type)
        val = ctx.value
        if c != append_first_if_not:
            # Optionally substitute the first character (e.g. 'd' -> 'e').
            first = c if first_char is None else first_char
            val.append(first)
        prev = c
        c, self = yield
        trans = ctx.immediate_transition(self)
        while True:
            if _ends_value(c):
                # A value may not end directly after an underscore or any
                # character listed in illegal_at_end.
                if prev == _UNDERSCORE or prev in illegal_at_end:
                    _illegal_character(c, ctx, '%s at end of number.' % (_chr(prev),))
                # Emit the scalar lazily via the parse thunk.
                trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ctx.ion_type, parse_func(ctx.value))
                if c == _SLASH:
                    # Possible comment start immediately after the number.
                    trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans))
            else:
                if c == _UNDERSCORE:
                    # Underscores are digit separators: no doubling, and not
                    # after any character in illegal_before_underscore.
                    if prev == _UNDERSCORE or prev in illegal_before_underscore:
                        _illegal_character(c, ctx, 'Underscore after %s.' % (_chr(prev),))
                else:
                    if c not in charset:
                        # Component ended on a non-member character; delegate.
                        trans = transition(prev, c, ctx, trans)
                    else:
                        val.append(c)
            prev = c
            c, _ = yield trans
    return numeric_handler
def unsafe_content(s):
    """Take the string returned by safe_content() and recreate the
    original string."""
    # don't have to "unescape" XML entities (parser does it for us)
    # unwrap python strings from unicode wrapper
    # NOTE(review): Python 2 code -- `unichr` and str.encode('us-ascii')
    # (which returns a byte string in py2) do not port directly to py3.
    # The wrapper is two guillemets on each side: chr(187)=» and chr(171)=«.
    if s[:2]==unichr(187)*2 and s[-2:]==unichr(171)*2:
        s = s[2:-2].encode('us-ascii')
    return s
def _AcosGrad(op, grad):
    """Gradient of acos: returns grad * -1/sqrt(1-x^2)."""
    x = op.inputs[0]
    # Ensure the gradient op runs first before touching x.
    with ops.control_dependencies([grad.op]):
        x = math_ops.conj(x)
        x_squared = math_ops.square(x)
        one = constant_op.constant(1, dtype=grad.dtype)
        denominator = math_ops.sqrt(math_ops.subtract(one, x_squared))
        inverse = math_ops.reciprocal(denominator)
        return -grad * inverse
def converter(n, decimals=0, base=pi):
    """Render the base-10 number ``n`` in an arbitrary base (default pi),
    with an optional number of fractional digits.

    Works by repeated division: for each power of the base from the most
    significant digit down to base**(-decimals), take the integer quotient
    as the digit and carry the remainder forward.
    """
    digits = []  # collected output characters
    # Guard against a non-positive base.
    if base <= 0:
        base = pi
    # Highest power of the base needed; zero input starts at power 0.
    if n != 0:
        start_power = int(log(abs(n), base))
    else:
        start_power = 0
    remainder = abs(n)  # running value for the iterative division
    if n < 0:
        digits.append("-")
    for step in range(0, (start_power + abs(decimals) + 1)):
        power = start_power - step
        if power == -1:
            # Crossing from integer to fractional digits.
            digits.append(".")
        digit = int(remainder / base**power)
        digits.append(num[digit])
        if digit > 0:
            remainder = remainder % base**power
    return "".join(digits)
import json
import requests
def mediaAddcastInfo():
    """
    Save a film's cast list through the media service.

    Resolves the media-service host via ``req_url`` and POSTs a fixed test
    payload (one film id plus four localized cast entries: director, lead
    actor, screenwriter and extra) to ``/filmCast/saveFilmCastList``.

    :return: the decoded JSON response on success, or the error string
        '服务host匹配失败' when the service host cannot be resolved.
    """
    reqUrl = req_url('media', "/filmCast/saveFilmCastList")
    if reqUrl:
        url = reqUrl
    else:
        return "服务host匹配失败"
    headers = {
        'Content-Type': 'application/json',
        # X-Region-Id selects the service region -- presumably region 2
        # is the test region; TODO confirm.
        'X-Region-Id': '2',
    }
    # Fixed test payload: castList entries carry zh_TW/zh_CN/en_US
    # localizations; filmRoleId distinguishes director/lead/writer/extra.
    body = json.dumps(
        {
            "filmId": 4934007395236725,
            "castList": [{
                "castId": 4886219191944844,
                "phone": "",
                "headImage": "https://g.smartcinemausa.com/images/1e4e91eb2d674d64a8922b9530525e94-649-649.jpg",
                "international": {
                    "zh_TW": {
                        "castName": "導演信命",
                        "filmRoleName": "導演角色"
                    },
                    "zh_CN": {
                        "castName": "导演姓名",
                        "filmRoleName": "导演角色"
                    },
                    "en_US": {
                        "castName": "Name of director",
                        "filmRoleName": "Director role"
                    }
                },
                "filmRoleId": "2",
                "roleId": None,
                "idx": 1
            }, {
                "castId": 4886219191944845,
                "phone": "",
                "headImage": "https://g.smartcinemausa.com/images/3239d1b2a1ee4a4a87e6a2f534812e8e-658-658.jpg",
                "international": {
                    "zh_TW": {
                        "castName": "繁体主演姓名",
                        "filmRoleName": "繁体主演角色"
                    },
                    "zh_CN": {
                        "castName": "简体主演姓名",
                        "filmRoleName": "简体主演角色"
                    },
                    "en_US": {
                        "castName": "Starring name",
                        "filmRoleName": "Starring role"
                    }
                },
                "filmRoleId": "1",
                "roleId": None,
                "idx": 2
            }, {
                "castId": 4886219191944846,
                "phone": "",
                "headImage": "https://g.smartcinemausa.com/images/33687cfa20fa49a3b064e4e884d8efe7-900-900.jpg",
                "international": {
                    "zh_TW": {
                        "castName": "我是編劇",
                        "filmRoleName": "編劇人"
                    },
                    "zh_CN": {
                        "castName": "我是编剧",
                        "filmRoleName": "编剧人"
                    },
                    "en_US": {
                        "castName": "Screenwriter",
                        "filmRoleName": "Screenwriter json"
                    }
                },
                "filmRoleId": "4",
                "roleId": None,
                "idx": 3
            }, {
                "castId": 4886219191944847,
                "phone": "",
                "headImage": "https://g.smartcinemausa.com/images/d5f54a31a1e5466f991b50dc31eb024f-441-441.jpg",
                "international": {
                    "zh_TW": {
                        "castName": "我是群演",
                        "filmRoleName": "路人丁"
                    },
                    "zh_CN": {
                        "castName": "我是群演",
                        "filmRoleName": "路人丁"
                    },
                    "en_US": {
                        "castName": "我是群演",
                        "filmRoleName": "路人丁"
                    }
                },
                "filmRoleId": "101",
                "roleId": None,
                "idx": 4
            }]
        }
    )
    result = requests.post(url=url, headers=headers, data=body)
    resultJ = json.loads(result.content)
    return resultJ
def process_register_fns():  # noqa: WPS210
    """Registration in FNS process.

    Validates the submitted RegisterFnsForm; on success registers the
    current user with FNS and flashes a confirmation, otherwise flashes
    every validation error. Always redirects back to the user's profile.
    """
    form = RegisterFnsForm()
    if form.validate_on_submit():
        registration_fns(
            current_user.email,
            current_user.username,
            form.telephone.data,
        )
        flash('Ждите SMS от KKT-NALOG')
        return redirect(
            url_for('user.profile', username=current_user.username)
        )
    # Validation failed: surface every field error to the user.
    for field, errors in form.errors.items():
        for error in errors:
            flash(
                f'Ошибка в поле "{getattr(form, field).label.text}": - {error}'
            )
    return redirect(url_for('user.profile', username=current_user.username))
import socket
def validate_args(args):
    """ Checks if the arguments are valid or not.

    Exits the process with status 1 when the socket count is not positive
    or the address does not resolve; otherwise returns the first TCP
    addrinfo tuple for (address, port).
    """
    # The number of sockets must be strictly positive.
    if args.number <= 0:
        print("[ERROR] Number of sockets should be positive. Received %d" % args.number)
        exit(1)
    # Resolve the address to confirm it is a valid IPv4/IPv6 address or
    # a resolvable domain name.
    try:
        candidates = socket.getaddrinfo(args.address, args.port, proto=socket.IPPROTO_TCP)
        return candidates[0]
    except socket.gaierror as error:
        print(error)
        print("Please, provide a valid IPv4, IPv6 address or a valid domain name.")
        exit(1)
import re
def register(request):
    """User registration view: GET renders the form, POST creates the user."""
    if request.method == 'GET':
        # Show the registration page
        return render(request, 'register.html')
    else:
        # Handle the registration submission
        # 1. Receive parameters
        username = request.POST.get('user_name')  # None when missing
        password = request.POST.get('pwd')
        email = request.POST.get('email')
        # 2. Validate parameters (server-side validation)
        # Check that all fields were provided
        if not all([username, password, email]):
            return render(request, 'register.html', {'errmsg': '数据不完整'})
        # Check the email format
        if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return render(request, 'register.html', {'errmsg': '邮箱格式不正确'})
        # Check whether the username is already registered
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = None
        if user is not None:
            return render(request, 'register.html', {'errmsg': '用户名已注册'})
        # Checking whether the email is already registered is still TODO...
        # 3. Business logic: create the user account
        user = User.objects.create_user(username, email, password)
        user.is_active = 0  # inactive until activation -- presumably via email; TODO confirm
        user.save()
        # 4. Respond: redirect to the index page
        return redirect(reverse('goods:index'))
def triangulation_to_vedo_mesh(tri, **kwargs):
    """
    Build a ``vedo.Mesh`` from my triangulation class.

    :param tri: triangulation object exposing ``coordinates`` and
        ``triangulation`` attributes.
    :param kwargs: forwarded to the ``vedo.Mesh`` constructor.
    :return: the constructed ``vedo.Mesh``.
    """
    return vedo.Mesh([tri.coordinates, tri.triangulation], **kwargs)
def build_dropout(cfg, default_args=None):
    """Builder for drop out layers.

    Args:
        cfg (dict): layer config; its ``type`` key must be registered in
            ``DROPOUT_LAYERS``.
        default_args (dict, optional): default kwargs merged into ``cfg``.

    Returns:
        The instantiated dropout layer.
    """
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
def conv2d_transpose(inputs,
                     num_output_channels,
                     kernel_size,
                     scope,
                     stride=[1, 1],
                     padding='SAME',
                     use_xavier=True,
                     stddev=1e-3,
                     weight_decay=0.0,
                     activation_fn=tf.nn.relu,
                     bn=False,
                     bn_decay=None,
                     is_training=None):
    """ 2D convolution transpose with non-linear operation.
    Args:
      inputs: 4-D tensor variable BxHxWxC
      num_output_channels: int
      kernel_size: a list of 2 ints
      scope: string
      stride: a list of 2 ints
      padding: 'SAME' or 'VALID'
      use_xavier: bool, use xavier_initializer if true
      stddev: float, stddev for truncated_normal init
      weight_decay: float
      activation_fn: function
      bn: bool, whether to use batch norm
      bn_decay: float or float tensor variable in [0, 1]
      is_training: bool Tensor variable
    Returns:
      Variable tensor
    Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
    NOTE(review): the mutable default stride=[1, 1] is read-only here, so
    it is harmless, but beware if the list is ever mutated.
    """
    with tf.variable_scope(scope) as sc:
        kernel_h, kernel_w = kernel_size
        num_in_channels = inputs.get_shape()[-1].value
        # conv2d_transpose kernels store output channels before input
        # channels (reversed relative to conv2d).
        kernel_shape = [kernel_h, kernel_w,
                        num_output_channels, num_in_channels]  # reversed to conv2d
        kernel = _variable_with_weight_decay('weights',
                                             shape=kernel_shape,
                                             use_xavier=use_xavier,
                                             stddev=stddev,
                                             wd=weight_decay)
        stride_h, stride_w = stride
        # from slim.convolution2d_transpose
        def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
            # Inverse of the conv output-size formula for one dimension.
            dim_size *= stride_size
            if padding == 'VALID' and dim_size is not None:
                dim_size += max(kernel_size - stride_size, 0)
            return dim_size
        # caculate output shape
        batch_size = inputs.get_shape()[0].value
        height = inputs.get_shape()[1].value
        width = inputs.get_shape()[2].value
        out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
        out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
        output_shape = [batch_size, out_height, out_width, num_output_channels]
        outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
                                         [1, stride_h, stride_w, 1],
                                         padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels],
                                  tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_conv2d(outputs, is_training,
                                            bn_decay=bn_decay, scope='bn')
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs
def open_py_file(f_name):
    """
    Open a brand-new .py file, appending "(1)" before the extension as
    many times as needed until the name does not already exist.

    :param f_name: name of the .py file (with extension)
    :return: tuple of (open file object, final file name)
    """
    candidate = f_name
    while True:
        try:
            handle = open(candidate, "x")
            return handle, candidate
        except IOError:
            # Name taken: insert another "(1)" before the ".py" extension.
            candidate = candidate[:-3] + "(1)" + candidate[-3:]
def cast_rays(rays_o, rays_d, z_vals, r):
    """shoot viewing rays from camera parameters.
    Args:
        rays_o: tensor of shape `[...,3]` origins of the rays.
        rays_d: tensor of shape `[...,3]` directions of the rays.
        z_vals: tensor of shape [...,N] segments of the rays
        r: radius of ray cone. 1/f*2/\sqrt(12)
    Returns:
        mu: tensor of shape `[...,N,3]` mean query positions
        cov_diag: tensor of shape `[...,N,3]` covariance (diagonal) of
            query positions.
    """
    # Midpoint c and half-width d of each ray segment.
    t0, t1 = z_vals[..., :-1], z_vals[..., 1:]
    c, d = (t0 + t1)/2, (t1 - t0)/2
    # Closed-form mean/variance of a conical frustum along the ray --
    # presumably the mip-NeRF integrated positional encoding moments;
    # TODO confirm against the reference derivation.
    t_mean = c + (2*c*d**2) / (3*c**2 + d**2)
    t_var = (d**2)/3 - (4/15) * ((d**4 * (12*c**2 - d**2))
                                 / (3*c**2 + d**2)**2)
    r_var = r**2 * ((c**2)/4 + (5/12) * d**2 - (4/15)
                    * (d**4) / (3*c**2 + d**2))
    mu = rays_d[..., None, :] * t_mean[..., None]
    # NOTE(review): `sum` must be an array-aware sum (numpy/jax, e.g. via a
    # star-import) -- the builtin sum() does not accept axis/keepdims.
    # Verify this module's imports.
    null_outer_diag = 1 - (rays_d**2) / \
        sum(rays_d**2, axis=-1, keepdims=True)
    cov_diag = (t_var[..., None] * (rays_d**2)[..., None, :]
                + r_var[..., None] * null_outer_diag[..., None, :])
    return mu + rays_o[..., None, :], cov_diag
from typing import Any
def ifnone(x: Any, y: Any):
    """
    Return ``x`` if ``x`` is not None, otherwise return ``y``.

    FIX: the previous docstring stated the opposite ("returns x if x is
    none else returns y") of what the code does.
    """
    val = x if x is not None else y
    return val
def get_session(region, default_bucket):
    """Gets the sagemaker session based on the region.

    Args:
        region: the aws region to start the session
        default_bucket: the bucket to use for storing the artifacts

    Returns:
        `sagemaker.session.Session` instance
    """
    boto_session = boto3.Session(region_name=region)
    # Build the SageMaker session from per-service clients on the same
    # boto session.
    return sagemaker.session.Session(
        boto_session=boto_session,
        sagemaker_client=boto_session.client("sagemaker"),
        sagemaker_runtime_client=boto_session.client("sagemaker-runtime"),
        default_bucket=default_bucket,
    )
def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.):
    """Einasto halo profile.
    Args:
        r (float or array like): 3d distances from halo center in Mpc/h comoving
        M (float): Mass in Msun/h; not used if rhos is specified
        conc (float): Concentration
        alpha (float): Profile exponent
        om (float): Omega_matter, matter fraction of the density
        delta (int): Overdensity, default is 200
        rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional.
            The default -1. acts as a sentinel -- presumably telling the C
            routine to derive rhos from M; TODO confirm.
    Returns:
        float or array like: Einasto halo profile.
    """
    r = _ArrayWrapper(r, 'r')
    # Output buffer; filled in-place by the C routine below.
    xi = _ArrayWrapper.zeros_like(r)
    cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos,
                                         conc, alpha, delta, om, xi.cast())
    return xi.finish()
import re
def _rst_links(contents: str) -> str:
"""Convert reStructuredText hyperlinks"""
links = {}
def register_link(m: re.Match[str]) -> str:
refid = re.sub(r"\s", "", m.group("id").lower())
links[refid] = m.group("url")
return ""
def replace_link(m: re.Match[str]) -> str:
text = m.group("id")
refid = re.sub(r"[\s`]", "", text.lower())
try:
return f"[{text.strip('`')}]({links[refid]})"
except KeyError:
return m.group(0)
# Embedded URIs
contents = re.sub(
r"`(?P<text>[^`]+)<(?P<url>.+?)>`_", r"[\g<text>](\g<url>)", contents
)
# External Hyperlink Targets
contents = re.sub(
r"^\s*..\s+_(?P<id>[^\n:]+):\s*(?P<url>http\S+)",
register_link,
contents,
flags=re.MULTILINE,
)
contents = re.sub(r"(?P<id>[A-Za-z0-9_\-.:+]|`[^`]+`)_", replace_link, contents)
return contents | c7c937cdc04f9d5c3814538978062962e6407d65 | 27,596 |
def fibonacci(n: int) -> int:
    """
    Calculate the nth Fibonacci number using naive recursive implementation.

    :param n: the index into the sequence (1-based; must be >= 1)
    :return: The nth Fibonacci number is returned.
    :raises ValueError: if n < 1 (previously this recursed until
        RecursionError).
    """
    if n < 1:
        # Guard against non-positive indices; the sequence is 1-based.
        raise ValueError("n must be a positive integer")
    if n == 1 or n == 2:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
def validator_msg(msg):
    """Validator decorator wraps return value in a message container.

    Usage:
        @validator_msg('assert len(x) <= 2')
        def validate_size(x):
            return len(x) <= 2

    The decorated callable returns a BooleanResult whose ``msg`` combines
    the supplied message with the inputs, e.g. if `validate_size` returns
    a falsy value for range(4):
    `ret.msg = 'assert len(x) <= 2 is false on input [0, 1, 2, 3]'`,
    and for a true value on [0, 1]:
    `ret.msg = 'assert len(x) <= 2 is true on input [0, 1]'`.
    """
    def real_decorator(function):
        class Wrapper(object):
            def __init__(self, function, msg):
                self._function = function
                self._msg = msg

            @property
            def msg(self):
                return self._msg

            def _build_argument_str(self, *args, **kwargs):
                # Positional args first, then key=value pairs.
                parts = [str(arg) for arg in args]
                parts.extend(
                    "{}={}".format(str(key), str(value))
                    for key, value in kwargs.items()
                )
                return ", ".join(parts)

            def __call__(self, *args, **kwargs):
                outcome = self._function(*args, **kwargs)
                described_args = self._build_argument_str(*args, **kwargs)
                return BooleanResult(outcome, self._msg, described_args)

        return Wrapper(function, msg)
    return real_decorator
def iter_fgsm_t(x_input_t, preds_t, target_labels_t,
                steps, total_eps, step_eps,
                clip_min=0.0, clip_max=1.0, ord=np.inf, targeted=False):
    """
    I-FGSM attack.

    Builds one iteration of the iterative fast-gradient method graph: take
    a single FGM step of size `step_eps`, then constrain the resulting
    perturbation to the `total_eps` budget under the given norm
    (clipping for L-inf, rescaling for L1/L2).

    NOTE(review): `steps` is unused in this body -- presumably consumed by
    an outer loop applying this op repeatedly; confirm with callers.
    """
    # Perturbation produced by one FGM step relative to the input.
    eta_t = fgm(x_input_t, preds_t, y=target_labels_t, eps=step_eps, ord=ord,
                clip_min=clip_min, clip_max=clip_max, targeted=targeted) - x_input_t
    if ord == np.inf:
        # Keep each pixel's total perturbation within [-total_eps, total_eps].
        eta_t = tf.clip_by_value(eta_t, -total_eps, total_eps)
    elif ord in [1, 2]:
        # Reduce over all non-batch dimensions.
        # NOTE(review): `xrange` is Python 2 only, and len(tf.shape(...))
        # on a tensor looks suspect (likely intended as the static rank);
        # verify before porting to Python 3 / TF2.
        reduc_ind = list(xrange(1, len(tf.shape(eta_t))))
        if ord == 1:
            norm = tf.reduce_sum(tf.abs(eta_t),
                                 reduction_indices=reduc_ind,
                                 keep_dims=True)
        elif ord == 2:
            norm = tf.sqrt(tf.reduce_sum(tf.square(eta_t),
                                         reduction_indices=reduc_ind,
                                         keep_dims=True))
        # Rescale so the perturbation norm equals total_eps.
        eta_t = eta_t * total_eps / norm
    x_adv_t = x_input_t + eta_t
    return x_adv_t
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.