| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def ask_the_user(runner: Runner) -> Direction:
"""Ask the user what to do (in absolute UP, DOWN, etc.)"""
return runner.ask_absolute()
|
2f289aba30e1368abd675a9b9bb2be0924984d3d
| 3,648,200
|
import pandas as pd
import os
def patents_hgh(path):
"""Dynamic Relation Between Patents and R\\&D
a panel of 346 observations from 1975 to 1979
*number of observations* : 1730
*observation* : production units
*country* : United States
A dataframe containing :
obsno
firm index
year
year
cusip
Compustat's identifying number for the firm (Committee on Uniform
Security Identification Procedures number)
ardsic
a two-digit code for the applied R&D industrial classification
(roughly that in Bound, Cummins, Griliches, Hall, and Jaffe, in the
Griliches R&D, Patents, and Productivity volume)
scisect
is the firm in the scientific sector ?
logk
the logarithm of the book value of capital in 1972.
sumpat
the sum of patents applied for between 1972-1979.
logr
the logarithm of R&D spending during the year (in 1972 dollars)
logr1
the logarithm of R&D spending (one year lag)
logr2
the logarithm of R&D spending (two years lag)
logr3
the logarithm of R&D spending (three years lag)
logr4
the logarithm of R&D spending (four years lag)
logr5
the logarithm of R&D spending (five years lag)
pat
the number of patents applied for during the year that were
eventually granted
pat1
the number of patents (one year lag)
pat2
the number of patents (two years lag)
pat3
the number of patents (three years lag)
pat4
the number of patents (four years lag)
Hall, Bronwyn , Zvi Griliches and Jerry Hausman (1986) “Patents and R&D:
Is There a Lag?”, *International Economic Review*, **27**, 265-283.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `patents_hgh.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1730 rows and 18 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'patents_hgh.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/PatentsHGH.csv'
maybe_download_and_extract(path, url,
save_file_name='patents_hgh.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
a3093aa59ea567f37560fb0d591e0cb9544e24f8
| 3,648,201
|
def optimize_spot_bid(ctx, instance_type, spot_bid):
"""
    Checks whether the bid is sane and makes an effort to place the instance in a sensible zone.
"""
spot_history = _get_spot_history(ctx, instance_type)
if spot_history:
_check_spot_bid(spot_bid, spot_history)
zones = ctx.ec2.get_all_zones()
most_stable_zone = choose_spot_zone(zones, spot_bid, spot_history)
logger.info("Placing spot instances in zone %s.", most_stable_zone)
return most_stable_zone
|
25bd3d9c952c256df12c3cc7efa257629d127af9
| 3,648,202
|
def new_hassle_participants():
"""Select participants for the room helpers."""
# Get a list of all current members.
members = helpers.get_all_members()
return flask.render_template('hassle_new_participants.html', members=members)
|
c8062ae498691ac72a17a969cf2ba7547e08eb9c
| 3,648,203
|
import json
def data_store_remove_folder(request):
"""
    Remove a sub-folder/sub-collection in hydroshareZone or any federated zone used for the
    HydroShare resource backend store. It is invoked by an AJAX call and returns a JSON object
    with a status of 'success' if it succeeds, or an HttpResponse with status code 400, 401, or
    500 if it fails.
The AJAX request must be a POST request with input data passed in for res_id and folder_path
where folder_path is the relative path for the folder to be removed under
res_id collection/directory.
"""
res_id = request.POST.get('res_id', None)
if res_id is None:
return HttpResponse('Bad request - resource id is not included',
status=status.HTTP_400_BAD_REQUEST)
res_id = str(res_id).strip()
try:
resource, _, user = authorize(request, res_id,
needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
except NotFound:
return HttpResponse('Bad request - resource not found', status=status.HTTP_400_BAD_REQUEST)
except PermissionDenied:
return HttpResponse('Permission denied', status=status.HTTP_401_UNAUTHORIZED)
folder_path = request.POST.get('folder_path', None)
if folder_path is None:
return HttpResponse('Bad request - folder_path is not included',
status=status.HTTP_400_BAD_REQUEST)
folder_path = str(folder_path).strip()
if not folder_path:
return HttpResponse('Bad request - folder_path cannot be empty',
status=status.HTTP_400_BAD_REQUEST)
if not folder_path.startswith('data/contents/'):
return HttpResponse('Bad request - folder_path must start with data/contents/',
status=status.HTTP_400_BAD_REQUEST)
if folder_path.find('/../') >= 0 or folder_path.endswith('/..'):
return HttpResponse('Bad request - folder_path must not contain /../',
status=status.HTTP_400_BAD_REQUEST)
try:
remove_folder(user, res_id, folder_path)
except SessionException as ex:
return HttpResponse(ex.stderr, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as ex:
        return HttpResponse(str(ex), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return_object = {'status': 'success'}
return HttpResponse(
json.dumps(return_object),
content_type="application/json"
)
|
d6583dca0967fdf282a3510defcdeeb70da6c7f7
| 3,648,204
|
import math
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Finds distance between two given points
Parameters:
x1, y1 : The x and y coordinates of first point
x2, y2 : The x and y coordinates of second point
Returns:
        Distance rounded to two decimal places.
"""
distance = math.sqrt( ((x1-x2)**2)+((y1-y2)**2) )
return round(distance,2)
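A quick usage sketch for the helper above; the sample coordinates are illustrative.
# The 3-4-5 right triangle gives a distance of exactly 5.0.
assert distance(0.0, 0.0, 3.0, 4.0) == 5.0
# sqrt(2) is rounded to two decimal places.
assert distance(0.0, 0.0, 1.0, 1.0) == 1.41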
|
63f103f46b52aae146b52f385e15bc3441f042e5
| 3,648,205
|
def load_target_class(input_dir):
"""Loads target classes."""
df = pd.read_csv(join(input_dir, "target_class.csv"), header=None, index_col=0, names=["Target"])
return df
|
58fea2aebd6c04dd0b51ec3ef3fc627212aa2b29
| 3,648,206
|
import os
import io
import PIL
import hashlib
def dict_to_tf_example(data,
dataset_directory,
label_map_path,
ignore_difficult_instances=False,
image_subdirectory='Images',
is_debug=False):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
    label_map_path: the prototxt file that contains a map from string label names to integer ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
PASCAL dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
with tf_gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
if is_debug:
# Each box is ymin, xmin, ymax, xmax = box in [0, 1]
bboxes_array = np.array([ymin, xmin, ymax, xmax])
bboxes_array = np.transpose(bboxes_array)
classes_array = np.array(classes)
scores_array = None
category_index = label_map_util.create_category_index_from_labelmap(\
label_map_path, use_display_name=True)
display_bbox(np.array(image), bboxes_array, classes_array, scores_array, category_index)
return example
|
af307dacf5b0b2ccfbc2ceaa74059794dbf872f2
| 3,648,207
|
import numpy as np
def fix_labels(ply_gt, ply_seg):
"""
Remove extra vertices from the ground truth
"""
size = len(ply_gt.elements[0]["x"])
gt_x = np.array(ply_gt.elements[0]["x"])
seg_x = np.array(ply_seg.elements[0]["x"])
new_gt_label = np.zeros_like(seg_x)
gt_label = np.array(ply_gt.elements[0]["label"])
for i in range(size):
if seg_x.shape[0] > i:
if abs(gt_x[i] - seg_x[i]) < 1e-16:
new_gt_label[i] = gt_label[i]
    new_gt_label = clean_gt(new_gt_label).astype(int)
return new_gt_label
|
291fedd887b82e6099f5aba6a006fd0e33a7fb18
| 3,648,208
|
import requests
def get_coin_price(api_url: str, currency: str) -> float:
"""
Get the USD price of a coin from Gemini
Args:
api_url: The API URL for Gemini
currency: The cryptocurrency the bot is monitoring
Returns:
coin_price: The price the coin currently holds in USD
"""
# Instantiate Gemini and query the price
coin_price = -1
api_query = "/v1/pricefeed"
try:
price_feeds = requests.get(api_url + api_query).json()
for feed in price_feeds:
if feed.get('pair') == currency + "USD":
coin_price = float(feed.get('price'))
except Exception as err:
print("ERROR: Unable to get price due to %s" % err)
print("Price feed: %s" % price_feeds)
return coin_price
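A hedged usage sketch; the Gemini base URL and the "BTC" symbol are illustrative values, not anything the function requires.
price = get_coin_price("https://api.gemini.com", "BTC")
if price > 0:
    print("Current BTC price: $%.2f" % price)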
|
0683554aea85faa1dd105cbf81144685d7d2deec
| 3,648,209
|
import hmac
import base64
def GenerateAuthToken(key_name, user_id, action_id='', when=None):
"""Generates a URL-safe token based on XSRFToken but for generla purpose.
Args:
key_name (str): name of secret key to generate token.
user_id (str): the user ID of the authenticated user.
action_id (str): a string identifier of the action they requested
authorization for.
when (datetime): the time when the user was authorized for this action.
If not set the current utc time is used.
Returns:
A string token.
"""
key = SecretKey.GetSecretKey(key_name)
when = when or time_util.GetUTCNow()
when_timestamp = time_util.ConvertToTimestamp(when)
digester = hmac.new(key)
digester.update(str(user_id))
digester.update(_DELIMITER)
digester.update(action_id)
digester.update(_DELIMITER)
digester.update(str(when_timestamp))
digest = digester.digest()
return base64.urlsafe_b64encode('%s%s%d' % (digest, _DELIMITER,
when_timestamp))
|
8375889ba4cdc1fc996c48330cf0cd23824f5946
| 3,648,210
|
import os
def download_dataset(file_url, file_name):
"""
Utility to download a dataset
"""
# %%
new_dir = up(up(up(up(os.path.abspath(__file__)))))
os.chdir(new_dir)
file_path = r'artificial_neural_networks/datasets/' + file_name
exists = os.path.isfile(file_path)
if exists:
print(file_name + ' already exists.')
print('You have to delete it first, if you want to re-download it.')
else:
urlretrieve(file_url, file_path)
        print(file_name + ' was downloaded successfully.')
# %%
return file_path
|
90a2cf4da99b1a69e60e160dd69db3df333924bb
| 3,648,211
|
import torch
def get_dataset_psnr(device, model, dataset, source_img_idx_shift=64,
batch_size=10, max_num_scenes=None):
"""Returns PSNR for each scene in a dataset by comparing the view predicted
by a model and the ground truth view.
Args:
device (torch.device): Device to perform PSNR calculation on.
model (models.neural_renderer.NeuralRenderer): Model to evaluate.
dataset (misc.dataloaders.SceneRenderDataset): Dataset to evaluate model
performance on. Should be one of "chairs-test" or "cars-test".
source_img_idx_shift (int): Index of source image for each scene. For
example if 00064.png is the source view, then
source_img_idx_shift = 64.
batch_size (int): Batch size to use when generating predictions. This
should be a divisor of the number of images per scene.
max_num_scenes (None or int): Optionally limit the maximum number of
scenes to calculate PSNR for.
Notes:
This function should be used with the ShapeNet chairs and cars *test*
sets.
"""
num_imgs_per_scene = dataset.num_imgs_per_scene
# Set number of scenes to calculate
num_scenes = dataset.num_scenes
if max_num_scenes is not None:
num_scenes = min(max_num_scenes, num_scenes)
# Calculate number of batches per scene
    assert (num_imgs_per_scene - 1) % batch_size == 0, \
        "Batch size {} must divide number of images per scene {}.".format(
            batch_size, num_imgs_per_scene - 1)
# Comparison are made against all images except the source image (and
# therefore subtract 1 from total number of images)
batches_per_scene = (num_imgs_per_scene - 1) // batch_size
# Initialize psnr values
psnrs = []
for i in range(num_scenes):
# Extract source view
source_img_idx = i * num_imgs_per_scene + source_img_idx_shift
img_source = dataset[source_img_idx]["img"].unsqueeze(0).repeat(batch_size, 1, 1, 1).to(device)
render_params = dataset[source_img_idx]["render_params"]
azimuth_source = torch.Tensor([render_params["azimuth"]]).repeat(batch_size).to(device)
elevation_source = torch.Tensor([render_params["elevation"]]).repeat(batch_size).to(device)
# Infer source scene
scenes = model.inverse_render(img_source)
# Iterate over all other views of scene
num_points_in_batch = 0
data_list = []
scene_psnr = 0.
for j in range(num_imgs_per_scene):
if j == source_img_idx_shift:
continue # Do not compare against same image
# Add new image to list of images we want to compare to
data_list.append(dataset[i * num_imgs_per_scene + j])
num_points_in_batch += 1
# If we have filled up a batch, make psnr calculation
if num_points_in_batch == batch_size:
# Create batch for target data
img_target, azimuth_target, elevation_target = create_batch_from_data_list(data_list)
img_target = img_target.to(device)
azimuth_target = azimuth_target.to(device)
elevation_target = elevation_target.to(device)
# Rotate scene and render image
rotated = model.rotate_source_to_target(scenes, azimuth_source,
elevation_source, azimuth_target,
elevation_target)
img_predicted = model.render(rotated).detach()
scene_psnr += get_psnr(img_predicted, img_target)
data_list = []
num_points_in_batch = 0
psnrs.append(scene_psnr / batches_per_scene)
print("{}/{}: Current - {:.3f}, Mean - {:.4f}".format(i + 1,
num_scenes,
psnrs[-1],
torch.mean(torch.Tensor(psnrs))))
return psnrs
|
0ce2274aac72d2510fd0c9c067a6efa542c103ec
| 3,648,212
|
def smallest_continuous_multiple(max_multiple):
"""
Function takes an int, and returns the smallest natural number evenly divisible by all numbers
less than or equal to the input max_multiple.
REQ: max_multiple >= 0 and whole
:param max_multiple: {int}
    :return: smallest natural number evenly divisible by all numbers less than or equal to input
    Function reduces time complexity by iteratively removing redundant factors from the
    check_list, i.e. suppose 12 exists in the list, then 6, 4, 3, 2 and 1 will have been removed.
    This check is done to remove the factors of every int, to reduce the lookup time later.
"""
# all numbers less than or equal to 2 are evenly divisible by themselves and below
if max_multiple <= 2:
return max_multiple
check_list = []
# make a list of all ints from 1 to input
for i in range(max_multiple):
check_list.append(i + 1)
# loop through check list backwards
for i in reversed(check_list):
# get factors of i without i included
temp_factors = get_factors(i)
temp_factors.remove(i)
# loop through the remaining factors, removing them from the check_list
for j in temp_factors:
try:
check_list.remove(j)
except ValueError:
pass
temp_num = max_multiple
    # loop indefinitely until the smallest int that satisfies the exit condition is found
while True:
# if all factors less than max_multiple divide evenly into curr, return
if all(temp_num % n == 0 for n in check_list):
return temp_num
else:
temp_num += max_multiple
|
57423ed0941d18b54a1da33bc561f79ed19ae145
| 3,648,213
|
def context_list_entities(context):
"""
Returns list of entities to be displayed in list view
"""
# log.info(context['List_rows'])
if 'List_rows' in context:
return context['List_rows']['field_value']
elif 'entities' in context:
return context['entities']
log.warning("No entity list found in context %r"%(context.keys()))
return None
|
1b00e5cd6593a7e0c8770e9bbeaae5c3b47ac78a
| 3,648,214
|
def run(arg):
"""Entry point"""
error_map = {}
validate_path(arg, None, error_map)
if len(error_map) > 0:
error_count = 0
for file, errors in error_map.items():
print(f"Error in {file}:")
for error in errors:
print(f" {error}")
            error_count += len(errors)  # count every reported error, not just one per file
print("")
print(f"{error_count} error(s) found in {len(error_map)} file(s)")
return 1
return 0
|
0e124b87b62076af713b8caea686e3c44a4e83a2
| 3,648,215
|
import struct
def _bitcode_symbols_partial_impl(
*,
actions,
binary_artifact,
bitcode_symbol_maps,
dependency_targets,
label_name,
output_discriminator,
package_bitcode,
platform_prerequisites):
"""Implementation for the bitcode symbols processing partial."""
bitcode_dirs = []
bitcode_symbols = {}
if bitcode_symbol_maps:
bitcode_symbols.update(bitcode_symbol_maps)
if binary_artifact and bitcode_symbols:
bitcode_files = []
copy_commands = []
for arch in bitcode_symbols:
bitcode_file = bitcode_symbols[arch]
if not bitcode_file:
continue
bitcode_files.append(bitcode_file)
# Get the UUID of the arch slice and use that to name the bcsymbolmap file.
copy_commands.append(
("cp {bitcode_file} " +
"${{OUTPUT_DIR}}/$(dwarfdump -u {binary} " +
"| grep \"({arch})\" | cut -d' ' -f2).bcsymbolmap").format(
arch = arch,
binary = binary_artifact.path,
bitcode_file = bitcode_file.path,
),
)
if bitcode_files:
bitcode_dir = intermediates.directory(
actions = actions,
target_name = label_name,
output_discriminator = output_discriminator,
dir_name = "bitcode_files",
)
bitcode_dirs.append(bitcode_dir)
apple_support.run_shell(
actions = actions,
apple_fragment = platform_prerequisites.apple_fragment,
inputs = [binary_artifact] + bitcode_files,
outputs = [bitcode_dir],
command = "mkdir -p ${OUTPUT_DIR} && " + " && ".join(copy_commands),
env = {"OUTPUT_DIR": bitcode_dir.path},
mnemonic = "BitcodeSymbolsCopy",
xcode_config = platform_prerequisites.xcode_version_config,
)
transitive_bitcode_files = depset(
direct = bitcode_dirs,
transitive = [
x[_AppleBitcodeInfo].bitcode
for x in dependency_targets
if _AppleBitcodeInfo in x
],
)
if package_bitcode:
bundle_files = [(processor.location.archive, "BCSymbolMaps", transitive_bitcode_files)]
else:
bundle_files = []
return struct(
bundle_files = bundle_files,
providers = [_AppleBitcodeInfo(bitcode = transitive_bitcode_files)],
)
|
64606e63a7831a110585ceb83fb34699b373db0a
| 3,648,216
|
def _str_trim_left(x):
"""
Remove leading whitespace.
"""
    return x.str.replace(r"^\s*", "", regex=True)
|
2718086073706411929b45edf80a1d464dfaeff6
| 3,648,217
|
def zipcompress(items_list, flags_list):
"""
SeeAlso:
vt.zipcompress
"""
return [compress(list_, flags) for list_, flags in zip(items_list, flags_list)]
|
e8f85c058db442a967d89ef2f74e5e32cc58a737
| 3,648,218
|
def test_config_file_fails_missing_value(monkeypatch, presence, config):
"""Check if test fails with missing value in database configuration."""
def mock_file_config(self):
return {'database': {}}
monkeypatch.setattr(presence.builder, "fetch_file_config", mock_file_config)
status, msg = presence.check_configuration_file()
assert status == "Skipping"
assert "No configuration" in msg
assert presence.db_config == {}
|
34e14fa3b72fbd3a64930b9ce46da61e76138650
| 3,648,219
|
def construct_run_config(iterations_per_loop):
"""Construct the run config."""
# Parse hparams
hparams = ssd_model.default_hparams()
hparams.parse(FLAGS.hparams)
return dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
resnet_checkpoint=FLAGS.resnet_checkpoint,
val_json_file=FLAGS.val_json_file,
model_dir=FLAGS.model_dir,
iterations_per_loop=iterations_per_loop,
steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
eval_samples=FLAGS.eval_samples,
transpose_input=False if FLAGS.input_partition_dims is not None else True,
use_spatial_partitioning=True
if FLAGS.input_partition_dims is not None else False,
dataset_threadpool_size=FLAGS.dataset_threadpool_size
)
|
d2989e795ab14a9931837356cb7a6c3752538429
| 3,648,220
|
import numpy as np
def bezier_curve(points, nTimes=1000):
"""
Given a set of control points, return the
bezier curve defined by the control points.
Control points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
nTimes is the number of time steps, defaults to 1000
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
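The snippet above relies on a bernstein_poly helper that is not captured here. A plausible companion, sketched under the assumption that scipy is available, evaluates the standard Bernstein basis; the helper name and the three control points in the usage line are illustrative.
from scipy.special import comb

def bernstein_poly(i, n, t):
    # Bernstein basis polynomial B_{i,n}(t) = C(n, i) * t**i * (1 - t)**(n - i)
    return comb(n, i) * (t ** i) * ((1 - t) ** (n - i))

# Usage sketch: a quadratic Bezier through three control points.
xvals, yvals = bezier_curve([[1, 1], [2, 3], [4, 5]], nTimes=50)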
|
3d4ea2a42e3e4bb7ff739b262e3fc69784656fed
| 3,648,221
|
def _compact_temporaries(exprs):
"""
Drop temporaries consisting of isolated symbols.
"""
# First of all, convert to SSA
exprs = makeit_ssa(exprs)
# What's gonna be dropped
mapper = {e.lhs: e.rhs for e in exprs
if e.lhs.is_Symbol and (q_leaf(e.rhs) or e.rhs.is_Function)}
processed = []
for e in exprs:
if e.lhs not in mapper:
# The temporary is retained, and substitutions may be applied
expr = e
while True:
handle = uxreplace(expr, mapper)
if handle == expr:
break
else:
expr = handle
processed.append(handle)
return processed
|
b28b839cc17124b83b6b26b972394486cd7d8741
| 3,648,222
|
def print_formula(elements):
"""
The input dictionary, atoms and their amount, is processed to produce
the chemical formula as a string
Parameters
----------
elements : dict
The elements that form the metabolite and their corresponding amount
Returns
-------
formula : str
The formula of the metabolite
"""
formula = "".join([f"{k}{int(v)}" for k, v in elements.items()])
return formula
|
a3c404ef0d18c417e44aee21106917f4ee203065
| 3,648,223
|
def try_get_code(url):
"""Returns code of URL if exists in database, else None"""
command = """SELECT short FROM urls WHERE full=?;"""
result = __execute_command(command, (url,))
if result is None:
return None
return result[0]
|
63a88471f6fdfc44bc22383edda0eb65f9bf1b84
| 3,648,224
|
import unicodedata
def is_chinese_char(cc):
"""
Check if the character is Chinese
args:
cc: char
output:
boolean
"""
return unicodedata.category(cc) == 'Lo'
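Note that the 'Lo' (Letter, other) category also matches kana, Hangul and other scripts, so the check above is broader than Han characters alone. A stricter variant, offered only as a sketch and not as the author's implementation, tests the CJK Unified Ideographs block directly.
def is_cjk_ideograph(cc):
    # CJK Unified Ideographs occupy U+4E00 through U+9FFF.
    return '\u4e00' <= cc <= '\u9fff'

assert is_cjk_ideograph('中')
assert not is_cjk_ideograph('a')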
|
d376e6097e628ac2f3a7934ba42ee2772177f857
| 3,648,225
|
import json
def _get_ec2_on_demand_prices(region_name: str) -> pd.DataFrame:
"""
Returns a dataframe with columns instance_type, memory_gb, logical_cpu, and price
where price is the on-demand price
"""
# All comments about the pricing API are based on
# https://www.sentiatechblog.com/using-the-ec2-price-list-api
# us-east-1 is the only region this pricing API is available and the pricing
# endpoint in us-east-1 has pricing data for all regions.
pricing_client = boto3.client("pricing", region_name="us-east-1")
filters = [
# only get prices for the specified region
{
"Type": "TERM_MATCH",
"Field": "regionCode",
"Value": region_name,
},
# filter out instance types that come with SQL Server pre-installed
{
"Type": "TERM_MATCH",
"Field": "preInstalledSw",
"Value": "NA",
},
# limit ourselves to just Linux instances for now
# TODO add support for Windows eventually
{
"Type": "TERM_MATCH",
"Field": "operatingSystem",
"Value": "Linux",
},
# Shared is a "regular" EC2 instance, as opposed to Dedicated and Host
{"Type": "TERM_MATCH", "Field": "tenancy", "Value": "Shared"},
# This relates to EC2 capacity reservations. Used is correct for when we don't
# have any reservations
{"Type": "TERM_MATCH", "Field": "capacitystatus", "Value": "Used"},
]
records = []
for product_json in _boto3_paginate(
pricing_client.get_products,
Filters=filters,
ServiceCode="AmazonEC2",
FormatVersion="aws_v1",
):
product = json.loads(product_json)
attributes = product["product"]["attributes"]
instance_type = attributes["instanceType"]
# We don't expect the "warnings" to get hit, we just don't want to get thrown
# off if the data format changes unexpectedly or something like that.
if "physicalProcessor" not in attributes:
print(
f"Warning, skipping {instance_type} because physicalProcessor is not "
"specified"
)
continue
# effectively, this skips Graviton (ARM-based) processors
# TODO eventually support Graviton processors.
if (
"intel" not in attributes["physicalProcessor"].lower()
and "amd" not in attributes["physicalProcessor"].lower()
):
# only log if we see non-Graviton processors
if "AWS Graviton" not in attributes["physicalProcessor"]:
print(
"Skipping non-Intel/AMD processor "
f"{attributes['physicalProcessor']} in {instance_type}"
)
continue
if "OnDemand" not in product["terms"]:
print(
f"Warning, skipping {instance_type} because there was no OnDemand terms"
)
continue
on_demand = list(product["terms"]["OnDemand"].values())
if len(on_demand) != 1:
print(
f"Warning, skipping {instance_type} because there was more than one "
"OnDemand SKU"
)
continue
price_dimensions = list(on_demand[0]["priceDimensions"].values())
if len(price_dimensions) != 1:
print(
f"Warning, skipping {instance_type} because there was more than one "
"priceDimensions"
)
continue
pricing = price_dimensions[0]
if pricing["unit"] != "Hrs":
print(
f"Warning, skipping {instance_type} because the pricing unit is not "
f"Hrs: {pricing['unit']}"
)
continue
if "USD" not in pricing["pricePerUnit"]:
print(
f"Warning, skipping {instance_type} because the pricing is not in USD"
)
continue
usd_price = pricing["pricePerUnit"]["USD"]
try:
usd_price_float = float(usd_price)
except ValueError:
print(
f"Warning, skipping {instance_type} because the price is not a float: "
f"{usd_price}"
)
continue
memory = attributes["memory"]
if not memory.endswith(" GiB"):
print(
f"Warning, skipping {instance_type} because memory doesn't end in GiB: "
f"{memory}"
)
continue
try:
memory_gb_float = float(memory[: -len(" GiB")])
except ValueError:
print(
f"Warning, skipping {instance_type} because memory isn't an float: "
f"{memory}"
)
continue
try:
vcpu_int = int(attributes["vcpu"])
except ValueError:
print(
f"Warning, skipping {instance_type} because vcpu isn't an int: "
f"{attributes['vcpu']}"
)
continue
records.append((instance_type, memory_gb_float, vcpu_int, usd_price_float))
return pd.DataFrame.from_records(
records, columns=["instance_type", "memory_gb", "logical_cpu", "price"]
)
|
8946260abee2f11f47c9ed93f1b6e2c90cd17c31
| 3,648,226
|
def resize_image(image, min_dim=None, max_dim=None, padding=False):
"""
Resizes an image keeping the aspect ratio.
min_dim: if provided, resizes the image such that it's smaller
dimension == min_dim
max_dim: if provided, ensures that the image longest side doesn't
exceed this value.
    padding: If true, pads image with zeros so its size is max_dim x max_dim
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
# Does it exceed max dim?
if max_dim:
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image and mask
if scale != 1:
image = cv2.resize(
image, (round(w * scale), round(h * scale)))
# Need padding?
if padding:
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
return image, window, scale, padding
|
17c8cb953753321f1aea169ebcb199598b7fd2f1
| 3,648,227
|
import numpy as np
def idwt(approx, wavelets, h=np.array([1.0 / np.sqrt(2), -1.0 / np.sqrt(2)]),
g=np.array([1.0 / np.sqrt(2), 1.0 / np.sqrt(2)])):
"""
Simple inverse discrete wavelet transform.
for good reference: http://www.mathworks.com/help/wavelet/ref/dwt.html
@param approx: approximation of signal at low resolution
@param h: high pass filter (for details space)
@param g: low pass filter (for approximation space)
@return: recovered signal
"""
wave_level = iter(wavelets[::-1])
h, g = g[::-1], h[::-1]
recovered = approx
for wave in wave_level:
#upsample
recovered = np.column_stack([recovered, np.zeros(recovered.size)]).flatten()
wave_up = np.column_stack([wave, np.zeros(wave.size)]).flatten()
recovered = np.convolve(recovered, h)[:-(h.size - 1)]
recovered = recovered + np.convolve(wave_up, g)[:-(g.size - 1)]
return recovered
|
a41cd22d81de733428123681bbea10837d4c7237
| 3,648,228
|
import json
def app_durations():
"""Generate JavaScript for appDurations."""
return 'appDurations = ' + json.dumps(supported_durations)
|
3be9ecc801cd650a5cd1a3c4db1c50957ccfa1c0
| 3,648,229
|
def generic_cc(mag=10,dmag=8,band='K'):
"""Returns a generic contrast curve.
Keyword arguments:
mag -- magnitude of target star in passband
dmag -- can currently be either 8 or 4.5 (two example generic cc's being used)
band -- passband of observation.
"""
if dmag==8:
return fpp.ContrastCurveFromFile('%s/data/contrast_curves/ex8_K.txt' % KEPLERDIR,band,mag)
elif dmag==4.5:
return fpp.ContrastCurveFromFile('%s/data/contrast_curves/ex4.5_K.txt' % KEPLERDIR,band,mag)
|
04ca148c2a5b8d9eb2d0c60a0d6ad8e177901c5f
| 3,648,230
|
from typing import Any
def read_routes(*, db: Session = Depends(deps.get_db),data_in: schemas.DictDataCreate,current_user: models.User = Depends(deps.get_current_active_user)) -> Any:
"""
Retrieve Mock Data.
"""
db.add(models.Dict_Data(**jsonable_encoder(data_in)))
return {
"code": 20000,
"data": "",
"message":"修改成功",
}
|
09e575a8262a0818c7904e4e077d86f492f3407e
| 3,648,231
|
def get_companies_pagination_from_lagou(city_id=0, finance_stage_id=0, industry_id=0, page_no=1):
"""
    Crawl Lagou company pagination data.
    :param city_id: city id
    :param finance_stage_id: financing stage id
    :param industry_id: industry id
    :param page_no: page number
    :return: Lagou company pagination data
:rtype: utils.pagination.Pagination
"""
url = constants.COMPANIES_URL.format(city_id=city_id,
finance_stage_id=finance_stage_id,
industry_id=industry_id)
params = {'pn': page_no, 'sortField': constants.SORTED_BY_JOBS_COUNT}
response_json = utils.http_tools.requests_get(url=url, params=params).json()
pagination = utils.pagination.Pagination(per_page=int(response_json['pageSize']),
total=int(response_json['totalCount']))
return pagination
|
7a82e0dd7ad8ab960dbabb749e68867607b70878
| 3,648,232
|
def is_quant_contam(contam_model):
"""Get the flag for quantitative contamination"""
# the list of quantitative models
quant_models = ['GAUSS', 'FLUXCUBE']
# set the default value
isquantcont = True
# check whether the contamination is not quantitative
if not contam_model.upper() in quant_models:
# re-set the flag
isquantcont = False
# return the flag
return isquantcont
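A quick usage sketch of the flag helper above:
print(is_quant_contam('gauss'))      # True  -- 'GAUSS' is a quantitative model
print(is_quant_contam('geometric'))  # False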
|
8a88609857ac8eb61bfddfa8d8227ffa237d2641
| 3,648,233
|
def nms_wrapper(scores, boxes, threshold = 0.7, class_sets = None):
"""
post-process the results of im_detect
:param scores: N * K numpy
:param boxes: N * (K * 4) numpy
:param class_sets: e.g. CLASSES = ('__background__','person','bike','motorbike','car','bus')
:return: a list of K-1 dicts, no background, each is {'class': classname, 'dets': None | [[x1,y1,x2,y2,score],...]}
"""
num_class = scores.shape[1] if class_sets is None else len(class_sets)
assert num_class * 4 == boxes.shape[1],\
        "Detection scores and boxes don't match"
class_sets = ['class_' + str(i) for i in range(0, num_class)] if class_sets is None else class_sets
res = []
for ind, cls in enumerate(class_sets[1:]):
ind += 1 # skip background
cls_boxes = boxes[:, 4*ind : 4*(ind+1)]
cls_scores = scores[:, ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, thresh=0.3)
dets = dets[keep, :]
dets = dets[np.where(dets[:, 4] > threshold)]
r = {}
if dets.shape[0] > 0:
r['class'], r['dets'] = cls, dets
else:
r['class'], r['dets'] = cls, None
res.append(r)
return res
|
7f6a260811a1c20da40e41cc179488440bfc5164
| 3,648,234
|
def Rbf(
gamma: float = 1.0) -> InternalLayer:
"""Dual activation function for normalized RBF or squared exponential kernel.
Dual activation function is `f(x) = sqrt(2)*sin(sqrt(2*gamma) x + pi/4)`.
  NNGP kernel transformation corresponds to (with input dimension `d`)
`k = exp(- gamma / d * ||x - x'||^2) = exp(- gamma*(q11 + q22 - 2 * q12))`.
Args:
gamma:
related to characteristic length-scale (l) that controls width of the
kernel, where `gamma = 1 / (2 l^2)`.
Returns:
`(init_fn, apply_fn, kernel_fn)`.
"""
def fn(x):
return np.sqrt(2) * np.sin(np.sqrt(2 * gamma) * x + np.pi/4)
@_requires(diagonal_spatial=_Diagonal()) # pytype:disable=wrong-keyword-args
def kernel_fn(k: Kernel) -> Kernel:
"""Compute new kernels after an `Rbf` layer."""
cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
sum11, sum12, sum22 = _get_diagonal_outer_prods(cov1,
cov2,
k.diagonal_batch,
k.diagonal_spatial,
op.add)
def nngp_ntk_fn(nngp, sum_, ntk):
nngp = np.exp(gamma * (-sum_ + 2 * nngp))
if ntk is not None:
ntk *= 2 * gamma * nngp
return nngp, ntk
def nngp_fn_diag(nngp):
return np.ones_like(nngp)
nngp, ntk = nngp_ntk_fn(nngp, sum12, ntk)
if k.diagonal_batch and k.diagonal_spatial:
cov1 = nngp_fn_diag(cov1)
if cov2 is not None:
cov2 = nngp_fn_diag(cov2)
else:
cov1, _ = nngp_ntk_fn(cov1, sum11, None)
if cov2 is not None:
cov2, _ = nngp_ntk_fn(cov2, sum22, None)
return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
return _elementwise(fn, f'Rbf({gamma})', kernel_fn)
|
c7c44f6227d0d337da40a1afe8eff359ccaebbf5
| 3,648,235
|
from typing import Dict
from typing import Any
import os
def upgrade_state_dict_with_xlm_weights(
state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str,
) -> Dict[str, Any]:
"""
Load XLM weights into a Transformer encoder or decoder model.
Args:
state_dict: state dict for either TransformerEncoder or
TransformerDecoder
pretrained_xlm_checkpoint: checkpoint to load XLM weights from
Raises:
AssertionError: If architecture (num layers, attention heads, etc.)
does not match between the current Transformer encoder or
decoder and the pretrained_xlm_checkpoint
"""
if not os.path.exists(pretrained_xlm_checkpoint):
raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state["model"]
for key in xlm_state_dict.keys():
for search_key in ["embed_tokens", "embed_positions", "layers"]:
if search_key in key:
subkey = key[key.find(search_key) :]
assert subkey in state_dict, (
"{} Transformer encoder / decoder "
"state_dict does not contain {}. Cannot "
"load {} from pretrained XLM checkpoint "
"{} into Transformer.".format(
str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint
)
)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
|
4f73d82117cb0fa6a07926a9527ef5a0f185c1cf
| 3,648,236
|
def create_returns_tear_sheet(returns, positions=None,
transactions=None,
live_start_date=None,
cone_std=(1.0, 1.5, 2.0),
benchmark_rets=None,
bootstrap=False,
turnover_denom='AGB',
header_rows=None,
return_fig=False):
"""
Generate a number of plots for analyzing a strategy's returns.
- Fetches benchmarks, then creates the plots on a single figure.
- Plots: rolling returns (with cone), rolling beta, rolling sharpe,
rolling Fama-French risk factors, drawdowns, underwater plot, monthly
and annual return plots, daily similarity plots,
and return quantile box plot.
- Will also print the start and end dates of the strategy,
performance statistics, drawdown periods, and the return range.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame, optional
Executed trade volumes and fill prices.
- See full explanation in create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading,
after its backtest period.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- The cone is a normal distribution with this standard deviation
centered around a linear regression.
benchmark_rets : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
bootstrap : boolean, optional
Whether to perform bootstrap analysis for the performance
metrics. Takes a few minutes longer.
turnover_denom : str, optional
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
header_rows : dict or OrderedDict, optional
Extra rows to display at the top of the perf stats table.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
if benchmark_rets is not None:
returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)
plotting.show_perf_stats(returns, benchmark_rets,
positions=positions,
transactions=transactions,
turnover_denom=turnover_denom,
bootstrap=bootstrap,
live_start_date=live_start_date,
header_rows=header_rows)
plotting.show_worst_drawdown_periods(returns)
vertical_sections = 11
if live_start_date is not None:
vertical_sections += 1
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
if benchmark_rets is not None:
vertical_sections += 1
if bootstrap:
vertical_sections += 1
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_rolling_returns = plt.subplot(gs[:2, :])
i = 2
ax_rolling_returns_vol_match = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
ax_rolling_returns_log = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
ax_returns = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
if benchmark_rets is not None:
ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_monthly_heatmap = plt.subplot(gs[i, 0])
ax_annual_returns = plt.subplot(gs[i, 1])
ax_monthly_dist = plt.subplot(gs[i, 2])
i += 1
ax_return_quantiles = plt.subplot(gs[i, :])
i += 1
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=cone_std,
ax=ax_rolling_returns)
ax_rolling_returns.set_title(
'Cumulative returns')
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=None,
volatility_match=(benchmark_rets is not None),
legend_loc=None,
ax=ax_rolling_returns_vol_match)
ax_rolling_returns_vol_match.set_title(
'Cumulative returns volatility matched to benchmark')
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
logy=True,
live_start_date=live_start_date,
cone_std=cone_std,
ax=ax_rolling_returns_log)
ax_rolling_returns_log.set_title(
'Cumulative returns on logarithmic scale')
plotting.plot_returns(
returns,
live_start_date=live_start_date,
ax=ax_returns,
)
ax_returns.set_title(
'Returns')
if benchmark_rets is not None:
plotting.plot_rolling_beta(
returns, benchmark_rets, ax=ax_rolling_beta)
plotting.plot_rolling_volatility(
returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)
plotting.plot_rolling_sharpe(
returns, ax=ax_rolling_sharpe)
# Drawdowns
plotting.plot_drawdown_periods(
returns, top=5, ax=ax_drawdown)
plotting.plot_drawdown_underwater(
returns=returns, ax=ax_underwater)
plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)
plotting.plot_annual_returns(returns, ax=ax_annual_returns)
plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)
plotting.plot_return_quantiles(
returns,
live_start_date=live_start_date,
ax=ax_return_quantiles)
if bootstrap and (benchmark_rets is not None):
ax_bootstrap = plt.subplot(gs[i, :])
plotting.plot_perf_stats(returns, benchmark_rets,
ax=ax_bootstrap)
elif bootstrap:
raise ValueError('bootstrap requires passing of benchmark_rets.')
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig
|
5d01bb52ed3bd642ed8c7743ee41b4d57f732c2f
| 3,648,237
|
def vectorize_text(text_col: pd.Series,
vec_type: str = 'count',
**kwargs):
"""
Vectorizes pre-processed text. Instantiates the vectorizer and
fit_transform it to the data provided.
:param text_col: Pandas series, containing preprocessed text.
:param vec_type: string indicating what type of vectorization
(count or tfidf currently).
:param **kwargs: dict of keyworded arguments for sklearn vectorizer
functions.
:return: A tuple containing vectorized (doc-feature matrix that as d rows
and f columns for count and tfidf vectorization) and vectorizer_obj
(vectorization sklearn object representing trained vectorizer).
"""
# Check if vectorization type is supported
assert vec_type in ['count', 'tfidf']
# Get raw values from pandas series
text_raw = text_col.tolist()
# Lets the vectorizer know the input has already been pre-tokenized
# and is now delimited by whitespaces
kwargs['analyzer'] = str.split
# Apply proper vectorization
if vec_type == 'count':
count_vec = CountVectorizer(**kwargs)
vectorized = count_vec.fit_transform(text_raw)
vectorizer_obj = count_vec
elif vec_type == 'tfidf':
tfidf_vec = TfidfVectorizer(**kwargs)
vectorized = tfidf_vec.fit_transform(text_raw)
vectorizer_obj = tfidf_vec
# Return vectorized object
return vectorized, vectorizer_obj
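A usage sketch, assuming scikit-learn's CountVectorizer/TfidfVectorizer are imported at module level as the function body implies; the toy documents are illustrative.
import pandas as pd

docs = pd.Series(["the cat sat", "the dog sat", "the cat ran"])
matrix, vectorizer = vectorize_text(docs, vec_type="tfidf")
print(matrix.shape)   # (3, 5): three documents, five unique whitespace-delimited tokens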
|
fd1b720c5eee83d788684a49f5fe7ad26e955016
| 3,648,238
|
import numpy as np
def creation_LS(X,y,N):
"""Generates a random learning set of size N from the data in X
(containing the input samples) and in y (containing the corresponding
output values).
Parameters
----------
X: array containing the input samples
y: array containing the corresponding output values
Return
------
X_random_rows : array of shape [N, (number of columns of X)]
y_random_rows : array of shape [N]
"""
number_of_rows = X.shape[0]
random_indices = np.random.choice(number_of_rows, size=N, replace=False)
X_random_rows = X[random_indices, :]
y_random_rows= y[random_indices]
return X_random_rows, y_random_rows
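A usage sketch on synthetic data; the shapes are chosen only for illustration.
import numpy as np

X = np.random.rand(1000, 5)
y = np.random.rand(1000)
X_ls, y_ls = creation_LS(X, y, 100)
print(X_ls.shape, y_ls.shape)   # (100, 5) (100,)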
|
fdcf5fe96082a75b096b43747f940b8cf46f326b
| 3,648,239
|
def print_summary(show="all",
blocks=False, cid=True, blobs=True, size=True,
typ=False, ch=False, ch_online=True,
name=True, title=False, path=False,
sanitize=False,
start=1, end=0, channel=None, invalid=False,
reverse=False,
file=None, fdate=False, sep=";",
server="http://localhost:5279"):
"""Print a summary of the items downloaded from the LBRY network.
Parameters
----------
show: str, optional
It defaults to `'all'`, in which case it shows all items.
If it is `'incomplete'` it will show claims that are missing blobs.
If it is `'full'` it will show claims that have all blobs.
If it is `'media'` it will show claims that have the media file
(mp4, mp3, mkv, etc.).
Normally only items that have all blobs also have a media file;
however, if the claim is currently being downloaded
a partial media file may be present.
If it is `'missing'` it will show claims that don't have
the media file, whether the full blobs are present or not.
blocks: bool, optional
It defaults to `False`, in which case it won't print
the `height` block of the claims.
If it is `True` it will print this value, which gives some idea
of when the claim was registered in the blockchain.
cid: bool, optional
It defaults to `True`.
Show the `'claim_id'` of the claim.
It is a 40 character alphanumeric string.
blobs: bool, optional
It defaults to `True`.
Show the number of blobs in the file, and how many are complete.
size: bool, optional
It defaults to `True`.
Show the length of the stream in minutes and seconds, like `14:12`,
when possible (audio and video), and also the size in mebibytes (MB).
typ: bool, optional
It defaults to `False`.
Show the type of claim (video, audio, document, etc.)
ch: bool, optional
It defaults to `False`.
Show the name of the channel that published the claim.
This is slow if `ch_online=True`.
ch_online: bool, optional
It defaults to `True`, in which case it searches for the channel name
by doing a reverse search of the item online. This makes the search
slow.
By setting it to `False` it will consider the channel name
stored in the input dictionary itself, which will be faster
but it won't be the full name of the channel. If no channel is found
offline, then it will set a default value `'_None_'` just so
it can be printed with no error.
This parameter only has effect if `ch=True`, or if `channel`
is used, as it internally sets `ch=True`.
name: bool, optional
It defaults to `True`.
Show the name of the claim.
title: bool, optional
It defaults to `False`.
Show the title of the claim.
path: bool, optional
It defaults to `False`.
Show the full path of the saved media file.
sanitize: bool, optional
It defaults to `False`, in which case it will not remove the emojis
from the name of the claim and channel.
If it is `True` it will remove these unicode characters.
This option requires the `emoji` package to be installed.
start: int, optional
It defaults to 1.
Show claims starting from this index in the list of items.
end: int, optional
It defaults to 0.
Show claims until and including this index in the list of items.
If it is 0, it is the same as the last index in the list.
channel: str, optional
It defaults to `None`.
It must be a channel's name, in which case it shows
only the claims published by this channel.
Using this parameter sets `ch=True`.
invalid: bool, optional
It defaults to `False`, in which case it prints every single claim
previously downloaded.
If it is `True` it will only print those claims that are 'invalid',
that is, those that cannot be resolved anymore from the online
database. This probably means that the author decided to remove
the claims at some point after they were downloaded originally.
This can be verified with the blockchain explorer, by following
the claim ID for an 'unspent' transaction.
Using this parameter sets `ch_online=False` as the channel name
of invalid claims cannot be resolved online, only from the offline
database.
reverse: bool, optional
It defaults to `False`, in which case older items come first
in the output list.
If it is `True` newer claims are at the beginning of the list.
file: str, optional
It defaults to `None`.
It must be a writable path to which the summary will be written.
Otherwise the summary will be printed to the terminal.
fdate: bool, optional
It defaults to `False`.
If it is `True` it will add the date to the name of the summary file.
sep: str, optional
It defaults to `;`. It is the separator character between
the data fields in the printed summary. Since the claim name
can have commas, a semicolon `;` is used by default.
server: str, optional
It defaults to `'http://localhost:5279'`.
This is the address of the `lbrynet` daemon, which should be running
in your computer before using any `lbrynet` command.
Normally, there is no need to change this parameter from its default
value.
Returns
-------
bool
It returns `True` if it printed the summary successfully.
If there is any error it will return `False`.
"""
if not funcs.server_exists(server=server):
return False
output = sort.sort_items_size(reverse=False, invalid=invalid,
server=server)
items = output["claims"]
if not items or len(items) < 1:
if file:
print("No file written.")
return False
if invalid:
ch_online = False
print()
status = prnt.print_items(items=items, show=show,
blocks=blocks, cid=cid, blobs=blobs,
size=size,
typ=typ, ch=ch, ch_online=ch_online,
name=name, title=title, path=path,
sanitize=sanitize,
start=start, end=end, channel=channel,
reverse=reverse,
file=file, fdate=fdate, sep=sep,
server=server)
return status
|
6888917bd6a944c6e91c0d9796383b279db68315
| 3,648,240
|
def nice_number_en(number, speech, denominators=range(1, 21)):
""" English helper for nice_number
    This function formats a float into a human-understandable string. For example,
    4.5 becomes "4 and a half" for speech and "4 1/2" for text.
Args:
number (int or float): the float to format
speech (bool): format for speech (True) or display (False)
denominators (iter of ints): denominators to use, default [1 .. 20]
Returns:
(str): The formatted string.
"""
result = convert_to_mixed_fraction(number, denominators)
if not result:
# Give up, just represent as a 3 decimal number
return str(round(number, 3))
whole, num, den = result
if not speech:
if num == 0:
# TODO: Number grouping? E.g. "1,000,000"
return str(whole)
else:
return '{} {}/{}'.format(whole, num, den)
if num == 0:
return str(whole)
den_str = _FRACTION_STRING_EN[den]
if whole == 0:
if num == 1:
return_string = 'a {}'.format(den_str)
else:
return_string = '{} {}'.format(num, den_str)
elif num == 1:
return_string = '{} and a {}'.format(whole, den_str)
else:
return_string = '{} and {} {}'.format(whole, num, den_str)
if num > 1:
return_string += 's'
return return_string
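A usage sketch mirroring the docstring's own example; this assumes the module-level convert_to_mixed_fraction helper and _FRACTION_STRING_EN table that the function depends on.
print(nice_number_en(4.5, speech=True))    # "4 and a half"
print(nice_number_en(4.5, speech=False))   # "4 1/2"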
|
9816277c3374ddc5ba8c9b598f5ef27893fb3f1b
| 3,648,241
|
import os
import re
def read_dataframe_by_substring(directory, substring, index_col=None, parse_dates=False, **kwargs):
"""Return a dataframe for the file containing substring.
Parameters
----------
directory : str
substring : str
identifier for output file, must be unique in directory
index_col : str | int | None
Index column name or index
kwargs : kwargs
Passed to underlying library for dataframe conversion.
Returns
-------
pd.DataFrame
"""
files = [x for x in os.listdir(directory) if substring in x]
# Exclude any files that may have rolled, such as
# Circuits-Losses-1-2.feather.1.gz
regex = re.compile(r"\.\w+\.\d+(?:\.\w+)?$")
files = [x for x in files if regex.search(x) is None]
if not files:
return None
assert len(files) == 1, f"found multiple {substring} files in {directory}"
filename = files[0]
return read_dataframe(
os.path.join(directory, filename), index_col=index_col, parse_dates=parse_dates, **kwargs
)
|
c4b8d10c36f8262263b45fbaae0cdb6306e5d6cc
| 3,648,242
|
import logging
import numpy as np
def load_embeddings(path):
"""
Load embeddings from file and put into dict.
:param path: path to embeddings file
:return: a map word->embedding
"""
logging.info('Loading embeddings...')
embeddings = dict()
with open(path, 'r') as f:
for line in f:
line = line.split(' ')
embeddings[line[0]] = np.array([float(a) for a in line[1:]])
return embeddings
|
3e7e05cc9131dfb9d06c4c220d5e13d6965180b7
| 3,648,243
|
def helm_preserve(preserve):
"""Convert secret data to a "--set" string for Helm deployments.
Args:
preserve (Iterable): Set of secrets we wish to get data from to assign to the Helm Chart.
Returns:
str: String containing variables to be set with Helm release.
"""
env_vars = []
for item in preserve:
if isinstance(item, tuple):
item = HelmPreserve(*item)
elif not isinstance(item, HelmPreserve):
            raise TypeError("Items in preserve array must be HelmPreserve named tuples")
secret_data = secret_read(item.secret_name, item.secret_namespace)
env_vars.append(HelmSet(item.values_path, secret_data[item.data_item]))
# Environmental variables
# TODO: This may well be its own subfunction
env_vars_string = "".join(
[
" --set{} {}={}".format(
"-string" if item.set_string else "", item.key, item.value
)
for item in env_vars
]
)
return env_vars_string
|
095d5b914feb327c81e4630347ab04d1954d2f14
| 3,648,244
|
def format_component_descriptor(name, version):
"""
Return a properly formatted component 'descriptor' in the format
<name>-<version>
"""
return '{0}-{1}'.format(name, version)
|
2edb92f20179ae587614cc3c9ca8198c9a4c240e
| 3,648,245
|
import sqlite3
def dbconn():
"""
Initializing db connection
"""
sqlite_db_file = '/tmp/test_qbo.db'
return sqlite3.connect(sqlite_db_file, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
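A quick usage sketch; the table name is illustrative.
conn = dbconn()
conn.execute("CREATE TABLE IF NOT EXISTS invoices (id INTEGER PRIMARY KEY, total REAL)")
conn.commit()
conn.close()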
|
b0c6dd235490cee93ada20f060d681a319b120f0
| 3,648,246
|
import hashlib
def md5(fname):
"""
Compute the md5 of a file in chunks.
Avoid running out of memory when hashing large files.
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
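A self-contained sanity check for the chunked hash, comparing it against hashlib on the same bytes.
import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
print(md5(tmp.name) == hashlib.md5(b"hello world").hexdigest())   # True
os.remove(tmp.name)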
|
9e0bfbd625df6a46d5bff4cd0e9f065d1eaf8a4b
| 3,648,247
|
def get_r(x, y, x1, y1):
"""
Get r vector following Xu et al. (2006) Eq. 4.2
x, y = arrays; x1, y1 = single points; or vice-versa
"""
return ((x-x1)**2 + (y-y1)**2)**0.5
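A usage sketch showing the array/point broadcasting the docstring describes.
import numpy as np

x = np.array([3.0, 0.0, 1.0])
y = np.array([4.0, 2.0, 1.0])
print(get_r(x, y, 0.0, 0.0))   # distances to the origin: 5, 2, sqrt(2)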
|
424408f86e6e3301ee6eca72e2da7da5bf1f8140
| 3,648,248
|
import re
def replace_empty_bracket(tokens):
"""
    Remove empty brackets
:param tokens: List of tokens
:return: Fixed sequence
"""
merged = "".join(tokens)
find = re.search(r"\{\}", merged)
while find:
merged = re.sub(r"\{\}", "", merged)
find = re.search(r"\{\}", merged)
return list(merged)
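A usage sketch; note that nested empty braces collapse over successive passes.
print(replace_empty_bracket(list("a{}b{{}}c")))   # ['a', 'b', 'c']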
|
fd2c9f2f1c2e199056e89dbdba65f92e4d5834eb
| 3,648,249
|
def presentation():
"""
    This route is the final project and will be a test
of all previously learned skills.
"""
return render_template("")
|
c592ae4b28c5c9b89592c3842e73c0e76cc4bd66
| 3,648,250
|
def extra_credit(grades,students,bonus):
"""
Returns a copy of grades with extra credit assigned
The dictionary returned adds a bonus to the grade of
every student whose netid is in the list students.
Parameter grades: The dictionary of student grades
Precondition: grades has netids as keys, ints as values.
    Parameter students: The list of students to give extra credit
    Precondition: students is a list of valid (string) netids
Parameter bonus: The extra credit bonus to award
Precondition: bonus is an int
"""
# DICTIONARY COMPREHENSION
#return { k:(grades[k]+bonus if k in students else grades[k]) for k in grades }
# ACCUMULATOR PATTERN
result = {}
for k in grades:
if k in students:
result[k] = grades[k]+bonus
else:
result[k] = grades[k]
return result
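A usage sketch with a tiny grade book; the netids are made up.
grades = {'wmw2': 55, 'abc3': 90, 'jms45': 86}
print(extra_credit(grades, ['wmw2', 'jms45'], 5))   # {'wmw2': 60, 'abc3': 90, 'jms45': 91}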
|
334a9edb3d1d045832009e20c6cba7f24e5c181d
| 3,648,251
|
def get_geo_signal_combos(data_source):
"""
    Get the list of geo type-signal type combinations that we expect to see,
    cross-referenced against the combinations reported as available by COVIDcast metadata.
"""
meta = covidcast.metadata()
source_meta = meta[meta['data_source'] == data_source]
# Need to convert np.records to tuples so they are hashable and can be used in sets and dicts.
geo_signal_combos = list(map(tuple,
source_meta[["geo_type", "signal"]].to_records(index=False)))
print("Number of expected geo region-signal combinations:",
len(geo_signal_combos))
return geo_signal_combos
|
90d030372b3e7d9ed2de0d53b6aa42fdf3723355
| 3,648,252
|
def absolute_(x, track_types = True, **kwargs):
"""Compute the absolute value of x.
Parameters
----------
x : :obj:`xarray.DataArray`
Data cube containing the values to apply the operator to.
track_types : :obj:`bool`
Should the operator promote the value type of the output object, based
on the value type of the input object?
**kwargs:
Ignored.
Returns
-------
:obj:`xarray.DataArray`
A data cube with the same shape as ``x`` containing the results of all
evaluated expressions.
Note
-----
When tracking value types, this operator uses the following type promotion
manual, with the keys being the supported value types of ``x``, and the
corresponding value being the promoted value type of the output.
.. exec_code::
:hide_code:
from semantique.processor.types import TYPE_PROMOTION_MANUALS
obj = TYPE_PROMOTION_MANUALS["absolute"]
obj.pop("__preserve_labels__")
print(obj)
"""
if track_types:
promoter = TypePromoter(x, function = "absolute")
promoter.check()
f = lambda x: np.absolute(x)
out = xr.apply_ufunc(f, x)
if track_types:
out = promoter.promote(out)
return out
|
b88c6662890832b0d54f752e59c97c9a9ca9ceb5
| 3,648,253
|
def any_input(sys_, t, input_signal=0, init_cond=None, *, plot=True):
"""
Accept any input signal, then calculate the response of the system.
:param sys_: the system
:type sys_: TransferFunction | StateSpace
:param t: time
:type t: array_like
:param input_signal: input signal accepted by the system
:type input_signal: numbers.Real | np.ndarray
:param init_cond: initial condition of the system
:type init_cond: None | numbers.Real | np.ndarray
:param plot: If plot is True, it will show the response curve.
:type plot: bool
:return: system output and time array
:rtype: tuple[np.ndarray, np.ndarray]
"""
if isinstance(sys_, TransferFunction):
sys_ = tf2ss(sys_)
u = _setup_input_signal(input_signal, t, sys_.inputs)
y, t = _any_input(sys_, t, u, init_cond)
if plot:
plot_response_curve(y, t, "response", sys_.is_ctime)
return y, t
|
fc6d72b5c22d585a4ba7c5be895b1266e72e70dd
| 3,648,254
|
import os
def get_combinations(suite_dir, fields, subset,
limit, filter_in, filter_out,
include_facet):
"""
Describes the combinations of a suite, optionally limiting
or filtering output based on the given parameters. Includes
columns for the subsuite and facets when include_facet is True.
Returns a tuple of (headers, rows) where both elements are lists
of strings.
"""
configs = [(combine_path(suite_dir, item[0]), item[1]) for item in
build_matrix(suite_dir, subset)]
num_listed = 0
rows = []
facet_headers = set()
dirs = {}
max_dir_depth = 0
for _, fragment_paths in configs:
if limit > 0 and num_listed >= limit:
break
if filter_in and not any([f in path for f in filter_in
for path in fragment_paths]):
continue
if filter_out and any([f in path for f in filter_out
for path in fragment_paths]):
continue
fragment_fields = [extract_info(path, fields)
for path in fragment_paths]
# merge fields from multiple fragments by joining their values with \n
metadata = {}
for fragment_meta in fragment_fields:
for field, value in fragment_meta.items():
if value == '':
continue
if field in metadata:
metadata[field] += '\n' + str(value)
else:
metadata[field] = str(value)
if include_facet:
# map final dir (facet) -> filename without the .yaml suffix
for path in fragment_paths:
facet_dir = os.path.dirname(path)
facet = os.path.basename(facet_dir)
metadata[facet] = os.path.basename(path)[:-5]
facet_headers.add(facet)
facet_dirs = facet_dir.split('/')[:-1]
for i, dir_ in enumerate(facet_dirs):
if i not in dirs:
dirs[i] = set()
dirs[i].add(dir_)
metadata['_dir_' + str(i)] = os.path.basename(dir_)
max_dir_depth = max(max_dir_depth, i)
rows.append(metadata)
num_listed += 1
subsuite_headers = []
if include_facet:
first_subsuite_depth = max_dir_depth
for i in range(max_dir_depth):
if len(dirs[i]) > 1:
first_subsuite_depth = i
break
subsuite_headers = ['subsuite depth ' + str(i)
for i in
range(0, max_dir_depth - first_subsuite_depth + 1)]
for row in rows:
for i in range(first_subsuite_depth, max_dir_depth + 1):
row[subsuite_headers[i - first_subsuite_depth]] = \
row.get('_dir_' + str(i), '')
headers = subsuite_headers + sorted(facet_headers) + fields
return headers, sorted([[row.get(field, '') for field in headers]
for row in rows])
|
27fa774468b8fcb8aa34579ab0e1868375d7683e
| 3,648,255
|
from .mnext import mnext
def mnext_mbv2_cfg(pretrained=False,in_chans=3,drop_rate=0.2,drop_connect_rate=0.5,bn_tf=False,bn_momentum=0.9,bn_eps=0.001, global_pool=False, **kwargs):
"""Creates a MNeXt Large model. Tensorflow compatible variant
"""
model = mnext(**kwargs)
return model
|
5f8fcdcaa6abf4047b4fc06ea7dcb92f6fbeade7
| 3,648,256
|
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the column name.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
# This option is only enabled for scattered_embedding_column.
if args.hash_key:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
return embedding_ops.scattered_embedding_lookup_sparse(
embeddings,
input_tensor,
args.dimension,
hash_key=args.hash_key,
combiner=args.combiner,
name="lookup")
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
"SHARED_EMBEDDING_COLLECTION_" + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
"Collection %s can only contain one "
"(partitioned) variable." % shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError(
"The embedding variable with name {} already "
"exists, but its shape does not match required "
"embedding shape here. Please make sure to use "
"different shared_embedding_name for different "
"shared embeddings.".format(args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
if _is_variable(embeddings):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + "weights",
max_norm=args.max_norm)
|
30c305bfbf20d48af48dd25aa32c1648f8f95fce
| 3,648,257
|
def stuw_laagstedoorstroombreedte(damo_gdf=None, obj=None, damo_doorstroombreedte="DOORSTROOMBREEDTE",
damo_kruinvorm="WS_KRUINVORM"):
"""
    If LAAGSTEDOORSTROOMHOOGTE is NULL and WS_KRUINVORM = 3 (rectangle), then LAAGSTEDOORSTROOMBREEDTE = DOORSTROOMBREEDTE.
"""
return damo_gdf.apply(
lambda x: _stuw_get_laagstedoorstroombreedte_rechthoek(x[damo_kruinvorm], x[damo_doorstroombreedte]), axis=1)
|
534d917326222ef77fc0a8022ed84ea08bb0be0a
| 3,648,258
|
def manage_categories():
"""
    Display all categories on the manage categories page (admin only)
"""
# Denied user access to manage_categories page
if session["user"] != "admin":
return redirect(url_for('error', code=403))
# query for all categories from categories collection
manage_categories = list(mongo.db.categories.find().sort(
"category_name", 1))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# call the paginated function to display only the
# specific number of categories per page
paginated_categories = paginated(manage_categories)
# get the page pagination
pagination = get_pagination(manage_categories)
# total number of categories found
total = len(manage_categories)
# set up the page_set object
page_set = {
"title": "Manage Categories",
"type": "form"
}
return render_template("pages/manage_categories.html",
page_set=page_set,
nav_categories=nav_categories,
manage_categories=paginated_categories,
pagination=pagination,
total=total)
|
5002375f904240f2274aa8040b426da8515122a7
| 3,648,259
|
def callback(id):
"""
获取指定记录
"""
# 检查用户权限
_common_logic.check_user_power()
_positions_logic = positions_logic.PositionsLogic()
# 读取记录
result = _positions_logic.get_model_for_cache(id)
if result:
# 直接输出json
return web_helper.return_msg(0, '成功', result)
else:
return web_helper.return_msg(-1, "查询失败")
|
3451cc1ebb18004f46847f6538c751afd86bdf74
| 3,648,260
|
import json
def setup_exps_rllib(flow_params,
n_cpus,
n_rollouts):
"""Return the relevant components of an RLlib experiment.
Parameters
----------
flow_params : dict
flow-specific parameters (see flow/utils/registry.py)
n_cpus : int
number of CPUs to run the experiment over
n_rollouts : int
number of rollouts per training iteration
Returns
-------
str
name of the training algorithm
str
name of the gym environment to be trained
dict
training configuration parameters
"""
horizon = flow_params['env'].horizon
alg_run = "PPO"
agent_cls = get_agent_class(alg_run)
config = deepcopy(agent_cls._default_config)
config["num_workers"] = n_cpus
config["num_cpus_per_worker"] = 1
config["use_pytorch"] = False
config["num_gpus"] = 0
config["train_batch_size"] = horizon * n_rollouts
config["gamma"] = 0.999 # discount rate
# config["model"].update({"fcnet_hiddens": [32, 32, 32]})
config["use_gae"] = True
config["lambda"] = 0.97
config["kl_target"] = 0.02
config["num_sgd_iter"] = 10
config['clip_actions'] = True # FIXME(ev) temporary ray bug
config["horizon"] = horizon
config["callbacks"] = MyCallbacks
    # save the flow params for replay
flow_json = json.dumps(
flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4)
config['env_config']['flow_params'] = flow_json
config['env_config']['run'] = alg_run
create_env, gym_name = make_create_env(params=flow_params)
# Register as rllib env
register_env(gym_name, create_env)
return alg_run, gym_name, config
|
af4f1bb6a11b2502efcfae77ae7dbaf4bb30c1b3
| 3,648,261
|
import numpy as np
def sort_cluster(x: list, t: np.ndarray) -> list:
    """
    Sort the elements of x according to the ascending order of t.
    :param x: sequence of items to reorder
    :param t: array of sort keys, one per element of x
    :return: list with the elements of x ordered by ascending t
    """
return [x[i] for i in np.argsort(t)]
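# Usage sketch with illustrative labels and keys:
ordered = sort_cluster(['b', 'c', 'a'], np.array([2.0, 3.0, 1.0]))  # -> ['a', 'b', 'c']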
|
a2bcd57bb9c402aa19f12483e792f1e4379c4481
| 3,648,262
|
def gettof(*args):
"""gettof(flags_t F) -> ushort"""
return _idaapi.gettof(*args)
|
d377fc28b7515a45112083fc38c722b82caee0b9
| 3,648,263
|
def generate(temp):
"""
Wrapper that checks generated names against the base street names to avoid a direct
regurgitation of input data.
    Returns a list of generated name tokens.
"""
is_in_dict = True
while is_in_dict:
result = textgen.generate(temperature=temp, return_as_list=True)
        joined = ' '.join(result)
        is_in_dict = basenames.get(joined, False)
return result
|
2bfc6d366d0543d6ada762539c0c6cb301d729a8
| 3,648,264
|
import re
def __create_pyramid_features(backbone_dict,
ndim=2,
feature_size=256,
include_final_layers=True,
lite=False,
upsample_type='upsamplelike',
interpolation='bilinear',
z_axis_convolutions=False):
"""Creates the FPN layers on top of the backbone features.
Args:
backbone_dict (dictionary): A dictionary of the backbone layers, with
the names as keys, e.g. ``{'C0': C0, 'C1': C1, 'C2': C2, ...}``
feature_size (int): The feature size to use for
the resulting feature levels.
include_final_layers (bool): Add two coarser pyramid levels
ndim (int): The spatial dimensions of the input data.
Must be either 2 or 3.
lite (bool): Whether to use depthwise conv instead of regular conv for
feature pyramid construction
upsample_type (str): Choice of upsampling methods
from ``['upsamplelike','upsamling2d','upsampling3d']``.
interpolation (str): Choice of interpolation mode for upsampling
layers from ``['bilinear', 'nearest']``.
Returns:
dict: The feature pyramid names and levels,
e.g. ``{'P3': P3, 'P4': P4, ...}``
Each backbone layer gets a pyramid level, and two additional levels
are added, e.g. ``[C3, C4, C5]`` --> ``[P3, P4, P5, P6, P7]``
Raises:
ValueError: ``ndim`` is not 2 or 3
ValueError: ``upsample_type`` not in
``['upsamplelike','upsampling2d', 'upsampling3d']``
"""
# Check input to ndims
acceptable_ndims = [2, 3]
if ndim not in acceptable_ndims:
raise ValueError('Only 2 and 3 dimensional networks are supported')
# Check if inputs to ndim and lite are compatible
if ndim == 3 and lite:
raise ValueError('lite models are not compatible with 3 dimensional '
'networks')
# Check input to interpolation
acceptable_interpolation = {'bilinear', 'nearest'}
if interpolation not in acceptable_interpolation:
raise ValueError('Interpolation mode "{}" not supported. '
'Choose from {}.'.format(
interpolation, list(acceptable_interpolation)))
# Check input to upsample_type
acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
if upsample_type not in acceptable_upsample:
raise ValueError('Upsample method "{}" not supported. '
'Choose from {}.'.format(
upsample_type, list(acceptable_upsample)))
# Get names of the backbone levels and place in ascending order
backbone_names = get_sorted_keys(backbone_dict)
backbone_features = [backbone_dict[name] for name in backbone_names]
pyramid_names = []
pyramid_finals = []
pyramid_upsamples = []
# Reverse lists
backbone_names.reverse()
backbone_features.reverse()
for i, N in enumerate(backbone_names):
level = int(re.findall(r'\d+', N)[0])
pyramid_names.append('P{}'.format(level))
backbone_input = backbone_features[i]
# Don't add for the bottom of the pyramid
if i == 0:
if len(backbone_features) > 1:
upsamplelike_input = backbone_features[i + 1]
else:
upsamplelike_input = None
addition_input = None
# Don't upsample for the top of the pyramid
elif i == len(backbone_names) - 1:
upsamplelike_input = None
addition_input = pyramid_upsamples[-1]
# Otherwise, add and upsample
else:
upsamplelike_input = backbone_features[i + 1]
addition_input = pyramid_upsamples[-1]
pf, pu = create_pyramid_level(backbone_input,
upsamplelike_input=upsamplelike_input,
addition_input=addition_input,
upsample_type=upsample_type,
level=level,
ndim=ndim,
lite=lite,
interpolation=interpolation,
z_axis_convolutions=z_axis_convolutions)
pyramid_finals.append(pf)
pyramid_upsamples.append(pu)
# Add the final two pyramid layers
if include_final_layers:
# "Second to last pyramid layer is obtained via a
# 3x3 stride-2 conv on the coarsest backbone"
N = backbone_names[0]
F = backbone_features[0]
level = int(re.findall(r'\d+', N)[0]) + 1
P_minus_2_name = 'P{}'.format(level)
if ndim == 2:
P_minus_2 = Conv2D(feature_size, kernel_size=(3, 3),
strides=(2, 2), padding='same',
name=P_minus_2_name)(F)
else:
P_minus_2 = Conv3D(feature_size, kernel_size=(1, 3, 3),
strides=(1, 2, 2), padding='same',
name=P_minus_2_name)(F)
pyramid_names.insert(0, P_minus_2_name)
pyramid_finals.insert(0, P_minus_2)
# "Last pyramid layer is computed by applying ReLU
# followed by a 3x3 stride-2 conv on second to last layer"
level = int(re.findall(r'\d+', N)[0]) + 2
P_minus_1_name = 'P{}'.format(level)
P_minus_1 = Activation('relu', name='{}_relu'.format(N))(P_minus_2)
if ndim == 2:
P_minus_1 = Conv2D(feature_size, kernel_size=(3, 3),
strides=(2, 2), padding='same',
name=P_minus_1_name)(P_minus_1)
else:
P_minus_1 = Conv3D(feature_size, kernel_size=(1, 3, 3),
strides=(1, 2, 2), padding='same',
name=P_minus_1_name)(P_minus_1)
pyramid_names.insert(0, P_minus_1_name)
pyramid_finals.insert(0, P_minus_1)
pyramid_dict = dict(zip(pyramid_names, pyramid_finals))
return pyramid_dict
|
956a04a1ebe14e11061de009894b27e7c2640cb2
| 3,648,265
|
def graphviz(self, filename=None, directory=None, isEdge=False,showLabel=True, **kwargs):
"""Return graphviz source for visualizing the lattice graph."""
return lattice(self, filename, directory, isEdge, showLabel, **kwargs)
|
1c7426efe0f0379822c4c9c0a765a615f26f04a1
| 3,648,266
|
def get_rectangle(origin, end):
    """Return all integer points of the rectangle spanned by origin and end (inclusive)."""
    size_x = abs(origin[0]-end[0])+1
    size_y = abs(origin[1]-end[1])+1
    # walk from the lower corner so the order of the two corners does not matter
    start_x = min(origin[0], end[0])
    start_y = min(origin[1], end[1])
    rectangle = []
    for x in range(size_x):
        for y in range(size_y):
            rectangle.append((start_x+x, start_y+y))
    return rectangle
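# Usage sketch: the six integer points between (0, 0) and (1, 2), inclusive.
points = get_rectangle((0, 0), (1, 2))
# points == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]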
|
36badfd8aefaaeda806215b02ed6e92fce6509a3
| 3,648,267
|
import numpy as np
def corr_list(df, target, thresh=0.1, sort=True, fill=True):
"""
List Most Correlated Features
Returns a pandas Series with the most correlated features to a certain
target variable. The function will return features with a correlation value
bigger than some threshold, which can be adjusted.
Parameters
----------
df : pandas DataFrame
`df` must contain only numerical values.
target : str or int
String or integer indicating the target variable.
thresh : float, optional
        Float indicating the minimum absolute correlation between a feature and
        the target above which the feature will be included in the returned
        Series. Default value is 0.1.
    sort : bool, optional
        Whether to sort the returned pandas Series. If True, it will be sorted
        descending. Default value is True.
    fill : bool, optional
        Whether to fill null values. If True, null values will be replaced
        with 0's. Default value is True.
Returns
-------
pandas Series
"""
if fill:
interest = df.corr().fillna(0)[target]
else:
interest = df.corr()[target]
interest = interest[np.abs(interest) > thresh]
if len(interest) > 0:
if sort:
return interest.sort_values(ascending=False)
else:
return interest
else:
return []
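# Usage sketch on a tiny synthetic frame (illustrative data only):
import pandas as pd
df_demo = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 4, 6, 8], 'c': [4, 3, 2, 1]})
top_corr = corr_list(df_demo, 'a')  # 'a' and 'b' at 1.0, 'c' at -1.0, sorted descending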
|
d9562d1bbc7947338cf87ddc6703ef54a21554e0
| 3,648,268
|
def compute_epsilon(steps):
"""Computes epsilon value for given hyperparameters."""
if FLAGS.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
sampling_probability = FLAGS.batch_size / NUM_TRAIN_EXAMPLES
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=FLAGS.noise_multiplier,
steps=steps,
orders=orders)
# Delta is set to 1e-5 because MNIST has 60000 training points.
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
|
9819cafafeec66cd29b13a916432c839e4365ded
| 3,648,269
|
def get_native_includes(object):
"""
After method association, check which native types an object uses
    and return a corresponding set of include files.
    This will also add the include needed for inheritance.
"""
includes = set()
for proc in object.procs:
for argname,arg in proc.args.items():
if arg.native:
includes.add(arg.type.type)
if arg.type.matrix and not opts.no_fmat:
includes.add(matrix_classname)
if arg.type.type=='CHARACTER' and arg.intent!='in':
if opts.std_string:
# The use of angle brackets is handled specially
# in the output code
includes.add('<string>')
else:
includes.add(string_classname)
if proc.retval and proc.retval.type.dt and proc.retval.pointer:
includes.add(proc.retval.type.type)
# For inheritance:
if object.extends:
includes.add(object.extends)
return includes
|
0c09d39bd61b5a711bd718dcb38fab7e4e1e01bf
| 3,648,270
|
from os import path
def libraries_data_path():
"""
Path to Packages/User/Deviot/pio/libraries.json
"""
user_data = user_pio_path()
return path.join(user_data, 'libraries.json')
|
1e60f00544d8008bf44b47536ed19ebdc32bed33
| 3,648,271
|
import torch
def dice_coeff(input, target):
"""Dice coeff for batches"""
if input.is_cuda:
s = torch.FloatTensor(1).to(device_f).zero_()
else:
s = torch.FloatTensor(1).zero_()
for i, c in enumerate(zip(input, target)):
s = s + DiceCoeff().forward(c[0], c[1])
return s / (i + 1)
|
da390729d2e1d8e2ae53814f8ac398a6c7e5380a
| 3,648,272
|
import numpy as np
def group_error_rates(labels, predictions, groups):
"""Returns a list containing error rates for each protected group."""
errors = []
for jj in range(groups.shape[1]):
if groups[:, jj].sum() == 0: # Group is empty?
errors.append(0.0)
else:
signed_labels_jj = 2 * labels[groups[:, jj] == 1] - 1
predictions_jj = predictions[groups[:, jj] == 1]
errors.append(np.mean(signed_labels_jj * predictions_jj <= 0))
return errors
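# Usage sketch with two made-up protected groups of two examples each:
demo_labels = np.array([1, 0, 1, 0])
demo_preds = np.array([0.5, -0.5, -0.5, 0.5])
demo_groups = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
group_error_rates(demo_labels, demo_preds, demo_groups)  # -> [0.0, 1.0]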
|
0b390dfde16910332f10afacaa2f9031c04d846a
| 3,648,273
|
def get_emails_by_user_names(user_names):
"""Get emails by user names."""
emails_service = emails_digest_service.DailyEmailsService()
emails_service.open_emails_digest()
user_emails_dict = dict.fromkeys(user_names)
for user_name in user_names:
user_emails_dict[user_name] = emails_service.get_email_by_user_name(
user_name)
return user_emails_dict
|
331d5799bac79c08240770260306ba84bf2f568b
| 3,648,274
|
def inbound_and_outbound_node_sets(C, CT):
"""
Returns the set of nodes that can reach an event and can be reached by an event,
and the difference between those sets (outbound / inbound).
"""
inbound = defaultdict(set)
for node, event in zip(*np.nonzero(C)):
inbound[event].add(node)
outbound = defaultdict(set)
for node, event in zip(*np.nonzero(CT)):
outbound[event].add(node)
difference = {}
for event, in_nodes in inbound.items():
difference[event] = outbound[event] - in_nodes
return inbound, outbound, difference
|
517746700a7a978a49a597237db362eee98d91b6
| 3,648,275
|
def policy(Q):
"""Hard max over prescriptions
Params:
-------
* Q: dictionary of dictionaries
Nested dictionary representing a table
Returns:
-------
    * policy: dictionary mapping each state to its highest-valued prescription
"""
pol = {}
for s in Q:
pol[s] = max(Q[s].items(), key=lambda x: x[1])[0]
return pol
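# Usage sketch with a toy two-state Q-table:
Q_demo = {'s0': {'left': 0.1, 'right': 0.9}, 's1': {'left': 0.7, 'right': 0.2}}
policy(Q_demo)  # -> {'s0': 'right', 's1': 'left'}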
|
e69f66fba94b025034e03428a5e93ba1b95918e8
| 3,648,276
|
import numpy as np
def fft(series):
    """
    Magnitude spectrum (real FFT) of a series.
    Parameters
    ----------
    series : pandas.Series
        Signal values indexed by time.
    Returns
    -------
    frequencies : numpy.ndarray
        Frequency bins in Hz.
    dft : numpy.ndarray
        Magnitude of the real FFT, zero-padded to n samples.
    """
signal = series.values
time = series.index
dt = np.mean(np.diff(time))
#n = 11*len(time)
n = 50000
frequencies = np.fft.rfftfreq(n=n, d=dt) # [Hz]
dft = np.abs(np.fft.rfft(signal, n=n))
return frequencies, dft
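# Usage sketch: the spectrum of a 5 Hz sine sampled at 100 Hz peaks near 5 Hz
# (illustrative signal; assumes pandas is available).
import pandas as pd
_t = np.arange(0, 1, 0.01)
_freqs, _mag = fft(pd.Series(np.sin(2 * np.pi * 5 * _t), index=_t))
# _freqs[np.argmax(_mag)] is approximately 5.0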
|
a6d1f7cfa45d504a86b434702a49eafa08737006
| 3,648,277
|
def local_variance(V, tsize=5):
""" local non-linear variance calculation
Parameters
----------
V : numpy.array, size=(m,n), dtype=float
        array with one velocity component; all algorithms are independent of the
        chosen axis.
    Returns
    -------
    sig_V : numpy.array, size=(m,n), dtype=float
        statistical local variance, based on the procedure described in [1],[2]
    References
    ----------
    .. [1] Joughin "Ice-sheet velocity mapping: a combined interferometric and
        speckle-tracking approach", Annals of Glaciology vol.34 pp.195-201.
.. [2] Joughin et al. "Greenland ice mapping project 2 (GIMP-2) algorithm
theoretical basis document", Making earth system data records for use in
research environment (MEaSUREs) documentation.
"""
V_class = local_mad_filter(V, tsize=tsize)
V[V_class] = np.nan
V_0 = local_infilling_filter(V, tsize=tsize)
# running mean adjustment
mean_kernel = np.ones((tsize, tsize), dtype=float)/(tsize**2)
V = ndimage.convolve(V, mean_kernel)
# plane fitting and variance of residual
sig_V = local_nonlin_var_filter(V, tsize=tsize)
return sig_V
|
e7e10f8c73f01b20a27ad06813defdd406bf977a
| 3,648,278
|
def get_virtual_device_configuration(device):
"""Get the virtual device configuration for a PhysicalDevice.
Returns the list of VirtualDeviceConfiguration objects previously configured
by a call to `tf.config.experimental.set_virtual_device_configuration()`.
For example:
>>> physical_devices = tf.config.experimental.list_physical_devices('CPU')
>>> assert len(physical_devices) == 1, "No CPUs found"
>>> configs = tf.config.experimental.get_virtual_device_configuration(
... physical_devices[0])
>>> try:
... assert configs is None
... tf.config.experimental.set_virtual_device_configuration(
... physical_devices[0],
... [tf.config.experimental.VirtualDeviceConfiguration(),
... tf.config.experimental.VirtualDeviceConfiguration()])
... configs = tf.config.experimental.get_virtual_device_configuration(
... physical_devices[0])
... assert len(configs) == 2
... except:
... # Cannot modify virtual devices once initialized.
... pass
Args:
device: PhysicalDevice to query
Returns:
List of `tf.config.experimental.VirtualDeviceConfiguration` objects or
`None` if no virtual device configuration has been set for this physical
device.
"""
return context.context().get_virtual_device_configuration(device)
|
49a99c17c2859bb40a7bfbbac840bf82310428e1
| 3,648,279
|
def user_directory_path(instance, filename):
"""Sets path to user uploads to: MEDIA_ROOT/user_<id>/<filename>"""
return f"user_{instance.user.id}/{filename}"
|
84be5fe74fa5059c023d746b2a0ff6e32c14c10d
| 3,648,280
|
def setup(app):
"""Setup the Sphinx extension."""
# Register builder.
app.add_builder(BeamerBuilder)
# Add setting for allowframebreaks.
app.add_config_value("beamer_allowframebreaks", True, "beamer")
# Add setting for Beamer theme.
app.add_config_value("beamer_theme", "Warsaw", "beamer")
# Adjust titles upon doctree-resolved.
app.connect("doctree-resolved", adjust_titles)
return {
"version": "1.0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
cc5f48eeff65876a2d052dad285a77bc76e115c0
| 3,648,281
|
def extract_text(arg: Message_T) -> str:
"""
    Extract the plain-text part of a message (plain-text segments are joined with spaces).
    Args:
        arg (nonebot.typing.Message_T): the message to extract text from
"""
arg_as_msg = Message(arg)
return arg_as_msg.extract_plain_text()
|
06d19c9ca4e907edf433f910600161d142ca914e
| 3,648,282
|
from numpy import zeros, inf
def dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
i_k = min(i + k, r - 1)
j_k = min(j + k, c - 1)
min_list += [D0[i_k, j], D0[i, j_k]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1], C, D1, path
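# Illustrative call, shown commented out because the module's `_traceback` helper is
# defined elsewhere; with an absolute-difference cost the minimum distance here is 1.0.
# d, cost, acc, path = dtw([1, 2, 3], [1, 3, 3], lambda a, b: abs(a - b))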
|
a30a492d816e5234590d9fadfbba722db0ae9f72
| 3,648,283
|
def format_line_count_break(padding: int) -> str:
"""Return the line count break."""
return format_text(
" " * max(0, padding - len("...")) + "...\n", STYLE["detector_line_start"]
)
|
2fe4d4b8195468787f31b3407d32a4e039f7bb6c
| 3,648,284
|
from typing import Tuple
from typing import get_args
def identify_generic_specialization_types(
cls: type, generic_class: type
) -> Tuple[type, ...]:
"""
    Identify the type arguments of the specialization of generic_class that cls derives from.
:param cls: class which derives from a specialization of generic class.
:param generic_class: a generic class.
:return: specialization types.
"""
return get_args(find_generic_specialization_parent_class(cls, generic_class))
|
3932062a5a4543b280ebc8601126e10d11136717
| 3,648,285
|
def Metadata():
"""Get a singleton that fetches GCE metadata.
Returns:
_GCEMetadata, An object used to collect information from the GCE metadata
server.
"""
def _CreateMetadata(unused_none):
global _metadata
if not _metadata:
_metadata = _GCEMetadata()
_metadata_lock.lock(function=_CreateMetadata, argument=None)
_metadata_lock.unlock()
return _metadata
|
096ac4f0278e0048944d5a10c4153be7c60aae88
| 3,648,286
|
def pa11y_counts(results):
"""
Given a list of pa11y results, return three integers:
number of errors, number of warnings, and number of notices.
"""
num_error = 0
num_warning = 0
num_notice = 0
for result in results:
if result['type'] == 'error':
num_error += 1
elif result['type'] == 'warning':
num_warning += 1
elif result['type'] == 'notice':
num_notice += 1
return num_error, num_warning, num_notice
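# Usage sketch with made-up pa11y results:
demo_results = [{'type': 'error'}, {'type': 'notice'}, {'type': 'error'}]
pa11y_counts(demo_results)  # -> (2, 0, 1)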
|
346c1efe0cae5934e623a8643b0f23f85300181d
| 3,648,287
|
def parse_properties(df, columns_to_integer=None, columns_to_datetime=None, columns_to_numeric=None, columns_to_boolean=None, columns_to_string = None, dt_unit = 'ms', boolean_dict = {'true': True, 'false': False, '': None}):
"""
    Parse string columns to other formats. This function is used in the HubSpot routine; it is not yet generalized to other routines.
df: pd.DataFrame
columns_to_: list with names of the columns to parse
return: pd.DataFrame with parsed columns
"""
if columns_to_integer:
df[columns_to_integer] = df[columns_to_integer].apply(string_to_integer)
if columns_to_datetime:
df[columns_to_datetime] = df[columns_to_datetime].apply(pd.to_datetime, unit = dt_unit)
if columns_to_numeric:
df[columns_to_numeric] = df[columns_to_numeric].apply(pd.to_numeric, errors = 'coerce', downcast='float')
if columns_to_boolean:
df[columns_to_boolean] = df[columns_to_boolean].replace(boolean_dict).astype('boolean')
if columns_to_string:
df[columns_to_string] = df[columns_to_string].apply(int_to_string)
return df
|
a162397308d98faac6ab24f07aceee439aa32095
| 3,648,288
|
import requests
def http_request(method, url, headers, data=None):
"""
Request util
:param method: GET or POST or PUT
:param url: url
:param headers: headers
:param data: optional data (needed for POST)
:return: response text
"""
response = requests.request(method, url, headers=headers, data=data)
if response.status_code not in [200, 201, 204]:
http_error_msg = u'%s HTTP request failed: %s for url: %s' % (response.status_code, response.text, url)
#print ("utils.http_request ", http_error_msg)
raise requests.exceptions.HTTPError(response.text)
return response.text
|
6d0453be79b3ae0f7ed60b5a8759b9295365dd6c
| 3,648,289
|
import time
def beNice(obj):
"""Be nice : exponential backoff when over quota"""
wait = 1
while wait :
try :
return_value = obj.execute()
wait = 0
        except Exception:  # FIXME: we should test the type of the exception
            print("EXCEPT : Wait for %d seconds" % wait)
            time.sleep(wait)
            wait *= 2
    return return_value
|
a68d4369c02ec37c48518a109f8c27fa1e014aa3
| 3,648,290
|
def parse_title(line):
"""if this is title, return Tuple[level, content],
@type line: str
@return: Optional[Tuple[level, content]]
"""
line = line.strip()
if not line.startswith('#'):
return None
sharp_count = 0
for c in line:
if c == '#':
sharp_count += 1
else:
break
if sharp_count == len(line):
return None
title = line[sharp_count:].strip()
return sharp_count, title
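# Usage sketch:
parse_title('## Getting started')  # -> (2, 'Getting started')
parse_title('plain text')          # -> None
parse_title('###')                 # -> None (only sharps, no title)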
|
7c170f417755c878d225b780b8475a379501c19f
| 3,648,291
|
import typing
def issubtype(cls: type, clsinfo: type) -> bool:
"""
Return whether ``cls`` is a subclass of ``clsinfo`` while also considering
generics.
:param cls: the subject.
:param clsinfo: the object.
:return: True if ``cls`` is a subclass of ``clsinfo`` considering generics.
"""
info_generic_type, info_args = _split_generic(clsinfo)
if clsinfo in (typing.Any, object):
result = True
elif info_args:
result = _issubtype_generic(cls, info_generic_type, info_args)
else:
result = issubclass(_without_generic(cls), _without_generic(clsinfo))
return result
|
942d5760c3de4d63bcd9c3f5934fcc89727dc958
| 3,648,292
|
def delete_status(id):
"""Delete an existing status
The status to be deleted should be posted as JSON using
    'application/json' as the content type. The posted JSON needs to
have 2 required fields:
* user (the username)
* api_key
An example of the JSON::
{
"user": "r1cky",
"api_key": "qwertyuiopasdfghjklzxcvbnm1234567890"
}
"""
db = get_session(current_app)
# The data we need
user = request.json.get('user')
if not (id and user):
return jsonify(dict(error='Missing required fields.')), 400
status = db.query(Status).filter_by(id=id)
if not status.count():
return jsonify(dict(error='Status does not exist.')), 400
if not status[0].user.username == user:
return jsonify(dict(error='You cannot delete this status.')), 403
status.delete()
db.commit()
return jsonify(dict(id=id))
|
d6a9ebbc787283f3ac247935f3fe5ad9080d2bd0
| 3,648,293
|
import numpy as np
def process_data(data):
""" Change labels, group by planner and format for latex."""
data = data.replace(
{
"grid_run_1": "Grid",
"prm_run_1": "PRM A",
"prm_run_2": "PRM B",
"prm_run_3": "PRM C",
}
)
data = data.rename(
columns={"num_samples": "samples", "cc_checks": "collision checks"}
)
df = data.groupby(["run"]).sum()[["samples", "jvm", "time", "collision checks"]]
df["samples"] = np.round(df["samples"])
df["time"] = np.round(df["time"])
df["samples"] = np.round(df["collision checks"])
sr = data.groupby(["run"]).sum()[["success"]]
df["solved"] = sr.astype(int).astype(str) + "/14"
latex = df.to_latex(
formatters={
"samples": "{:,.0f}".format,
"jvm": "{:.2f}".format,
"collision checks": "{:,.0f}".format,
"time": "{:.0f}".format,
}
)
return df, latex
|
24ac1c2ee872c5051eccc9774943f922671267b1
| 3,648,294
|
def detect_outlier(TS, samples_wind=60, order=3):
"""Find outliers in TS by interpolate one sample at a time, measure diff.
between rec. sample and interpolated, and getting the peaks in the int diff
across recording.
Parameters
-------------
TS : array (x, y) x n_samples
        Time series to extract features from
samples_wind : int
Window length of segment where a sample is interpolated.
order : int
        B-spline interpolation order
    Returns
    --------
    outliers: list of array n_chans [n_outliers]
        Indices of outliers per channel
    outliers_int: list of array n_chans [n_outliers]
        New interpolated values of the outliers
    zdiffs: array (n_chans, n_samples-1)
        Z-scored differences between each sample and its interpolated value
"""
s_win_half = int(samples_wind/2)
outliers = []
outliers_int = []
zdiffs = []
for ts in TS:
n_samples, = ts.shape
diff = [np.nan]
ts_int_one = [np.nan]
for w in range(1,n_samples-1):
wix = [w-s_win_half,w+s_win_half]
# Bound beg or end if outside
wix[0] = 0 if wix[0]<0 else wix[0]
wix[1] = n_samples if wix[1]>n_samples else wix[1]
seg1, seg2 = ts[wix[0]:w], ts[w+1:wix[1]]
seg = np.concatenate((seg1,seg2))
# make indexes ts with and without sample
ixs = np.arange(seg.shape[0]+1)
            ixs_out = np.delete(ixs, np.argwhere(ixs == seg1.shape[0]))
# Interpolate and measure diff
fcubic = interpolate.interp1d(ixs_out, seg, kind=order)
ts_int_out = fcubic(ixs)
smpl_int = ts_int_out[seg1.shape[0]]
diff.append(np.abs(smpl_int-ts[w]))
ts_int_one.append(smpl_int)
diff_z = zscore(diff)
pks_p, _ = feat_ext.find_maxmin_peaks(diff_z[1:], height=5)
        pks_p = pks_p + 1  # add 1 sample (the first entry is nan)
int_smp = np.array(ts_int_one)[pks_p]
outliers.append(pks_p)
outliers_int.append(int_smp)
zdiffs.append(diff_z)
return outliers, outliers_int, np.array(zdiffs)
|
91515770554155ddb0da507e94e4cffc611202d9
| 3,648,295
|
def postprocess(backpointers, best_tag_id):
"""Do postprocess."""
best_tag_id = best_tag_id.asnumpy()
batch_size = len(best_tag_id)
best_path = []
for i in range(batch_size):
best_path.append([])
best_local_id = best_tag_id[i]
best_path[-1].append(best_local_id)
for bptrs_t in reversed(backpointers):
bptrs_t = bptrs_t[0].asnumpy()
local_idx = bptrs_t[i]
best_local_id = local_idx[best_local_id]
best_path[-1].append(best_local_id)
        # Pop off the start tag (we don't want to return that to the caller)
best_path[-1].pop()
best_path[-1].reverse()
return best_path
|
5be856610a3c81453c11c584507dcb4ad0e4cf61
| 3,648,296
|
def carnatic_string_to_ql_array(string_):
"""
:param str string_: A string of carnatic durations separated by spaces.
:return: The input string converted to a quarter length array.
:rtype: numpy.array.
>>> carnatic_string_to_ql_array('oc o | | Sc S o o o')
array([0.375, 0.25 , 0.5 , 0.5 , 1.5 , 1. , 0.25 , 0.25 , 0.25 ])
"""
split_string = string_.split()
vals = []
for token in split_string:
try:
if carnatic_symbols[token] is not None:
vals.append(carnatic_symbols[token]["value"])
except KeyError:
pass
return np.array(vals)
|
19386ac13233c3f5cc70eea7f75d287784f6a969
| 3,648,297
|
import tqdm
import json
import os
def get_examples(fpath, doc_dir, max_seq_len=-1, max_sent_num=200, sent_level=True):
"""
Get data from tsv files.
Input:
fpath -- the file path.
Assume number of classes = 2
Output:
        ts -- a list of token lists (the document text)
        ys -- a list of int labels, one per example
        zs -- a list of token-level rationale masks, one per example
        ss -- a list of per-sentence token masks (num_example, num_sent, sequence_length)
        s_labels -- (only when sent_level) a list of sentence-level rationale labels
"""
n = -1
ts = []
ys = []
zs = []
ss = []
s_labels = []
min_len = 10000
max_len = 0
avg_z_len = 0.
avg_num_sent = 0.
real_max_sent_num = 0
avg_r_num = 0.
with open(fpath, "r") as f:
for line in tqdm(f):
json_data = json.loads(line.strip())
doc_filename = json_data['annotation_id']
file_doc = open(os.path.join(doc_dir, doc_filename))
sentences = file_doc.readlines()
s_masks = []
sentences = [s.strip().split() for s in sentences]
t = [inner for outer in sentences for inner in outer]
cur_id = 0
for sentence in sentences:
if len(s_masks) < max_sent_num:
s_masks.append([0.0] * len(t))
for token in sentence:
s_masks[-1][cur_id] = 1.0
cur_id += 1
avg_num_sent += len(s_masks)
if len(s_masks) > real_max_sent_num:
real_max_sent_num = len(s_masks)
if max_seq_len > 0:
t = t[:max_seq_len]
# print(t)
if len(t) > max_len:
max_len = len(t)
if len(t) < min_len:
min_len = len(t)
y = json_data['classification']
if y == 'POS':
y = 1
elif y == 'NEG':
y = 0
else:
print('ERROR: label {}'.format(y))
evidences = json_data['evidences']
z = [0] * len(t)
z_len = 0
for evidence_list in evidences:
for evidence in evidence_list:
z_start = evidence['start_token']
z_end = evidence['end_token']
z_end = min(z_end, len(t))
z_text = evidence['text']
for idx in range(z_start, z_end):
z[idx] = 1
z_len += 1
if max_seq_len < 0:
assert z_text == ' '.join(t[z_start:z_end]), z_text + '<->' + ' '.join(t[z_start:z_end])
else:
if z_end < max_seq_len:
assert z_text == ' '.join(t[z_start:z_end]), z_text + '<->' + ' '.join(t[z_start:z_end])
# print(z_text)
# print(t[z_start:z_end])
avg_z_len += z_len
if sent_level:
s_label = [0.] * len(s_masks)
new_z = [0] * len(t)
for sid, s_mask in enumerate(s_masks):
is_rationale = False
for idx, val in enumerate(s_mask):
if idx >= max_seq_len:
continue
if val == 1.0:
if z[idx] == 1:
is_rationale = True
break
if is_rationale:
avg_r_num += 1
s_label[sid] = 1.
for idx, val in enumerate(s_mask):
if idx >= max_seq_len:
continue
if val == 1.0:
new_z[idx] = 1
# z = new_z
# break
# s_spans = json_data['sentences']
# # if len(s_spans) > max_sent_num:
# # max_sent_num = len(s_spans)
# # # print(line)
# s_masks = []
# for sid, s_span in enumerate(s_spans):
# (b, e) = s_span
# if b >= max_seq_len:
# break
# # print(len(s_masks))
# # print(max_sent_num)
# if len(s_masks) < max_sent_num:
# s_masks.append([0.0] * len(t))
# for i in range(b, e):
# # print(len(s_masks[-1]), i)
# if i >= max_seq_len:
# break
# s_masks[-1][i] = 1.0
# if len(s_masks) > real_max_sent_num:
# real_max_sent_num = len(s_masks)
ts.append(t)
ys.append(y)
zs.append(z)
ss.append(s_masks)
if sent_level:
s_labels.append(s_label)
# print('len s_mask:', len(s_masks))
# print('len s_label:', len(s_label))
assert len(s_masks) == len(s_label)
n += 1
# print(avg_z_len)
print("Number of examples: %d" % n)
print("Maximum doc length: %d" % max_len)
print("Minimum doc length: %d" % min_len)
print("Average length of rationales: %.4f" % (avg_z_len / n) )
print("Average sent number: %d" % (avg_num_sent/n))
print("Maximum sent number: %d" % real_max_sent_num)
print("Average rationle-sent number: %d" % (avg_r_num / n))
if sent_level:
return ts, ys, zs, ss, s_labels
return ts, ys, zs, ss
|
54c1743528d27e9c2a2dbf176172a29c2ab48d50
| 3,648,298
|
def login_redirect(request: HttpRequest) -> HttpResponse:
"""
Redirects the user to the Strava authorization page
:param request: HttpRequest
:return: HttpResponse
"""
strava_uri = get_strava_uri()
return redirect(strava_uri)
|
80eb714ab8f1fde25f2a3ce57bdc540a5a7a980d
| 3,648,299
|