| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def multiply_str(char, times):
"""
Return the character repeated `times` times as a single string
"""
return char * times
|
cc69f0e16cba1b8c256301567905e861c05291ea
| 3,645,500
|
def calories_per_item(hundr, weight, number_cookies, output_type):
"""
>>> calories_per_item(430, 0.3, 20, 0)
'One item has 64.5 kcal.'
>>> calories_per_item(430, 0.3, 20, 1)
'One item has 64.5 Calories.'
>>> calories_per_item(1, 1000, 10, 1)
'One item has 1000.0 Calories.'
>>> calories_per_item(1, 1000, 10, 0)
'One item has 1000.0 kcal.'
>>> calories_per_item(0, 1000, 10, 0)
'One item has 0.0 kcal.'
"""
kcal_per_item = hundr * 10 # convert kcal per 100g to kcal per kg
unit = 'kcal'
if output_type == 1: # change output unit based on input
unit = 'Calories'
return 'One item has ' + str((kcal_per_item * weight) / number_cookies) + ' ' + unit + '.'
|
9ca16eee8aa8a81424aeaa30f696fb5bec5e3956
| 3,645,501
|
def bitcoind_call(*args):
"""
Run `bitcoind`, return OS return code
"""
_, retcode, _ = run_subprocess("/usr/local/bin/bitcoind", *args)
return retcode
|
efa585a741da1ba3bf94650de1d7296228c15e7e
| 3,645,502
|
def getItemProduct(db, itemID):
"""
Get an item's linked product id
:param db: database pointer
:param itemID: int
:return: int
"""
# Get the one we want
item = db.session.query(Item).filter(Item.id == itemID).first()
# if the query didn't return anything, raise noresult exception
if (not item):
raise NoResult
# otherwise, return the product_id
else:
return item.product_id
|
fbbd2b2108bba78af1abc4714653065e12906ee3
| 3,645,503
|
from typing import Optional
def find_board(board_id: BoardID) -> Optional[Board]:
"""Return the board with that id, or `None` if not found."""
board = db.session.get(DbBoard, board_id)
if board is None:
return None
return _db_entity_to_board(board)
|
16f687304d1008b3704d641a7e9e5e624475e045
| 3,645,504
|
from typing import Any
def test_isin_pattern_0():
"""
Test IsIn pattern which expresses the IsIn/OneOf semantics.
"""
inputs = Tensor(np.ones([42]), mindspore.float16)
softmax_model = nn.Softmax()
@register_pass(run_only_once=True)
def softmax_relu_pass():
x = Any()
softmax_pattern = Prim(P.Softmax())
call_softmax = Call(softmax_pattern, [x])
relu_pattern = Prim(P.ReLU())
call_relu = Call(relu_pattern, [x])
pattern = OneOf([call_softmax, call_relu])
relu6_pattern = Prim(P.ReLU6())
target = Call(relu6_pattern, [x])
return pattern, target
transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(2)
unregister_pass(softmax_relu_pass)
assert "ReLU6" in transformed_repr
assert "Softmax" not in transformed_repr
|
78e169fcab894c3cf7956884bd3553983fda5bae
| 3,645,505
|
from datetime import timedelta
def ENsimtime():
"""Retrieves the current simulation time t as a datetime.timedelta instance"""
return timedelta(seconds=_current_simulation_time.value)
|
4dd971b3af9d0a2544e809ea7726521d9ce8e5b1
| 3,645,506
|
def solar_true_longitude(solar_geometric_mean_longitude, solar_equation_of_center):
"""Returns the Solar True Longitude with Solar Geometric Mean Longitude,
solar_geometric_mean_longitude, and Solar Equation of Center,
solar_equation_of_center."""
solar_true_longitude = solar_geometric_mean_longitude + solar_equation_of_center
return solar_true_longitude
|
a335bb82002846eb2bc2106675c13e9f3ee28900
| 3,645,507
|
import base64
def image_to_fingerprint(image, size=FINGERPRINT_SIZE):
"""Create b64encoded image signature for image hash comparisons"""
data = image.copy().convert('L').resize((size, size)).getdata()
return base64.b64encode(bytes(data)).decode()
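# Minimal usage sketch (assumes Pillow is installed; a synthetic image is used so no file is needed):
from PIL import Image
example_img = Image.new('RGB', (32, 32), color='white')
print(image_to_fingerprint(example_img, size=8))  # base64 string of the 8x8 grayscale thumbnail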
|
83ff567bce0530b69a9b43c40ea405af825831ff
| 3,645,508
|
from datetime import date, datetime, timedelta
import numpy as np
import pandas
def get_indices(
time: str | datetime | date, smoothdays: int = None, forcedownload: bool = False
) -> pandas.DataFrame:
"""
alternative going back to 1931:
ftp://ftp.ngdc.noaa.gov/STP/GEOMAGNETIC_DATA/INDICES/KP_AP/
20 year Forecast data from:
https://sail.msfc.nasa.gov/solar_report_archives/May2016Rpt.pdf
"""
dtime = todatetime(time)
fn = downloadfile(dtime, forcedownload)
# %% load data
dat: pandas.DataFrame = load(fn)
# %% optional smoothing over days
if isinstance(smoothdays, int):
periods = np.rint(timedelta(days=smoothdays) / (dat.index[1] - dat.index[0])).astype(int)
if "f107" in dat:
dat["f107s"] = dat["f107"].rolling(periods, min_periods=1).mean()
if "Ap" in dat:
dat["Aps"] = dat["Ap"].rolling(periods, min_periods=1).mean()
# %% pull out the times we want
i = [dat.index.get_loc(t, method="nearest") for t in dtime]
Indices = dat.iloc[i, :]
return Indices
|
e8880caac96e9b3333c2f1f557b5918ee40cdbbe
| 3,645,509
|
import argparse
def get_args():
"""Get CLI arguments and options
:return: AccuRev branch, git repository location, append option boolean
"""
parser = argparse.ArgumentParser(description='Migrate AccuRev branch history to git')
parser.add_argument('accurevBranch', help='The AccuRev branch which will be migrated', type=is_stream)
parser.add_argument('repoLocation', help='The location of the git repository in which the clone will happen',
action=FullPaths, type=is_valid_dest)
parser.add_argument('-a', '--append', help='Append new AccuRev branch history to an existing git repository',
action='store_true')
args = parser.parse_args()
source = args.accurevBranch
dest = args.repoLocation
append = args.append
return source, dest, append
|
cb90769315ddce080c3eb9e234f8f7a569a546e2
| 3,645,510
|
import json
def check(device, value):
"""Test for valid setpoint without actually moving."""
value = json.loads(value)
return zmq_single_request("check_value", {"device": device, "value": value})
|
f08e80348f97531ed51207aff685a470ca62bc41
| 3,645,511
|
import sys
def prove(formula, verbose):
"""
:param formula: String representation of a modal formula.
The syntax for such a formula is per the grammar as stipulated in the README.
Example input: "(a|b) & (~c => d)"
:return: A string stating the outcome of the proof, i.e. valid or not valid.
"""
try:
sys.setrecursionlimit(15000)
negated_fml = "~(" + str(formula) + ")"
negated_clausal_fml = call_function(verbose, transform, negated_fml, False)
if call_function(verbose, k_prove, negated_clausal_fml) == sat:
return "Psi is NOT valid"
else:
return "Psi is valid"
finally:
sys.setrecursionlimit(1000)
|
280570027016d5d285caae6610b0be8d39e6252c
| 3,645,512
|
def get_projectID(base_url, start, teamID, userID):
"""
Get all the projects from Jama
Args:
base_url (string): jama instance base url
start (int): start at a specific location
teamID (string): user team ID, for OAuth
userID (string): user ID, for OAuth
Returns:
(dict): Returns JSON object of the Jama API /projects
"""
url = base_url + "/rest/latest/projects?startAt=" +\
str(start) + "&maxResults=50"
return api_caller.get(teamID, userID, url)
|
92deaf007530b67be6459c7fd0a0e196dbe18216
| 3,645,513
|
def to_world(points_3d, key2d, root_pos):
""" Trasform coordenates from camera to world coordenates """
_, _, rcams = data_handler.get_data_params()
n_cams = 4
n_joints_h36m = 32
# Add global position back
points_3d = points_3d + np.tile(root_pos, [1, n_joints_h36m])
# Load the appropriate camera
key3d = data_handler.get_key3d(key2d[:3])
subj, _, sname = key3d
subj = int(subj)
cname = sname.split('.')[1] # <-- camera name
scams = {(subj, c+1): rcams[(subj, c+1)] for c in range(n_cams)} # cams of this subject
scam_idx = [scams[(subj, c+1)][-1] for c in range(n_cams)].index(cname) # index of camera used
the_cam = scams[(subj, scam_idx+1)] # <-- the camera used
R, T, f, c, k, p, name = the_cam
assert name == cname
def cam2world_centered(data_3d_camframe):
data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
data_3d_worldframe = data_3d_worldframe.reshape((-1, n_joints_h36m*3))
# subtract root translation
return data_3d_worldframe - np.tile(data_3d_worldframe[:, :3], (1, n_joints_h36m))
# Apply inverse rotation and translation
return cam2world_centered(points_3d)
|
9b56b946569dac35231282009389a777e908d09f
| 3,645,514
|
def orbital_energies_from_filename(filepath):
"""Returns the orbital energies from the given filename through
functional composition
:param filepath: path to the file
"""
return orbital_energies(spe_list(
lines=list(content_lines(filepath, CMNT_STR))))
|
669bfbe18bb8686e2f9fdc89dcdb3a36aeec6940
| 3,645,515
|
from copy import deepcopy
def _dict_merge(a, b):
""" `_dict_merge` deep merges b into a and returns the new dict; neither input is modified.
"""
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.items():
if k in result and isinstance(result[k], dict):
result[k] = _dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
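# Minimal usage sketch: nested keys from `b` override or extend `a`; neither input is mutated.
base = {"a": 1, "nested": {"x": 1, "y": 2}}
override = {"b": 2, "nested": {"y": 20, "z": 30}}
merged = _dict_merge(base, override)
assert merged == {"a": 1, "b": 2, "nested": {"x": 1, "y": 20, "z": 30}}
assert base == {"a": 1, "nested": {"x": 1, "y": 2}}  # the original is left untouched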
|
278bfa6f8895fda0ae86b0ff2014602a7e9225df
| 3,645,516
|
import stat
import functools
import operator
def flags(flags: int, modstring: str) -> int:
""" Modifies the stat flags according to *modstring*, mirroring the syntax for POSIX `chmod`. """
mapping = {
'r': (stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH),
'w': (stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH),
'x': (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)
}
target, direction = 'a', None
for c in modstring:
if c in '+-':
direction = c
continue
if c in 'ugoa':
target = c
direction = None # Need a - or + after group specifier.
continue
if c in 'rwx' and direction and direction in '+-':
if target == 'a':
mask = functools.reduce(operator.or_, mapping[c])
else:
mask = mapping[c]['ugo'.index(target)]
if direction == '-':
flags &= ~mask
else:
flags |= mask
continue
raise ValueError('invalid chmod: {!r}'.format(modstring))
return flags
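# Minimal usage sketch (pure bit arithmetic, no filesystem access):
# start from rw-r--r-- (0o644), then apply "u+x" and "o-r".
mode = 0o644
mode = flags(mode, 'u+x')  # add the owner execute bit -> 0o744
mode = flags(mode, 'o-r')  # drop the world read bit   -> 0o740
assert mode == 0o740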
|
9acfeb4d9b90a12d2308c0ec992cfbb47f11000c
| 3,645,517
|
def _can_contain(ob1, ob2, other_objects, all_obj_locations, end_frame,
min_dist):
""" Return true if ob1 can contain ob2. """
assert len(other_objects) == len(all_obj_locations)
# Only cones do the contains, and can contain spl or smaller sphere/cones,
# cylinders/cubes are too large
if (len(ob1) == 1 and ob1[0][0]['sized'] > ob2[0][0]['sized'] and
ob1[0][0]['shape'] == 'cone' and
ob2[0][0]['shape'] in ['cone', 'sphere', 'spl']):
# Also make sure the moved object will not collide with anything
# there
collisions = [
_obj_overlap(
# ob2 location since the ob1 will be moved to ob2's location
# but will have the size of ob1,
(ob2[0][1].location[0], ob2[0][1].location[1],
ob1[0][1].location[2]),
ob1[0][0]['sized'],
# top objects location at the end point, and its size
other_locations[0][end_frame], other_obj[0][0]['sized'],
min_dist)
for other_obj, other_locations in
zip(other_objects, all_obj_locations)]
if not any(collisions):
return True
return False
|
391119dae5e86efe0c99bae7c603a1f785c69c04
| 3,645,518
|
import numpy as np
import pandas as pd
def twolmodel(attr, pulse='on'):
"""
This is the 2-layer ocean model; it requires a forcing in W/m2.
pulse = 'on'   - radiative pulse (W/m2)
pulse = 'off'  - time-varying radiative forcing (W/m2/yr)
pulse = 'time' - use output from the simple carbon model
"""
#### Parameters ####
yeartosec = 30.25*24*60*60*12
rho = 1025 # density of sea water kg/m3
cw = 3985 # specific heat of sea water J/KgK
###################
# define time steps of the model
timesteps = np.arange(0,attr['endtime']+attr['dt'],attr['dt'])
df = pd.DataFrame(index=timesteps,columns=['T_sfc','T_deep'],data=np.zeros((len(timesteps), 2)))
for t in range(len(timesteps)-1):
if pulse == 'on':
if t == 0:
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + attr['R'] + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
else:
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + 0 + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
elif pulse == 'off':
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + attr['R']*timesteps[t] + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
elif pulse == 'time':
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + attr['R'][t] + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
return df
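# Minimal usage sketch with illustrative (not physically calibrated) parameter values;
# the dictionary keys match those read by twolmodel above.
example_attr = {'endtime': 100, 'dt': 1, 'hsfc': 100, 'hdeep': 900, 'lb': -1.2, 'R': 4.0, 'beta': 0.7, 'e': 1.0}
example_df = twolmodel(example_attr, pulse='on')
print(example_df.tail())  # surface and deep-ocean temperature response to a 4 W/m2 pulse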
|
4f6649a8df1febe54a6c04fdee938591a0c997b2
| 3,645,519
|
def maximumToys(prices, k):
"""Problem solution."""
prices.sort()
c = 0
for toy in prices:
if toy > k:
return c
else:
k -= toy
c += 1
return c
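# Minimal usage sketch: with a budget of 50, the four cheapest toys fit (1 + 5 + 10 + 12 = 28).
assert maximumToys([1, 12, 5, 111, 200, 1000, 10], 50) == 4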
|
0ce709ff7b106b5379217cb6b7f1f481d27c94e7
| 3,645,520
|
import os
import errno
from math import ceil
from pprint import pprint
def batch_process(process, **kwargs):
"""Runs a process on a set of files and batches them into subdirectories.
Arguments:
process ((IN, OUT, Verbosity) -> str): The function to execute on each
file.
Keyword Arguments:
file (Optional[str]): The input files and directories.
output_dir (Optional[str]): The output directory.
batch_size (Optional[int]): The size of each subdirectory or the number
of subdirectories, depending on the batch_mode.
batch_mode (Optional[str]): The batch mode. Can be one of 'count' or
'divide'. In count mode, each batch will contain at most the number
of files specified by the batch_size (default 10). In divide mode,
there will be that number of batch directories, and files will be
divided evenly between them.
batch_dir_format (Optional[str]): The format string for batch
subdirectory names. Defaults to 'batch{:03}'
verbosity (int): The verbosity of the output.
Returns:
(None)
"""
# Get values from kwargs:
search_locations = kwargs.get('file', ['.'])
search_locations.extend(kwargs.get('extra_files', []))
extensions = kwargs.get('extensions', [])
recursive = kwargs.get('recursive', False)
output_dir = os.path.abspath(kwargs.get('output_dir', '.'))
no_overwrite = kwargs.get('no_overwrite', [])
verbosity = kwargs.get('verbose', 1)
if kwargs.get('quiet', False):
verbosity = 0
batch_mode = kwargs.get('batch_mode', 'none')
batch_size = kwargs.get('batch_size', 10)
batch_dir_format = kwargs.get('batch_dir_prefix', 'batch{:03}')
# Get files to process
files, file_count = get_files(search_locations,
extensions,
recursive)
if verbosity >= 3:
pprint(kwargs)
# Prepare batching info
if batch_mode == 'none':
batch_count = 0
elif batch_mode == 'divide':
batch_count = batch_size
elif batch_mode == 'count':
batch_count = int(ceil(file_count / batch_size))
batches = []
for batch_num in range(0, batch_count):
batch_name = batch_dir_format.format(batch_num)
batch_path = os.path.join(output_dir, batch_name)
batches.append((batch_path, batch_name))
# Create batch directory.
try:
if verbosity >= 3:
print('Creating directory: {}'.format(os.path.relpath(batch_path)))
os.makedirs(batch_path)
except OSError as e:
# We don't care if the directory already exists; re-raise anything else.
if e.errno != errno.EEXIST:
raise
# Assign files to batches using (input_file, output_location)
out = output_dir
assigned_files = []
for i, item in enumerate(files):
if batch_count > 0:
out, short = batches[i % len(batches)]
assigned_files.append((item, out))
# Check for already existing outputs.
existing = get_files(output_dir,
no_overwrite,
recursive=True)[0]
existing = {split_ext(x)[0] : x for x in existing}
if verbosity >= 3:
print('Process preventing extensions:', no_overwrite)
if no_overwrite:
if verbosity >= 1:
print('\n--- Checking for existing files of types: {} ---'
''.format(no_overwrite))
# Function for checking if file exists in output_dir
def check(file_name):
base, ext = split_ext(file_name)
over_written = existing.get(base, False)
if over_written:
existing_ext = split_ext(existing[base])[1]
if existing_ext.endswith(tuple(no_overwrite)):
print('Skip {}{} -> "{}"'
''.format(base, ext, os.path.relpath(existing[base])))
return False
return True
# Filter for files that don't exist in output_dir
assigned_files = [x for x in assigned_files if check(x[0])]
if verbosity >= 1 and len(assigned_files) == 0:
print('--- No files to process ---\n')
return
if verbosity >= 1:
print('\n--- Begin Processing {} files ---'
''.format(len(assigned_files)))
# Process each file:
for item, out in assigned_files:
process(item, out, verbosity=verbosity)
if verbosity >= 1:
print('--- End Processing ---\n')
|
24ba57f463f48f2d2ada56d4c6fff848a1c45f10
| 3,645,521
|
import numpy as np
def get_X_HBR_d_t_i(X_star_HBR_d_t):
"""(47)
Args:
X_star_HBR_d_t: Absolute humidity of the occupied rooms under load-balanced conditions at time t on date d (kg/kg(DA))
Returns:
Actual absolute humidity of the occupied rooms in heating/cooling zone i at time t on date d (kg/kg(DA))
"""
X_star_HBR_d_t_i = np.tile(X_star_HBR_d_t, (5, 1))
return X_star_HBR_d_t_i
|
125d70ff96ce1a035df98d6995aa55ea3728ffa9
| 3,645,522
|
from typing import Optional
from typing import Dict
from typing import Callable
from typing import Any
def add_route(url: str,
response: Optional[str] = None,
method: str = 'GET',
response_type: str = 'JSON',
status_code: int = 200,
headers: Optional[Dict[str, str]] = None,
callback: Optional[Callable[[Any], None]] = None,
) -> None:
"""
Add route to app.
:param url: the URL rule as string
:param response: return value
:param method: HTTP method
:param response_type: type of response (JSON, HTML, RSS)
:param status_code: return status code
:param headers: return headers
:param callback: function that will be executed before the response is returned
"""
endpoint = '{url}::{method}::{status_code}'.format(
url=url, method=method, status_code=status_code
)
@app.route(url, endpoint=endpoint, methods=[method])
def handler(*args, **kwargs):
if callback is not None:
callback(request, *args, **kwargs)
json_response = jsonify(response)
if headers is not None:
json_response.headers.update(headers)
return json_response, status_code
|
f103b6d6faffff4a816fdf7c3c0124ea41622fe1
| 3,645,523
|
def findUsername(data):
"""Find a username in a Element
Args:
data (xml.etree.ElementTree.Element): XML from PMS as a Element
Returns:
username or None
"""
elem = data.find('User')
if elem is not None:
return elem.attrib.get('title')
return None
|
f7b6bb816b9eeeca7e865582935a157cdf276928
| 3,645,524
|
import argparse
from typing import Union
def preprocess_config_factory(args: argparse.Namespace, ref_paths: dict,
dataset_type: str) -> Union[BratsConfig, CamCanConfig, IBSRConfig, CANDIConfig, IXIConfig]:
"""Factory method to create a pre-processing config based on the parsed command line arguments."""
if dataset_type == 'brats':
config = BratsConfig(
dataset_name=args.dataset_name,
dataset_root_path=args.dataset_root_path,
do_pre_processing=args.pre_process,
do_create_dataset=args.create_dataset,
modalities={modality: modality in args.modalities for modality in VALID_BRATS_MODALITIES},
limit_to_n_samples=args.limit_n_samples,
exclude_empty_slices=args.exclude_empty_slices,
do_bias_correction=not args.no_bias_correction,
force_bias_correction=args.force_bias_correction,
do_histogram_matching=not args.no_histogram_matching,
ref_paths=ref_paths,
do_normalization=not args.no_normalization,
normalization_method=args.normalization_method,
shuffle_pre_processing=args.shuffle_pre_processing,
background_value=BACKGROUND_VALUE,
hdf5_out_folder_path=args.hdf5_out_dir_path,
n4_executable_path=N4_EXECUTABLE_PATH,
store_pre_processing_output=not args.no_output,
print_debug=args.print_debug
)
return config
elif dataset_type == 'camcan':
config = CamCanConfig(
dataset_name=args.dataset_name,
dataset_root_path=args.dataset_root_path,
image_modality=args.modality,
limit_to_n_samples=args.limit_n_samples,
exclude_empty_slices=args.exclude_empty_slices,
do_histogram_matching=not args.no_histogram_matching,
ref_paths=ref_paths,
do_normalization=not args.no_normalization,
normalization_method=args.normalization_method,
background_value=args.background_value,
hdf5_out_folder_path=args.hdf5_out_dir_path,
n4_executable_path=N4_EXECUTABLE_PATH,
val_fraction=args.val_fraction,
print_debug=args.print_debug
)
return config
elif dataset_type == 'ixi':
config = IXIConfig(
dataset_name=args.dataset_name,
dataset_root_path=args.dataset_root_path,
image_modality=args.modality,
limit_to_n_samples=args.limit_n_samples,
exclude_empty_slices=args.exclude_empty_slices,
do_histogram_matching=not args.no_histogram_matching,
ref_paths=ref_paths,
do_normalization=not args.no_normalization,
normalization_method=args.normalization_method,
background_value=args.background_value,
hdf5_out_folder_path=args.hdf5_out_dir_path,
n4_executable_path=N4_EXECUTABLE_PATH,
val_fraction=args.val_fraction,
print_debug=args.print_debug
)
return config
elif dataset_type == 'ibsr':
config = IBSRConfig(
dataset_name=args.dataset_name,
dataset_root_path=args.dataset_root_path,
image_modality='t1',
limit_to_n_samples=args.limit_n_samples,
exclude_empty_slices=args.exclude_empty_slices,
do_histogram_matching=not args.no_histogram_matching,
ref_paths=ref_paths,
do_normalization=not args.no_normalization,
normalization_method=args.normalization_method,
background_value=args.background_value,
hdf5_out_folder_path=args.hdf5_out_dir_path,
n4_executable_path=N4_EXECUTABLE_PATH,
val_fraction=args.val_fraction,
print_debug=args.print_debug
)
return config
elif dataset_type == 'candi':
config = CANDIConfig(
dataset_name=args.dataset_name,
dataset_root_path=args.dataset_root_path,
image_modality='t1',
limit_to_n_samples=args.limit_n_samples,
exclude_empty_slices=args.exclude_empty_slices,
do_histogram_matching=not args.no_histogram_matching,
ref_paths=ref_paths,
do_normalization=not args.no_normalization,
normalization_method=args.normalization_method,
background_value=args.background_value,
hdf5_out_folder_path=args.hdf5_out_dir_path,
n4_executable_path=N4_EXECUTABLE_PATH,
val_fraction=args.val_fraction,
print_debug=args.print_debug
)
return config
else:
raise KeyError(f'Given dataset_type {dataset_type} not supported.')
|
fdd96ac09bc6b86801ea39956a7d456a380ed546
| 3,645,525
|
def GET(request):
"""Get this Prefab."""
request.check_required_parameters(path={'prefabId': 'string'})
prefab = Prefab.from_id(request.params_path['prefabId'])
prefab.check_exists()
prefab.check_user_access(request.google_id)
return Response(200, 'Successfully retrieved prefab', prefab.obj)
|
07a7078cb73893309372c0a8d48857eefc77a41e
| 3,645,526
|
def fix_empty_strings(tweet_dic):
"""空文字列を None に置換する"""
def fix_media_info(media_dic):
for k in ['title', 'description']:
if media_dic.get('additional_media_info', {}).get(k) == '':
media_dic['additional_media_info'][k] = None
return media_dic
for m in tweet_dic.get('entities', {}).get('media', []):
m = fix_media_info(m)
for m in tweet_dic.get('extended_entities', {}).get('media', []):
m = fix_media_info(m)
for m in tweet_dic.get('extended_tweet', {}).get('entities', {}).get('media', []):
m = fix_media_info(m)
for m in tweet_dic.get('extended_tweet', {}).get('extended_entities', {}).get('media', []):
m = fix_media_info(m)
for k in [
'profile_background_image_url',
'profile_background_image_url_https',
'profile_image_url',
'profile_image_url_https',
]:
if tweet_dic.get('user', {}).get(k) == '':
tweet_dic['user'][k] = None
return tweet_dic
|
436daaeb9b96b60867d27812ed7388892ab79b1a
| 3,645,527
|
import json
def group_joinrequest(request, group_id):
"""
Handle post request to join a group.
"""
if not request.is_ajax() or request.method != 'POST':
raise Http404
result = {}
content_type = 'application/json; charset=utf-8'
group_id = int(group_id)
group = get_group(group_id)
if not group:
raise Http404
user = request.user.username
# TODO: Group creator is group staff now, but may changed in future.
staff = group.creator_name
if is_group_user(group_id, user):
# Already in the group. Normally, this case should not happen.
err = _(u'You are already in the group.')
return HttpResponseBadRequest(json.dumps({'error': err}),
content_type=content_type)
else:
form = GroupJoinMsgForm(request.POST)
if form.is_valid():
group_join_msg = form.cleaned_data['group_join_msg']
# Send the message to group staff.
use_https = request.is_secure()
domain = RequestSite(request).domain
t = loader.get_template('group/group_join_email.html')
c = {
'staff': staff,
'user': user,
'group_name': group.group_name,
'group_join_msg': group_join_msg,
'site_name': SITE_NAME,
}
try:
send_mail(_(u'apply to join the group'), t.render(Context(c)), None, [staff],
fail_silently=False)
messages.success(request, _(u'Sent successfully, the group admin will handle it.'))
return HttpResponse(json.dumps('success'),
content_type=content_type)
except Exception:
err = _(u'Failed to send. You can try it again later.')
return HttpResponse(json.dumps({'error': err}), status=500,
content_type=content_type)
else:
return HttpResponseBadRequest(json.dumps(form.errors),
content_type=content_type)
|
befa5d7e64f1fde3be4c4e589e7c6ed3fdec8b7e
| 3,645,528
|
import math
import numpy as np
def fibonacci(**kwargs):
"""Fibonacci Sequence as a numpy array"""
n = int(math.fabs(kwargs.pop('n', 2)))
zero = kwargs.pop('zero', False)
weighted = kwargs.pop('weighted', False)
if zero:
a, b = 0, 1
else:
n -= 1
a, b = 1, 1
result = np.array([a])
for i in range(0, n):
a, b = b, a + b
result = np.append(result, a)
if weighted:
fib_sum = np.sum(result)
if fib_sum > 0:
return result / fib_sum
else:
return result
else:
return result
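# Minimal usage sketch:
print(fibonacci(n=5))                 # [1 1 2 3 5]
print(fibonacci(n=5, zero=True))      # [0 1 1 2 3 5]
print(fibonacci(n=5, weighted=True))  # the first five terms divided by their sum (12)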
|
055d157120866c9bfe74374d62cffcc8f599d4bb
| 3,645,529
|
def read_data():
"""Reads in the data from (currently) only the development file
and returns this as a list. Pops the last element, because it is empty."""
with open('../PMB/parsing/layer_data/4.0.0/en/gold/dev.conll') as file:
data = file.read()
data = data.split('\n\n')
data.pop(-1)
return data
|
da75e237bbc7b2168cd5af76eefaf389b29d4b30
| 3,645,530
|
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default is 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelmin, argrelmax
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelextrema
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelextrema(x, np.greater)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelextrema(y, np.less, axis=1)
(array([0, 2]), array([2, 1]))
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
return np.where(results)
|
66d565fad5672615f1340979a3c59e5abbbab3f5
| 3,645,531
|
import heapq as hq
import math
def dijkstra(G, s):
"""
find all shortest paths from s to each other vertex in graph G
"""
n = len(G)
visited = [False]*n
weights = [math.inf]*n
path = [None]*n
queue = []
weights[s] = 0
hq.heappush(queue, (0, s))
while len(queue) > 0:
g, u = hq.heappop(queue)
visited[u] = True
for v, w in G[u]:
if not visited[v]:
f = g + w
if f < weights[v]:
weights[v] = f
path[v] = u
hq.heappush(queue, (f, v))
return path, weights
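# Minimal usage sketch: G is an adjacency list of (neighbor, weight) pairs per vertex.
example_G = [[(1, 4), (2, 1)], [(3, 1)], [(1, 2), (3, 5)], []]
path, weights = dijkstra(example_G, 0)
assert weights == [0, 3, 1, 4]  # shortest distances from vertex 0
assert path == [None, 2, 0, 1]  # predecessor of each vertex on its shortest path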
|
85069b177ac646f449ce8e3ccf6d9c5b9de7b2e3
| 3,645,532
|
def prepare_create_user_db():
"""Clear a user from the database to be created."""
username = TEST_USERS[0][0]
connection = connect_db()
connection.cursor().execute('DELETE FROM Users WHERE username=%s',
(username,))
connection.commit()
close_db(connection)
return username
|
beb1fd7a7f6c571f9d5e57a79d3b15c62a215789
| 3,645,533
|
def _getlocal(ui, rpath):
"""Return (path, local ui object) for the given target path.
Takes paths in [cwd]/.hg/hgrc into account.
"""
try:
wd = os.getcwd()
except OSError, e:
raise util.Abort(_("error getting current working directory: %s") %
e.strerror)
path = cmdutil.findrepo(wd) or ""
if not path:
lui = ui
else:
lui = ui.copy()
lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
if rpath and rpath[-1]:
path = lui.expandpath(rpath[-1])
lui = ui.copy()
lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
return path, lui
|
4dc90dc62084e13c22b1a602fa20c552557e258c
| 3,645,534
|
import hashlib
def get_size_and_sha256(infile):
"""
Returns the size and SHA256 checksum (as hex) of the given file.
"""
h = hashlib.sha256()
size = 0
while True:
chunk = infile.read(8192)
if not chunk:
break
h.update(chunk)
size += len(chunk)
return (size, h.hexdigest())
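# Minimal usage sketch: works with any binary file-like object.
import io
size, digest = get_size_and_sha256(io.BytesIO(b"hello"))
assert size == 5
assert digest == hashlib.sha256(b"hello").hexdigest()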
|
32c37ca6762f9c62d806e22c991b60f9d60947f4
| 3,645,535
|
def cmServiceAbort():
"""CM SERVICE ABORT Section 9.2.7"""
a = TpPd(pd=0x5)
b = MessageType(mesType=0x23) # 00100011
packet = a / b
return packet
|
1ae9744fd21760775a45066ffeb11d7dea12c127
| 3,645,536
|
def get_distribution(distribution_id):
"""
Lists information about a specific distribution by id.
:param distribution_id: Id of CDN distribution
"""
cloudfront = CloudFront()
return cloudfront.get_distribution(distribution_id=distribution_id)
|
082c572341435423cb42ec895369af7822caee80
| 3,645,537
|
import sqlite3
import pandas as pd
def get_information_per_topic(db_path: str, topic: str, field: str):
""" Query all alert data monitoring rows for a given topic
Parameters
----------
db_path: str
Path to the monitoring database. The database will be created if
it does not exist yet.
topic: str
Topic name of a stream
field: str
Field for which you want the data.
Returns
----------
alert_id: list
Values of `field` for the matching alert rows (a list with a single empty string if the query fails).
Examples
----------
>>> df = get_information_per_topic(db_fn, "tutu", "objectId")
>>> print(len(df))
1
"""
con = sqlite3.connect(db_path)
statement = f"SELECT {field} FROM `{ALERT_TABLE}` WHERE topic = '{topic}';"
# catch error if the DB is empty
try:
df = pd.read_sql_query(statement, con)
alert_id = list(df[field])
except pd.io.sql.DatabaseError as e:
print(e)
alert_id = [""]
return alert_id
|
97e95942506d15f1604d026c2a9954408ea01c29
| 3,645,538
|
from sys import prefix
def gripper_client(finger_positions):
"""Send a gripper goal to the action server."""
action_address = '/' + prefix + 'driver/fingers_action/finger_positions'
client = actionlib.SimpleActionClient(action_address,
kinova_msgs.msg.SetFingersPositionAction)
client.wait_for_server()
goal = kinova_msgs.msg.SetFingersPositionGoal()
goal.fingers.finger1 = float(finger_positions[0])
goal.fingers.finger2 = float(finger_positions[1])
goal.fingers.finger3 = float(finger_positions[2])
client.send_goal(goal)
if client.wait_for_result(rospy.Duration(50.0)):
return client.get_result()
else:
client.cancel_all_goals()
rospy.logwarn('the gripper action timed-out')
return None
|
eb22363d63b84bcd14e8cf17317d2c1780db7167
| 3,645,539
|
import re
from datetime import datetime
import numpy
def validate_column(column_name,value,lookup_values):
"""Validates columns found in Seq&Treat tuberculosis AST donation spreadsheets.
This function understands either the format of a passed column or uses values
derived from lookup Pandas dataframes to check each value in a spreadsheet.
Args:
column_name (str): the name of the column. Not checked at present!
value: the contents to check
Returns:
True/False
"""
# the SITEID must exist in the table
if column_name=='site_id':
result=str(value) in lookup_values['SITES']
# as must the COUNTRY code
elif column_name=='country_where_sample_taken':
result=value in lookup_values['COUNTRIES']
elif column_name=='instrument_model':
result=value in lookup_values['SEQUENCERS']
elif column_name=='isolate_number':
try:
result=value>0
except:
result=False
elif column_name=='sequence_replicate_number':
result=bool(re.match('^[_0-9]+$',str(value)))
elif column_name in ['dataset_name','lab_id','subject_id']:
if 'nan' in str(value):
return(False)
else:
result=bool(re.match('^[_\-A-Za-z0-9]+$',str(value)))
elif column_name in ['collection_date','submission_date']:
# this will catch nans
if value!=value:
result=True
# otherwise the pandas date converters will have picked it up
else:
result=isinstance(value,datetime)
elif column_name=='reads_file_1':
result=bool(re.match('^[\-_A-Za-z0-9]+_R1.fastq.gz$',str(value)))
elif column_name=='reads_file_2':
result=bool(re.match('^[\-_A-Za-z0-9]+_R2.fastq.gz$',str(value)))
elif column_name in ['reads_file_1_md5','reads_file_2_md5']:
result=bool(re.match('^[a-z0-9]+$',str(value)))
elif column_name in ['ena_deposited']:
result=value in [True,False]
elif column_name in ['ena_run_accession']:
result=False
if isinstance(value,float) and numpy.isnan(value):
result=True
elif isinstance(value,str):
result=bool(re.match('^(E|D|S)RR[0-9]{6,}$',value))
elif column_name in ['ena_sample_accession']:
result=False
if isinstance(value,float) and numpy.isnan(value):
result=True
elif isinstance(value,str):
result=bool(re.match('^(E|D|S)RS[0-9]{6,}$',value)) or bool(value[:5]=='SAMEA')
elif column_name=='method':
if isinstance(value,float) and numpy.isnan(value):
result=True
else:
result=value in lookup_values['AST_METHODS']
elif column_name=='phenotype':
if isinstance(value,float):
if numpy.isnan(value):
result=True
else:
result=value>0
elif isinstance(value,str):
if value in ['R','S','U']:
return True
else:
if ',' in value:
value=value.replace(',','.')
if '≥' in value:
value=value.replace('≥','>=')
if '≤' in value:
value=value.replace('≤','<=')
if ' ' in value:
value=value.replace(' ','')
# FIXME: hack to allow through incorrect >=32 MICs (should be >32)
if value[:2]=='>=':
try:
result=float(value[2:])>0
except:
result=False
elif value[0]=='>':
try:
result=float(value[1:])>0
except:
result=False
elif value[:2]=='<=':
try:
result=float(value[2:])>0
except:
result=False
# FIXME: hack to allow through incorrect <0.06 MICs (should be <=0.06)
elif value[0]=='<':
try:
result=float(value[1:])>0
except:
result=False
else:
try:
result=float(value)>0
except:
result=False
else:
result=value>0
elif column_name=='cc':
result=False
if isinstance(value,str):
result=value in ['WHO','UK']
elif isinstance(value,float):
if numpy.isnan(value):
return(True)
else:
result=value>0
elif isinstance(value,int):
result=value>0
return result
|
a84df7f80e9e146f742f4d25050bfd4591a0c5cf
| 3,645,540
|
def get_html_from_url(url, timeout=None):
"""Get HTML document from URL
Parameters
url (str) : URL to look for
timeout (float) : Inactivity timeout in seconds
Return
The HTML document as a string
"""
resp = reqget(url, timeout=timeout)
return resp.text
|
f909db702c812be029f00dd73bfaef8ac48966ba
| 3,645,541
|
import pandas as pd
def clean(column, output_column=None, file_path=None, df=None, symbols='!@#$%^&*()+={}[]:;’\”/<>',
replace_by_space=True, keep_original=False):
"""
cleans the cell values in a column, creating a new column with the clean values.
Args:
column: the column to be cleaned.
output_column: the name of the column where cleaned column values are stored. If not provided, the name of the new column is the name of the input column with the suffix _clean.
file_path: input file path
df: or input dataframe
symbols: a string containing the set of characters to be removed; default is "!@#$%^&*()+={}[]:;'\"/<>"
replace_by_space: when True (default) all instances of the symbols are replaced by a space. In case of removal of multiple consecutive characters, they'll be replaced by a single space. The value False causes the symbols to be deleted.
keep_original: when True, the output column contains the original value with the clean value appended, separated by |. Default is False.
Returns: a dataframe with the new output column containing the clean values
"""
if file_path is None and df is None:
raise RequiredInputParameterMissingException(
'One of the input parameters is required: {} or {}'.format(file_path, df))
symbols = list(symbols)
if output_column is None:
output_column = '{}_clean'.format(column)
if file_path:
df = pd.read_csv(file_path)
df[output_column] = df[column].map(lambda x: string_clean(x, symbols, replace_by_space, keep_original))
return df
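# Minimal usage sketch on an in-memory dataframe (assumes the module-level string_clean
# helper used by clean() above is available alongside it):
example_df = pd.DataFrame({'label': ['Foo (bar)!', 'baz;qux']})
cleaned = clean('label', df=example_df)
print(cleaned[['label', 'label_clean']])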
|
575d30a704c9ad37c027251ef609ef9c70445139
| 3,645,542
|
def len_lt(name, value):
"""
Only succeed if the length of the given register location is less than
the given value.
USAGE:
.. code-block:: yaml
foo:
check.len_lt:
- value: 42
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
- require:
- check: foo
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if name not in __reg__:
ret["result"] = False
ret["comment"] = "Value {0} not in register".format(name)
return ret
if len(__reg__[name]["val"]) < value:
ret["result"] = True
return ret
|
fde2db2e73d7ac711677b33518b6d5342b5dcbdb
| 3,645,543
|
from typing import Iterable
def _ll_to_xy(latitude, longitude, wrfin=None, timeidx=0,
stagger=None, method="cat", squeeze=True, cache=None,
_key=None, as_int=True, **projparams):
"""Return the x,y coordinates for a specified latitude and longitude.
The *latitude* and *longitude* arguments can be a single value or a
sequence of values.
The leftmost dimension of the returned array represents two different
quantities:
- return_val[0,...] will contain the X (west_east) values.
- return_val[1,...] will contain the Y (south_north) values.
Args:
latitude (:obj:`float` or sequence): A single latitude or a sequence
of latitude values to be converted.
longitude (:obj:`float` or sequence): A single longitude or a sequence
of latitude values to be converted.
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
iterable): WRF-ARW NetCDF
data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile`
or an iterable sequence of the aforementioned types.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
stagger (:obj:`str`): By default, the latitude and longitude are
returned on the mass grid, but a staggered grid can be chosen
with the following options:
- 'm': Use the mass grid (default).
- 'u': Use the same staggered grid as the u wind component,
which has a staggered west_east (x) dimension.
- 'v': Use the same staggered grid as the v wind component,
which has a staggered south_north (y) dimension.
method (:obj:`str`, optional): The aggregation method to use for
sequences. Must be either 'cat' or 'join'.
'cat' combines the data along the Time dimension.
'join' creates a new dimension for the file index.
The default is 'cat'.
squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
with a size of 1 from being automatically removed from the shape
of the output. Default is True.
cache (:obj:`dict`, optional): A dictionary of (varname, ndarray)
that can be used to supply pre-extracted NetCDF variables to the
computational routines. It is primarily used for internal
purposes, but can also be used to improve performance by
eliminating the need to repeatedly extract the same variables
used in multiple diagnostics calculations, particularly when using
large sequences of files.
Default is None.
_key (:obj:`int`, optional): A caching key. This is used for internal
purposes only. Default is None.
as_int (:obj:`bool`): Set to True to return the x,y values as
:obj:`int`, otherwise they will be returned as :obj:`float`.
**projparams: Map projection keyword arguments to set manually.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`: The
x,y coordinate value(s) whose leftmost dimension is 2 (0=X, 1=Y).
If xarray is enabled and the *meta* parameter is True, then the result
will be a :class:`xarray.DataArray` object. Otherwise, the result will
be a :class:`numpy.ndarray` object with no metadata.
"""
if wrfin is not None:
(map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon,
pole_lat, pole_lon, known_x, known_y, dx, dy, latinc,
loninc) = _get_proj_params(wrfin, timeidx, stagger, method, squeeze,
cache, _key)
else:
(map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon,
pole_lat, pole_lon, known_x, known_y, dx, dy, latinc,
loninc) = _kwarg_proj_params(**projparams)
if isinstance(latitude, Iterable):
lats = np.asarray(latitude)
lons = np.asarray(longitude)
# Note: For scalars, this will make a single element array
lats = lats.ravel()
lons = lons.ravel()
if (lats.size != lons.size):
raise ValueError("'latitude' and 'longitude' "
"must be the same length")
if ref_lat.size == 1:
outdim = [2, lats.size]
extra_dims = [outdim[1]]
else:
# Moving domain will have moving ref_lats/ref_lons
outdim = [2, ref_lat.size, lats.size]
extra_dims = outdim[1:]
result = np.empty(outdim, np.float64)
for left_idxs in iter_left_indexes(extra_dims):
# Left indexes is a misnomer, since these will be on the right
x_idxs = (0,) + left_idxs
y_idxs = (1,) + left_idxs
if ref_lat.size == 1:
ref_lat_val = ref_lat[0]
ref_lon_val = ref_lon[0]
else:
ref_lat_val = ref_lat[left_idxs[-2]]
ref_lon_val = ref_lon[left_idxs[-2]]
lat = lats[left_idxs[-1]]
lon = lons[left_idxs[-1]]
xy = _lltoxy(map_proj, truelat1, truelat2, stdlon,
ref_lat_val, ref_lon_val, pole_lat, pole_lon,
known_x, known_y, dx, dy, latinc, loninc,
lat, lon)
# Note: comes back from fortran as y,x
result[x_idxs] = xy[1]
result[y_idxs] = xy[0]
else:
result = np.empty((2,), np.float64)
fort_out = _lltoxy(map_proj, truelat1, truelat2, stdlon,
ref_lat, ref_lon, pole_lat, pole_lon,
known_x, known_y, dx, dy, latinc, loninc,
latitude, longitude)
# Note, comes back from fortran as y,x. So, need to swap them.
result[0] = fort_out[1]
result[1] = fort_out[0]
# Make indexes 0-based
result = result - 1
if as_int:
result = np.rint(result).astype(int)
return result
|
9d96d0d6e520731f16079c69389eff0c47c70dce
| 3,645,544
|
import numpy as np
def onedsinusoid(x, H, A, omega, phi):
"""
Returns a 1-dimensional sinusoid of the form H + A*np.sin(omega*x + phi),
where phi is given in degrees and converted to radians internally.
"""
phi = np.pi/180 * phi
return H+A*np.sin(omega*x+phi)
|
9917b462a6cd39c84a354d031ad8c6a09afcdec0
| 3,645,545
|
def _make_wrapper_func(func_name):
"""
make_eus_instance()から呼ばれるEus_pkgクラスコンストラクタにて、eusの関数名のエントリからラッパー関数を作成する際の補助関数。
引数部の構築は_translate_args()を用いて行う。
Args:
func_name (str): もとのEuslispでの関数名でpkg::を含む。なお、関数は内部シンボルと仮定している。(exportされてたら外部シンボルアクセス:(1個)を使わなければならない)。
Returns:
wrapper (function): 複数引数, キーワード引数を適切にEuslispで処理可能な形で変換しS式を送り込む処理を行う関数
"""
def wrapper(*args, **kwargs):
# TODO: checking phase here
# mapping phase
if len(args) == len(wrapper.arg_constructors):
args = tuple([constructor(arg) if not isinstance(arg, Eus_proxy) and constructor is not None else arg for arg, constructor in zip(args, wrapper.arg_constructors)]) # an element of wrapper.arg_constructors may be None; see the logging phase below
if len(kwargs) == len(wrapper.kwarg_constructors):
kwargs = {key:(wrapper.kwarg_constructors[key](kwargs[key]) if not isinstance(kwargs[key], Eus_proxy) else kwargs[key]) for key in wrapper.kwarg_constructors}
# evaluation phase
result = eval_foreign_vm_proxy('({}{})'.format(func_name, _translate_args(args, kwargs)))
# logging phase (reaching this point means no EusError was raised in the evaluation phase, so the argument types were valid)
# during logging, if an element of args or a value of kwargs is not a proxy (i.e. the default conversion rule was applied), register None in arg_constructors
# when identifying the constructor from a proxy's class there is a conflict between nil-terminated and non-nil-terminated conses; given typical usage we simply assume it is an EusList!
# for ind, elm in enumerate(args):
# if isinstance(elm, Eus_proxy):
# pass
# else:
# pass
# for ind, key in enumerate(kwargs):
# if isinstance(key, Eus_proxy):
# pass
# else:
# pass
return result
wrapper.arg_constructors = []
wrapper.kwarg_constructors = {}
wrapper.arg_possible_types = []
wrapper.kwarg_possible_types = {}
return wrapper
|
34bf7d930129432be50a7ddada6641bf6d8eea0e
| 3,645,546
|
from math import ceil, log10
def number_in_english(number):
"""Returns the given number in words
>>> number_in_english(0)
'zero'
>>> number_in_english(5)
'five'
>>> number_in_english(11)
'eleven'
>>> number_in_english(745)
'seven hundred and fourty five'
>>> number_in_english(1380)
'one thousand three hundred and eighty'
>>> number_in_english(3204000)
'three million two hundred four thousand'
>>> number_in_english(15000)
'fifteen thousand'
>>> number_in_english(1005)
'one thousand and five'
"""
if not number:
return 'zero'
# split number into blocks of 3
# e.g. 1234567 -> ['567', '234', '1']
numBlocks = int(ceil((log10(number)+1)/3)) # number of digits / 3
number_split = [(number//1000**x)%1000 for x in range(numBlocks)]
# translate each block individual and add the word for the power
# start with the lowest power
word = ''
for n, p in zip(number_split, powers):
if n:
# only the tenner block can have an 'and' (e.g. 'one hundred and five' but not 'one million and one thousand')
word = _hundred_in_english(n, (p == '')) + ' ' + p + ' ' + word
# remove 'and' that was added but is not precede by a number (e.g. 5 -> 'and five')
if word.startswith('and'):
word = word.replace('and', '')
return word.strip()
|
b3d580ed843d5d4bf3c62662c831391536e7479e
| 3,645,547
|
def create_app(environment):
"""Factory Method that creates an instance of the app with the given config.
Args:
environment (str): Specify the configuration to initialize the app with.
Returns:
app (Flask): it returns an instance of Flask.
"""
app = Flask(__name__)
app.config.from_object(env_configuration[environment])
db.init_app(app)
api = Api(
app=app,
default='Api',
default_label="Available Endpoints",
title='MovieBuff API',
version='2.0.0',
description="""MovieBuff Api Endpoint Documentation 📚"""
)
# enable cross origin resource sharing
CORS(app)
api.add_resource(Users, "/api/v2/auth/<string:operation>",
endpoint="user")
api.add_resource(Movies, "/api/v2/movie", endpoint="movie")
api.add_resource(Categories, "/api/v2/movie/category",
"/api/v2/movie/category/<string:category_id>",
endpoint="category")
api.add_resource(UserMovieRatings, "/api/v2/movie/ratings",
endpoint="ratings")
api.add_resource(Search,
"/api/v2/movie/search", endpoint="search")
# handle default 404 exceptions
@app.errorhandler(404)
def resource_not_found(error):
response = jsonify(dict(
error='Not found',
message='The requested URL was not found on the server.'))
response.status_code = 404
return response
# handle default 500 exceptions
@app.errorhandler(500)
def internal_server_error(error):
response = jsonify(dict(
error='Internal server error',
message="The server encountered an internal error."))
response.status_code = 500
return response
return app
|
5cd5867a80ec696ee2a5647448c8e8b60fe2e023
| 3,645,548
|
def heating_design_temp(tmy_id):
"""Returns the heating design temperature (deg F) for the TMY3 site
identified by 'tmy_id'.
"""
return df_tmy_meta.loc[tmy_id].heating_design_temp
|
204e219840ed5d2e04e9bb53706883d0fc1c6cfa
| 3,645,549
|
def tonal_int(x):
"""
>>> tonal_int((4,7))
7
>>> tonal_int((4,7,2))
31
>>> tonal_int((6,11,-1))
-1
>>> tonal_int((0,-1,-1))
-13
>>> tonal_int((6,0,0))
12
>>> tonal_int((0,11,0))
-1
>>> tonal_int((0,11))
-1
>>> tonal_int((2, 0))
0
"""
if len(x) == 2:
x = _tonal_unmodulo(x)
return x[1]
d = x[0]
c = x[1]
base_c = MS[d].c
# Example: Cb --- base=0 c=11 c-base=11 11 - 12 = -1
if c - base_c > 3:
c = c - C_LEN
# Example: B# --- base=11 c=0 c-base=-11 c+C_LEN =12
if c - base_c < -3:
c = c + C_LEN
return c + x[2]*(C_LEN)
|
c7fb8dfd7ac5c82a81241efb807a7e45b877eee4
| 3,645,550
|
import re
import warnings
def read_vcf(vcf_file, gene_filter=None, experimentalDesig=None):
"""
Reads a vcf v4.0 or 4.1 file and generates :class:`~epytope.Core.Variant.Variant` objects containing
all annotated :class:`~epytope.Core.Transcript.Transcript` ids, and outputs a list of :class:`~epytope.Core.Variant.Variant`.
Only the following variant types are considered by the reader; variants labeled synonymous are not integrated into any variant:
filter_variants = ['missense_variant', 'frameshift_variant', 'stop_gained', 'missense_variant&splice_region_variant', "synonymous_variant", "inframe_deletion", "inframe_insertion"]
:param str vcf_file: The path to the vcf file
:param list(str) gene_filter: A list of gene names of interest (only variants associated with these genes
are generated)
:return: List of fully annotated :class:`~epytope.Core.Variant.Variant` objects
:rtype: Tuple of (list(:class:`~epytope.Core.Variant.Variant`), list(transcript_ids))
"""
vl = list()
with open(vcf_file, 'r') as f:
vcf_reader = vcf.Reader(f)
vl = [r for r in vcf_reader]
list_vars = []
transcript_ids = []
genotye_dict = {"het": False, "hom": True, "ref": True}
for num, record in enumerate(vl):
c = record.CHROM.strip('chr') # chrom
p = record.POS - 1 # vcf is 1-based & epytope 0-based
variation_dbid = record.ID # e.g. rs0123
r = str(record.REF) # reference nuc (seq)
v_list = record.ALT # list of variants
q = record.QUAL # ?
f = record.FILTER # empty if PASS, content otherwise
# I guess we shouldn't expect that keyword to be there ?!
#z = record.INFO['SOMATIC'] #if true somatic
vt = VariationType.UNKNOWN
if record.is_snp:
vt = VariationType.SNP
elif record.is_indel:
if len(v_list)%3 == 0: # no frameshift
if record.is_deletion:
vt = VariationType.DEL
else:
vt = VariationType.INS
else: # frameshift
if record.is_deletion:
vt = VariationType.FSDEL
else:
vt = VariationType.FSINS
gene = None
# WHICH VARIANTS TO FILTER ?
filter_variants = ['missense_variant', 'frameshift_variant', 'stop_gained', 'missense_variant&splice_region_variant', "synonymous_variant", "inframe_deletion", "inframe_insertion"]
for alt in v_list:
isHomozygous = False
if 'HOM' in record.INFO:
#TODO set by AF & FILTER as soon as available
isHomozygous = record.INFO['HOM'] == 1
elif 'SGT' in record.INFO:
zygosity = record.INFO['SGT'].split("->")[1]
if zygosity in genotye_dict:
isHomozygous = genotye_dict[zygosity]
else:
if zygosity[0] == zygosity[1]:
isHomozygous = True
else:
isHomozygous = False
else:
for sample in record.samples:
if 'GT' in sample.data:
isHomozygous = sample.data['GT'] == '1/1'
if "ANN" in record.INFO and record.INFO['ANN']:
isSynonymous = False
coding = dict()
for annraw in record.INFO['ANN']: # for each ANN only add a new coding! see GSvar
annots = annraw.split('|')
obs, a_mut_type, impact, a_gene, a_gene_id, feature_type, transcript_id, exon, tot_exon, trans_coding, prot_coding, cdna, cds, aa, distance, warns = annots
if a_mut_type in filter_variants:
tpos = 0
ppos = 0
# get cds/protein positions and convert mutation syntax to epytope format
if trans_coding != '':
positions = re.findall(r'\d+', trans_coding)
ppos = int(positions[0]) - 1
if prot_coding != '':
positions = re.findall(r'\d+', prot_coding)
tpos = int(positions[0]) - 1
isSynonymous = (a_mut_type == "synonymous_variant")
#rather take gene_id than gene name
gene = a_gene_id
#REFSEQ specific ? Do have to split because of biomart ?
transcript_id = transcript_id.split(".")[0]
#TODO vcf are not REFSEQ only
#coding string not parsed anyway ? just use the one given by SnpEff
coding[transcript_id] = MutationSyntax(transcript_id, ppos, tpos, trans_coding, prot_coding)
transcript_ids.append(transcript_id)
if coding and not isSynonymous:
if vt == VariationType.SNP:
pos, reference, alternative = p, str(r), str(alt)
elif vt == VariationType.DEL or vt == VariationType.FSDEL:
if alt != '-':
pos, reference, alternative = p + len(alt), r[len(alt):], '-'
else:
pos, reference, alternative = p, str(r), str(alt)
elif vt == VariationType.INS or vt == VariationType.FSINS:
if r != '-':
if alt != '-':
pos, reference, alternative = p + len(r), '-', str(alt)[len(r):]
else:
pos, reference, alternative = p + len(r), '-', str(alt)
else:
pos, reference, alternative = p, str(r), str(alt)
var = Variant("line" + str(num), vt, c, pos, reference, alternative, coding, isHomozygous, isSynonymous, experimentalDesign=experimentalDesig)
var.gene = gene
var.log_metadata("vardbid", variation_dbid)
list_vars.append(var)
else:
warnings.warn("Skipping unannotated variant", UserWarning)
return list_vars, transcript_ids
|
d277369ff340ddb7adefdb70009b69a3d1f5c533
| 3,645,551
|
def sqrtmod(a, p):
"""
Returns a square root of a modulo p.
Input:
a -- an integer that is a perfect
square modulo p (this is checked)
p -- a prime
Output:
int -- a square root of a, as an integer
between 0 and p-1.
Examples:
>>> sqrtmod(4, 5) # p == 1 (mod 4)
3 #rand
>>> sqrtmod(13, 23) # p == 3 (mod 4)
6 #rand
>>> sqrtmod(997, 7304723089) # p == 1 (mod 4)
761044645L #rand
"""
a %= p
if p == 2: return a
assert legendre(a, p) == 1, "a must be a square mod p."
if p%4 == 3: return powermod(a, (p+1)//4, p)
def mul(x, y): # multiplication in R # (1)
return ((x[0]*y[0] + a*y[1]*x[1]) % p, \
(x[0]*y[1] + x[1]*y[0]) % p)
def pow(x, n): # exponentiation in R # (2)
ans = (1,0)
xpow = x
while n != 0:
if n%2 != 0: ans = mul(ans, xpow)
xpow = mul(xpow, xpow)
n //= 2
return ans
while True:
z = randrange(2,p)
u, v = pow((1,z), (p-1)//2)
if v != 0:
vinv = inversemod(v, p)
for x in [-u*vinv, (1-u)*vinv, (-1-u)*vinv]:
if (x*x)%p == a: return x%p
assert False, "Bug in sqrtmod."
|
638481b8d42b9047df1dbd3a8f964762baab783e
| 3,645,552
|
def DrtVariableExpression(variable):
"""
This is a factory method that instantiates and returns a subtype of
``DrtAbstractVariableExpression`` appropriate for the given variable.
"""
if is_indvar(variable.name):
return DrtIndividualVariableExpression(variable)
elif is_funcvar(variable.name):
return DrtFunctionVariableExpression(variable)
elif is_eventvar(variable.name):
return DrtEventVariableExpression(variable)
else:
return DrtConstantExpression(variable)
|
a37b6e3f295e603d4ee78007dc4d4a22d22d1c3f
| 3,645,553
|
import pandas as pd
def process_cv_results(cv_results):
"""
This function reformats the .cv_results_ attribute of a fitted randomized
search (or grid search) into a dataframe with only the columns we care
about.
Args
--------------
cv_results : the .cv_results_ attribute of a fitted randomized search
(or grid search) object
Returns
--------------
a sorted dataframe with select information
"""
results = pd.DataFrame(cv_results)
cols = ['mean_test_score', 'mean_train_score', 'std_test_score']
if 'mean_train_score' not in cv_results.keys():
cols = ['mean_test_score', 'std_test_score']
cols += [c for c in results.columns.values if c.startswith('param_')]
return results[cols].sort_values(by='mean_test_score', ascending=False)
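# Minimal usage sketch with scikit-learn (illustrative; any fitted search object exposing
# cv_results_ works the same way):
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
X, y = load_iris(return_X_y=True)
search = GridSearchCV(LogisticRegression(max_iter=200), {'C': [0.1, 1.0, 10.0]}, cv=3)
search.fit(X, y)
print(process_cv_results(search.cv_results_))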
|
a47b9cbc3fcb00f782eb46269f55259995d4b73c
| 3,645,554
|
def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient = softmaxCostAndGradient):
""" CBOW model in word2vec """
# Implement the continuous bag-of-words model in this function.
# Input/Output specifications: same as the skip-gram model
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
#################################################################
# IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRIITEN #
# ASSIGNMENT ARE NOT! #
#################################################################
cost = 0
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
### YOUR CODE HERE
#raise NotImplementedError
### END YOUR CODE
return cost, gradIn, gradOut
|
5766b3c2facba8272431796b46da8abbd7264292
| 3,645,555
|
import yaml
def generate_dlf_yaml(in_yaml):
"""
Generate DLF-compatible YAML configuration file using
"templates/dlf_out.yaml" as template.
:param in_yaml: dict representation of a YAML document defining
placeholder values in "templates/dlf_out.yaml"
:type in_yaml: dict
:raises PlaceholderNotFoundError: a {{...}} placeholder referenced
in "templates/dlf_out.yaml" was not found
:raises ValueError: if in_yaml is not of type dict
:return: DLF-compatible YAML file
:rtype: str
"""
dlf_yaml_dict = generate_dlf_yaml_dict(in_yaml)
dlf_yaml = yaml.safe_dump(dlf_yaml_dict,
default_flow_style=False,
allow_unicode=True,
sort_keys=False)
return dlf_yaml
|
c3bdf86731eb26904cae95b65c5b6181cc130ae8
| 3,645,556
|
def dice_coefficient(x, target):
"""
Dice Loss: 1 - 2 * (intersection(A, B) / (A^2 + B^2))
:param x:
:param target:
:return:
"""
eps = 1e-5
n_inst = x.size(0)
x = x.reshape(n_inst, -1)
target = target.reshape(n_inst, -1)
intersection = (x * target).sum(dim=1)
union = (x ** 2.0).sum(dim=1) + (target ** 2.0).sum(dim=1) + eps
loss = 1. - (2 * intersection / union)
return loss
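# Minimal usage sketch with PyTorch tensors: one instance, four "pixels".
import torch
pred = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
gt = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
# intersection = 1, union = 2 + 1 (+ eps) -> loss is roughly 1 - 2/3
print(dice_coefficient(pred, gt))  # tensor([0.3333])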
|
c73cd86ed11bf89d94fb84db16186d6ace39d814
| 3,645,557
|
import numpy as np
def batch_intersection_union(output, target, nclass):
"""mIoU"""
# inputs are numpy array, output 4D, target 3D
predict = np.argmax(output, axis=1) + 1 # [N,H,W]
target = target.astype(float) + 1 # [N,H,W]
predict = predict.astype(float) * np.array(target > 0).astype(float)
intersection = predict * np.array(predict == target).astype(float)
# areas of intersection and union
# element 0 in intersection occur the main difference from np.bincount. set boundary to -1 is necessary.
area_inter, _ = np.array(np.histogram(intersection, bins=nclass, range=(1, nclass+1)))
area_pred, _ = np.array(np.histogram(predict, bins=nclass, range=(1, nclass+1)))
area_lab, _ = np.array(np.histogram(target, bins=nclass, range=(1, nclass+1)))
area_all = area_pred + area_lab
area_union = area_all - area_inter
return area_inter, area_union
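# Illustrative call on random data: 2 images, 3 classes, 8x8 logits. In practice
# the per-class intersection/union arrays are accumulated over a whole dataset
# before computing mIoU.
logits = np.random.rand(2, 3, 8, 8)               # [N, C, H, W] network output
labels = np.random.randint(0, 3, size=(2, 8, 8))  # [N, H, W] labels in {0, 1, 2}
inter, union = batch_intersection_union(logits, labels, nclass=3)
miou = (inter / np.maximum(union, 1)).mean()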
|
a62596ee500ec7525ceefeb6e6de0fd6673c522d
| 3,645,558
|
import torch
def convert(trainset,testset,seed=1,batch_size=128, num_workers=2,pin_memory=True):
"""
Converts DataSet Object to DataLoader
"""
SEED = 1
cuda = torch.cuda.is_available()
torch.manual_seed(SEED)
if cuda:
torch.cuda.manual_seed(SEED)
dataloader_args = dict(shuffle=True, batch_size=128, num_workers=2, pin_memory=pin_memory) if cuda else dict(shuffle=True, batch_size=64)
trainloader = torch.utils.data.DataLoader(trainset, **dataloader_args)
testloader = torch.utils.data.DataLoader(testset, **dataloader_args)
return trainloader, testloader
|
c380caa064b07ffc108ae33acc98361910b8f28f
| 3,645,559
|
import numpy as np
from matplotlib import cm
from tensorflow import keras  # assumed; the standalone keras package exposes the same preprocessing API
def build_gradcam(img_path, heatmap, color_map, original_image_colormap, alpha=0.5):
"""
Builds the gradcam.
Args:
img_path (_type_): Image path.
heatmap (_type_): Heatmap.
color_map (_type_): Color map.
original_image_colormap (_type_): Original image colormap.
alpha (float, optional): Alpha. Defaults to 0.5.
Returns:
_type_: Gradcam.
"""
img = keras.preprocessing.image.load_img(img_path, color_mode=original_image_colormap)
img = keras.preprocessing.image.img_to_array(img)
heatmap = np.uint8(255 * heatmap)
jet = cm.get_cmap(color_map)
jet_colors = jet(np.arange(256))[:, :3]
jet_heatmap = jet_colors[heatmap]
jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)
superimposed_img = jet_heatmap * alpha + img
superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
return superimposed_img
|
c71c24cc3fccc962b1083c1491e8da0fae9464ed
| 3,645,560
|
def sample_joint_comorbidities(age, country):
"""
Default country is China.
For other countries pass value for country from {us, Republic of Korea, japan, Spain, italy, uk, France}
"""
return sample_joint(age, p_comorbidity(country, 'diabetes'), p_comorbidity(country, 'hypertension'))
|
8190e73ccd637b78270a974773259c9bb4367fd5
| 3,645,561
|
def generate_tsdf_3d_ewa_image(depth_image, camera,
camera_extrinsic_matrix=np.eye(4, dtype=np.float32),
field_shape=np.array([128, 128, 128]), default_value=1,
voxel_size=0.004,
array_offset=np.array([-64, -64, 64]),
narrow_band_width_voxels=20, back_cutoff_voxels=np.inf,
gaussian_covariance_scale=1.0):
"""
Generate 3D TSDF field based on elliptical Gaussian averages (EWA) of depth values from the provided image.
Elliptical Gaussian filters are projected from spherical 3D Gaussian functions onto the depth image and convolved
with a circular 2D Gaussain filter before averaging the depth values.
:type depth_image: np.ndarray
:param depth_image: depth image to use
:type camera: calib.camera.DepthCamera
:param camera: camera used to generate the depth image
:param voxel_size: voxel size, in meters
:param array_offset: offset of the TSDF grid from the world origin
:param camera_extrinsic_matrix: matrix representing transformation of the camera (incl. rotation and translation)
[ R | T]
[ 0 | 1]
:param default_value: default initial TSDF value
:param field_shape: shape of the TSDF grid to generate
:param narrow_band_width_voxels: span (in voxels) where signed distance is between -1 and 1
:param back_cutoff_voxels: where to truncate the negative voxel values (currently not supported!)
:param gaussian_covariance_scale: scale of elliptical gaussians (relative to voxel size)
:return: resulting 3D TSDF
"""
# TODO: use back_cutoff_voxels for additional limit on
# "if signed_distance < -narrow_band_half_width" (maybe?)
if default_value == 1:
field = np.ones(field_shape, dtype=np.float32)
elif default_value == 0:
field = np.zeros(field_shape, dtype=np.float32)
else:
field = np.ndarray(field_shape, dtype=np.float32)
field.fill(default_value)
camera_intrinsic_matrix = camera.intrinsics.intrinsic_matrix
depth_ratio = camera.depth_unit_ratio
narrow_band_half_width = narrow_band_width_voxels / 2 * voxel_size # in metric units
w_voxel = 1.0
camera_rotation_matrix = camera_extrinsic_matrix[0:3, 0:3]
covariance_voxel_sphere_world_space = np.eye(3) * (gaussian_covariance_scale * voxel_size)
covariance_camera_space = camera_rotation_matrix.dot(covariance_voxel_sphere_world_space) \
.dot(camera_rotation_matrix.T)
image_space_scaling_matrix = camera.intrinsics.intrinsic_matrix[0:2, 0:2]
squared_radius_threshold = 4.0 * gaussian_covariance_scale * voxel_size
for z_field in range(field_shape[2]):
for y_field in range(field_shape[1]):
for x_field in range(field_shape[0]):
# coordinates deliberately flipped here to maintain consistency between Python & C++ implementations
# Eigen Tensors being used are column-major, whereas here we use row-major layout by default
x_voxel = (z_field + array_offset[0]) * voxel_size
y_voxel = (y_field + array_offset[1]) * voxel_size
z_voxel = (x_field + array_offset[2]) * voxel_size
voxel_world = np.array([[x_voxel, y_voxel, z_voxel, w_voxel]], dtype=np.float32).T
voxel_camera = camera_extrinsic_matrix.dot(voxel_world).flatten()[:3]
                # near_clipping_distance is expected to be defined at module level (e.g. the camera's near-plane distance)
                if voxel_camera[2] <= near_clipping_distance:
continue
# distance along ray from camera to voxel center
ray_distance = np.linalg.norm(voxel_camera)
# squared distance along optical axis from camera to voxel
z_cam_squared = voxel_camera[2] ** 2
inv_z_cam = 1 / voxel_camera[2]
projection_jacobian = \
np.array([[inv_z_cam, 0, -voxel_camera[0] / z_cam_squared],
[0, inv_z_cam, -voxel_camera[1] / z_cam_squared],
[voxel_camera[0] / ray_distance, voxel_camera[1] / ray_distance,
voxel_camera[2] / ray_distance]])
remapped_covariance = projection_jacobian.dot(covariance_camera_space) \
.dot(projection_jacobian.T)
final_covariance = image_space_scaling_matrix.dot(remapped_covariance[0:2, 0:2]).dot(
image_space_scaling_matrix.T) + np.eye(2)
Q = np.linalg.inv(final_covariance)
gaussian = eg.EllipticalGaussian(eg.ImplicitEllipse(Q=Q, F=squared_radius_threshold))
voxel_image = (camera_intrinsic_matrix.dot(voxel_camera) / voxel_camera[2])[:2]
voxel_image = voxel_image.reshape(-1, 1)
bounds_max = gaussian.ellipse.get_bounds()
result = find_sampling_bounds_helper(bounds_max, depth_image, voxel_image)
if result is None:
continue
else:
(start_x, end_x, start_y, end_y) = result
weights_sum = 0.0
depth_sum = 0
for y_sample in range(start_y, end_y):
for x_sample in range(start_x, end_x):
sample_centered = np.array([[x_sample],
[y_sample]], dtype=np.float64) - voxel_image
dist_sq = gaussian.get_distance_from_center_squared(sample_centered)
if dist_sq > squared_radius_threshold:
continue
weight = gaussian.compute(dist_sq)
surface_depth = depth_image[y_sample, x_sample] * depth_ratio
if surface_depth <= 0.0:
continue
depth_sum += weight * surface_depth
weights_sum += weight
if depth_sum <= 0.0:
continue
final_depth = depth_sum / weights_sum
signed_distance = final_depth - voxel_camera[2]
field[z_field, y_field, x_field] = common.compute_tsdf_value(signed_distance, narrow_band_half_width)
return field
|
a7665434e58e3485af6a2f4124d9707b2a67f4b9
| 3,645,562
|
import pydoc
def locate(name):
"""
Locate the object for the given name
"""
obj = pydoc.locate(name)
if not obj:
obj = globals().get(name, None)
return obj
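# Example: dotted paths resolve through pydoc, while bare names defined in the
# calling module fall back to globals() (assuming no installed module shares the name).
print(locate("os.path.join"))   # <function join ...>
print(locate("locate"))         # the function above, found via globals()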
|
24f31b241ffcbd2e983889f209bff9a1ff8b1fc3
| 3,645,563
|
import numpy as np
from stentseg.utils import PointSet
from stentseg.utils.centerline import points_from_mesh
def get_mesh_deforms(mesh, deforms, origin, **kwargs):
"""
input : mesh object
deforms forward for mesh?!
origin (from volume)
output: PointSet of mesh vertices (duplicates removed) and list with
deforms (PointSets) of mesh vertices
"""
# for vertice in mesh._vertices:
# vertice[-1] = vertice[-1]*-1 # x,y,z with z flipped
# # Turn surfacepoints into a pointset
# pp = PointSet(3, dtype='float32')
# [pp.append(*p) for p in mesh._vertices]
pp = points_from_mesh(mesh, **kwargs) # removes duplicates
# Get deformation for all points
pp_deforms = []
samplePoints = pp - PointSet([o for o in reversed(origin)], dtype='float32')
for deform in deforms:
delta_z = deform.get_field_in_points(samplePoints, 0).reshape(-1, 1)
delta_y = deform.get_field_in_points(samplePoints, 1).reshape(-1, 1)
delta_x = deform.get_field_in_points(samplePoints, 2).reshape(-1, 1)
delta = PointSet( np.concatenate((delta_x, delta_y, delta_z), axis=1) )
pp_deforms.append(delta)
return pp, pp_deforms
|
710530c68d46e03d14eabc83db1fa448e76ebc2e
| 3,645,564
|
import argparse
def parse_args():
"""parse args for binlog2sql"""
parser = argparse.ArgumentParser(description='Parse MySQL binlog to SQL you want', add_help=False)
connect_setting = parser.add_argument_group('connect setting')
connect_setting.add_argument('-h', '--host', dest='host', type=str,
help='Host the MySQL database server located', default='127.0.0.1')
connect_setting.add_argument('-u', '--user', dest='user', type=str,
help='MySQL Username to log in as', default='root')
connect_setting.add_argument('-p', '--password', dest='password', type=str, nargs='*',
help='MySQL Password to use', default='')
connect_setting.add_argument('-P', '--port', dest='port', type=int,
help='MySQL port to use', default=3306)
interval = parser.add_argument_group('interval filter')
interval.add_argument('--start-file', dest='start_file', type=str, help='Start binlog file to be parsed')
interval.add_argument('--start-position', '--start-pos', dest='start_pos', type=int,
help='Start position of the --start-file', default=4)
interval.add_argument('--stop-file', '--end-file', dest='end_file', type=str,
help="Stop binlog file to be parsed. default: '--start-file'", default='')
interval.add_argument('--stop-position', '--end-pos', dest='end_pos', type=int,
help="Stop position. default: latest position of '--stop-file'", default=0)
interval.add_argument('--start-datetime', dest='start_time', type=str,
help="Start time. format %%Y-%%m-%%d %%H:%%M:%%S", default='')
interval.add_argument('--stop-datetime', dest='stop_time', type=str,
help="Stop Time. format %%Y-%%m-%%d %%H:%%M:%%S;", default='')
parser.add_argument('--stop-never', dest='stop_never', action='store_true', default=False,
help="Continuously parse binlog. default: stop at the latest event when you start.")
parser.add_argument('--help', dest='help', action='store_true', help='help information', default=False)
schema = parser.add_argument_group('schema filter')
schema.add_argument('-d', '--databases', dest='databases', type=str, nargs='*',
help='dbs you want to process', default='')
schema.add_argument('-t', '--tables', dest='tables', type=str, nargs='*',
help='tables you want to process', default='')
event = parser.add_argument_group('type filter')
event.add_argument('--only-dml', dest='only_dml', action='store_true', default=False,
help='only print dml, ignore ddl')
event.add_argument('--sql-type', dest='sql_type', type=str, nargs='*', default=['INSERT', 'UPDATE', 'DELETE'],
help='Sql type you want to process, support INSERT, UPDATE, DELETE.')
# exclusive = parser.add_mutually_exclusive_group()
parser.add_argument('-K', '--no-primary-key', dest='no_pk', action='store_true',
help='Generate insert sql without primary key if exists', default=False)
parser.add_argument('-B', '--flashback', dest='flashback', action='store_true',
help='Flashback data to start_position of start_file', default=False)
parser.add_argument('--back-interval', dest='back_interval', type=float, default=1.0,
help="Sleep time between chunks of 1000 rollback sql. set it to 0 if do not need sleep")
return parser
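# Typical use: build the parser and parse an argument list. Note that -h maps to
# --host here because the parser is created with add_help=False.
parser = parse_args()
args = parser.parse_args(['-h', '127.0.0.1', '-u', 'root',
                          '--start-file', 'mysql-bin.000001'])
print(args.host, args.user, args.start_file)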
|
e6ef917b97ea15097b30684a1069ea1c74b16064
| 3,645,565
|
import numpy as np
from scipy import stats
def lnLikelihoodDouble(parameters, values, errors, weights=None):
"""
Calculates the total log-likelihood of an ensemble of values, with
uncertainties, for a double Gaussian distribution (two means and
two dispersions).
INPUTS
parameters : model parameters (see below)
values : data values
errors : data uncertainties
OPTIONS
weights : weights on each data point [default: None, ie unweighted]
PARAMETERS
mean1 : model mean 1
    dispersion1 : model dispersion 1
    mean2 : model mean 2
    dispersion2 : model dispersion 2
f : fraction of component 1
"""
mean1, dispersion1, mean2, dispersion2, f = parameters
# insist that mean1 is less than mean2 or solution is degenerate
if mean1>mean2:
return -np.inf
# check for unit consistency
if getattr(mean1, "unit", None) is not None \
and getattr(dispersion1, "unit", None) is not None \
and getattr(mean2, "unit", None) is not None \
and getattr(dispersion2, "unit", None) is not None \
and getattr(values, "unit", None) is not None \
and getattr(errors, "unit", None) is not None:
mean1 = mean1.to(values.unit)
dispersion1 = dispersion1.to(values.unit)
mean2 = mean2.to(values.unit)
dispersion2 = dispersion2.to(values.unit)
errors = errors.to(values.unit)
# require positive dispersions
dispersion1 = np.abs(dispersion1)
dispersion2 = np.abs(dispersion2)
# likelihood of each data point
conv_dispersion1 = np.sqrt(dispersion1**2+errors**2)
conv_dispersion2 = np.sqrt(dispersion2**2+errors**2)
likelihoods = f*stats.norm.pdf(values, mean1, conv_dispersion1) \
+ (1-f)*stats.norm.pdf(values, mean2, conv_dispersion2)
# check that all are positive (should be!) and non-zero
if np.all(likelihoods<=0):
return -np.inf
# set zeros (or negatives) to the lowest non-zero value
likelihoods[likelihoods<=0] = likelihoods[likelihoods>0].min()*1e-5
# and take the log
ln_likelihoods = np.log(likelihoods)
# multiply by weights:
if weights is not None:
ln_likelihoods *= weights
# remove -infinities
ln_likelihoods[ln_likelihoods==-np.inf] \
= ln_likelihoods[ln_likelihoods>-np.inf].min()
# total likelihood
total_ln_likelihood = np.sum(ln_likelihoods)
# renormalise by weights
if weights is not None:
total_ln_likelihood *= np.size(ln_likelihoods)/np.sum(weights)
return total_ln_likelihood
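# Hedged example on synthetic data drawn from a two-component mixture; the
# parameter vector is (mean1, dispersion1, mean2, dispersion2, f).
rng = np.random.default_rng(0)
values = np.concatenate([rng.normal(0.0, 1.0, 300), rng.normal(5.0, 2.0, 700)])
errors = np.full_like(values, 0.1)
print(lnLikelihoodDouble((0.0, 1.0, 5.0, 2.0, 0.3), values, errors))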
|
a387d0f8c52b380c57c4cd86ba06111c187db7b8
| 3,645,566
|
import urllib
def moleculeEntry(request, adjlist):
"""
Returns an html page which includes the image of the molecule
and its corresponding adjacency list/SMILES/InChI, as well
as molecular weight info and a button to retrieve thermo data.
Basically works as an equivalent of the molecule search function.
"""
adjlist = urllib.parse.unquote(adjlist)
try:
molecule = Molecule().from_adjacency_list(adjlist)
except:
return HttpResponseBadRequest('<h1>Bad Request (400)</h1><p>Invalid adjacency list.</p>')
structure = getStructureInfo(molecule)
mol_weight = molecule.get_molecular_weight()
old_adjlist = ''
try:
old_adjlist = molecule.to_adjacency_list(remove_h=True, old_style=True)
except:
pass
smiles = ''
try:
smiles = molecule.to_smiles()
except ValueError:
pass
inchi = ''
try:
inchi = molecule.to_inchi()
except ValueError:
pass
return render(request, 'moleculeEntry.html',
{'structure': structure,
'smiles': smiles,
'adjlist': adjlist,
'mol_weight': mol_weight,
'old_adjlist': old_adjlist})
|
6a53812894b7150fc76444238597e8038f8ffa0c
| 3,645,567
|
def has_user_id(id: int):
"""Checks if the Command Author's ID is the same as the ID passed into the function"""
def predicate(ctx) -> bool:
if ctx.author.id == id:
return True
raise MissingID(id, "Author")
return commands.check(predicate)
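# Hypothetical discord.py usage: restrict a command to a single user ID.
# MissingID is assumed to be a custom CheckFailure subclass defined elsewhere.
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
@bot.command()
@has_user_id(123456789012345678)
async def secret(ctx):
    await ctx.send("Only the configured user may run this.")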
|
e83fff93f6ef3ebc06ebadc3470b1e74a18b3a39
| 3,645,568
|
def _polar_gbps(out, in_args, params, per_iter=False):
""" `speed_function` for `benchmark` estimating the effective bandwidth
of a polar decomposition in GB/s. The number of elements is estimated as
2 * the size of the input.
For a matrix multiplication of dimensions `m, n, k` that
took `dt` seconds, we define
`GB/s := (GB of input + GB of output) / (1E9 * dt)`.
"""
out_rows, out_cols, dtype = params[:3]
if out_cols is None:
out_cols = out_rows
dt = out[0]
n_elements = 2 * out_rows * out_cols
result = benchmark_utils.gbps(n_elements, dtype, dt)
header = "GB/s"
return benchmark_utils.per_iter(per_iter, out[-1], result, header)
|
a0428dfc9df6c4dd7f9d25712c3894d71bcd1700
| 3,645,569
|
def is_FreeMonoid(x):
"""
Return True if `x` is a free monoid.
EXAMPLES::
sage: from sage.monoids.free_monoid import is_FreeMonoid
sage: is_FreeMonoid(5)
False
sage: is_FreeMonoid(FreeMonoid(7,'a'))
True
sage: is_FreeMonoid(FreeAbelianMonoid(7,'a'))
False
sage: is_FreeMonoid(FreeAbelianMonoid(0,''))
False
"""
return isinstance(x, FreeMonoid_class)
|
d4fae223bcdec1f365406b9fb3c546f56db38565
| 3,645,570
|
import requests
def get_quote_data(ticker):
"""Inputs: @ticker
Returns a dictionary containing over 70 elements corresponding to the
input ticker, including company name, book value, moving average data,
pre-market / post-market price (when applicable), and more."""
site = "https://query1.finance.yahoo.com/v7/finance/quote?symbols=" + ticker
resp = requests.get(site)
if not resp.ok:
raise AssertionError(
"""Invalid response from server. Check if ticker is valid."""
)
json_result = resp.json()
info = json_result["quoteResponse"]["result"]
return info[0]
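# Example lookup (requires network access; Yahoo's unauthenticated endpoint may
# also expect a browser-like User-Agent nowadays, so treat this as a sketch).
quote = get_quote_data("AAPL")
print(quote.get("longName"), quote.get("regularMarketPrice"))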
|
a23d7e091547ceca3c66f0ae90e84ea9f89d4e1c
| 3,645,571
|
from typing import Dict
import click
def _import_stack_component(
component_type: StackComponentType, component_config: Dict[str, str]
) -> str:
"""import a single stack component with given type/config"""
component_type = StackComponentType(component_type)
component_name = component_config.pop("name")
component_flavor = component_config.pop("flavor")
# make sure component can be registered, otherwise ask for new name
while True:
# check if component already exists
try:
other_component = _get_component_as_dict(
component_type, component_name
)
# component didn't exist yet, so we create it.
except KeyError:
break
# check whether other component has exactly same config as export
other_is_same = True
for key, value in component_config.items():
if key not in other_component or other_component[key] != value:
other_is_same = False
break
# component already exists and is correctly configured -> done
if other_is_same:
return component_name
# component already exists but with different config -> rename
display_name = _component_display_name(component_type)
component_name = click.prompt(
f"A component of type '{display_name}' with the name "
f"'{component_name}' already exists, "
f"but is configured differently. "
f"Please choose a different name.",
type=str,
)
_register_stack_component(
component_type=component_type,
component_name=component_name,
component_flavor=component_flavor,
**component_config,
)
return component_name
|
ec03abab6b005f5047dd7963ab93b83f4f891140
| 3,645,572
|
def unpack_range(a_range):
"""Extract chromosome, start, end from a string or tuple.
Examples::
"chr1" -> ("chr1", None, None)
"chr1:100-123" -> ("chr1", 99, 123)
("chr1", 100, 123) -> ("chr1", 100, 123)
"""
if not a_range:
return Region(None, None, None)
if isinstance(a_range, basestring):
if ':' in a_range and '-' in a_range:
return from_label(a_range, keep_gene=False)
return Region(a_range, None, None)
if isinstance(a_range, (list, tuple)):
if len(a_range) == 3:
return Region(*a_range)
elif len(a_range) == 4:
return Region(*a_range[:3])
raise ValueError("Not a range: %r" % a_range)
|
f44b6069eb5e0fc8c85f01d5cbe708667a09a005
| 3,645,573
|
import torch
def _old_extract_roles(x, roles):
"""
x is [N, B, R, *shape]
roles is [N, B]
"""
N, B, R, *shape = x.shape
assert roles.shape == (N, B)
parts = []
for n in range(N):
parts.append(x[n:n+1, range(B), roles[n]])
return torch.cat(parts, dim=0)
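# Shape check on random data: pick one role index per (N, B) cell and gather the
# corresponding slices, giving an output of shape [N, B, *shape].
x = torch.randn(2, 3, 4, 5)            # N=2, B=3, R=4, shape=(5,)
roles = torch.randint(0, 4, (2, 3))    # a role for every (n, b)
print(_old_extract_roles(x, roles).shape)   # torch.Size([2, 3, 5])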
|
07a7be138558baa28ab1a10e2be2c7f17501ae96
| 3,645,574
|
import os
import numpy as np
def read_cam_intr(file_path):
    """Reading camera intrinsic.
    Args:
        file_path (str): File path.
    Return:
        k (numpy.array): Camera intrinsic matrix, dim = (3, 3).
    """
    assert os.path.exists(file_path), '{}: file not found'.format(file_path)
    # read the single comma-separated line and close the file promptly
    with open(file_path, 'r') as f:
        k_str = f.readlines()[0].strip()
    k_str_list = k_str.split(',')
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here
    k = np.array(k_str_list).astype(float)
    return k.reshape((3, 3))
|
431569a6e1c061214546f89962a2c5ed8c90213d
| 3,645,575
|
def setup(i):
"""
See "install" API with skip_process=yes
"""
i['skip_process']='yes'
return install(i)
|
d4478adc27e444ac43dc9b4c8cd999157555c831
| 3,645,576
|
import requests
def post_merge_request(profile, payload):
"""Do a POST request to Github's API to merge.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
payload
A dict of information to pass to Github's API as the payload for
a merge request, something like this::
{ "base": <base>, "head": <head>, "commit_message": <mesg>}
Returns:
The response returned by the ``requests`` library when it does the
POST request.
"""
repo = profile["repo"]
url = GITHUB_API_BASE_URL + "repos/" + repo + "/merges"
headers = get_headers(profile)
response = requests.post(url, json=payload, headers=headers)
return response
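# Hypothetical call; `profile` carries the repo slug and token (as produced by
# simplygithub.authentication.profile), and the branch names are placeholders.
profile = {"repo": "octocat/hello-world", "token": "<personal-access-token>"}
payload = {"base": "main", "head": "feature-branch",
           "commit_message": "Merge feature-branch into main"}
response = post_merge_request(profile, payload)
print(response.status_code)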
|
26131ac3dc078a9e33b7b2b785a71c51ec1d9072
| 3,645,577
|
def is_valid_table_name(cur, table_name):
"""
Checks whether a name is for a table in the database.
Note: Copied from utils.database for use in testing, to avoid
a circular dependency between tests and implementation.
Args:
cur: sqlite3 database cursor object
table_name (str): name to check
Returns:
True if valid, False otherwise
"""
query = """
SELECT 1
FROM sqlite_master
WHERE type == 'table'
AND name == ?
"""
res = cur.execute(query, (table_name,))
return res.fetchone() is not None
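# Self-contained check against an in-memory SQLite database:
import sqlite3
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE measurements (id INTEGER PRIMARY KEY, value REAL)")
assert is_valid_table_name(cur, "measurements")
assert not is_valid_table_name(cur, "no_such_table")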
|
f1efc66220baa215a73f374da19842ab38c619be
| 3,645,578
|
from sqlalchemy import create_engine
def create_mssql_pyodbc(username, password, host, port, database, **kwargs):  # pragma: no cover
"""
create an engine connected to a mssql database using pyodbc.
"""
return create_engine(
_create_mssql_pyodbc(username, password, host, port, database),
**kwargs
)
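# Hypothetical connection parameters; _create_mssql_pyodbc (defined alongside
# this helper) is expected to build the pyodbc connection URL string.
engine = create_mssql_pyodbc("sa", "Secret123!", "localhost", 1433, "mydb",
                             pool_pre_ping=True)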
|
dac74a0c32f1c693eb059d6a61f84d2288651969
| 3,645,579
|
from selenium.webdriver.support.ui import WebDriverWait
def _wait_for_multiple(driver,
locator_type,
locator,
timeout,
wait_for_n,
visible=False):
"""Waits until `wait_for_n` matching elements to be present (or visible).
Returns located elements when found.
Args:
driver: Selenium web driver instance
locator_type: type of locator (e.g. By.CSS_SELECTOR or By.TAG_NAME)
locator: name of tag, class, etc. to wait for
timeout: how long to wait for presence/visibility of element
wait_for_n: wait until this number of matching elements are present/visible
visible: if True, require that elements are not only present, but visible
"""
wait = WebDriverWait(driver, timeout)
def multiple_found(driver):
elements = driver.find_elements(locator_type, locator)
if visible:
elements = [e for e in elements if e.is_displayed()]
if len(elements) < wait_for_n:
return False
return elements
return wait.until(multiple_found)
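# Hedged usage sketch: wait up to 10 seconds for at least 3 visible table rows.
# The URL is a placeholder and a local chromedriver is assumed.
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
driver.get("https://example.com/table-page")
rows = _wait_for_multiple(driver, By.CSS_SELECTOR, "table tr",
                          timeout=10, wait_for_n=3, visible=True)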
|
d96c10d95877d699f8b284ea41e8b8ef5aebbf3c
| 3,645,580
|
import numpy as np
def relu(x):
    """
    x -- Output of the linear layer, of any shape
    Returns:
    Vec -- Post-activation parameter, of the same shape as x
    cash -- cached input, for computing the backward pass efficiently
    """
Vec = np.maximum(0, x)
assert(Vec.shape == x.shape)
cash = x
return Vec, cash
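# Tiny forward-pass check: negative entries are clipped to zero and the cache
# simply holds the pre-activation input for the backward pass.
z = np.array([[-1.5, 0.0, 2.0]])
a, cache = relu(z)
print(a)        # [[0. 0. 2.]]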
|
1d94d3008aca7ab613dfa92504061264111f1c28
| 3,645,581
|
def _declare_swiftdoc(
*,
actions,
arch,
label_name,
output_discriminator,
swiftdoc):
"""Declares the swiftdoc for this Swift framework.
Args:
actions: The actions provider from `ctx.actions`.
arch: The cpu architecture that the generated swiftdoc belongs to.
label_name: Name of the target being built.
output_discriminator: A string to differentiate between different target intermediate files
or `None`.
swiftdoc: A File referencing the swiftdoc file from a SwiftInfo provider.
Returns:
A File referencing the intermediate swiftdoc.
"""
bundle_doc = intermediates.file(
actions = actions,
target_name = label_name,
output_discriminator = output_discriminator,
file_name = "{}.swiftdoc".format(arch),
)
actions.symlink(
target_file = swiftdoc,
output = bundle_doc,
)
return bundle_doc
|
589828527b8fe775aafca8fb1bee677d716a88c6
| 3,645,582
|
import cv2
def threshold_image(gray_image, name_bw, threshold):
"""
This computes the binary image of the input image using a threshold
:param gray_image: input image
:param threshold: input threshold
:param name_bw: name of the binary image
:return: BW image
"""
# perform Gaussian blurring to remove unwanted noisy components
blurred = cv2.GaussianBlur(gray_image, (5, 5), 0)
# convert the smooth image into a bw image
thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
# perform morphological operation to remove small components
thresh = cv2.erode(thresh, None, iterations=1)
thresh = cv2.dilate(thresh, None, iterations=1)
# store the bw image
cv2.imwrite("threshold_" + name_bw, thresh)
return thresh
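# Hedged usage: load an image as grayscale and binarize it at 127. The path is a
# placeholder; the function also writes "threshold_mask.png" as a side effect.
gray = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)
bw = threshold_image(gray, "mask.png", threshold=127)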
|
98c14281a322b110594e12a4e2b10016a8d6533f
| 3,645,583
|
import jax.numpy as jnp
import jax.scipy.special
import jax.scipy.stats
def kalman_update(
states,
upper_chols,
loadings,
control_params,
meas_sd,
measurements,
controls,
log_mixture_weights,
debug,
):
"""Perform a Kalman update with likelihood evaluation.
Args:
states (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states) with
pre-update states estimates.
upper_chols (jax.numpy.array): Array of shape (n_obs, n_mixtures, n_states,
n_states) with the transpose of the lower triangular cholesky factor
of the pre-update covariance matrix of the state estimates.
loadings (jax.numpy.array): 1d array of length n_states with factor loadings.
control_params (jax.numpy.array): 1d array of length n_controls.
meas_sd (float): Standard deviation of the measurement error.
measurements (jax.numpy.array): 1d array of length n_obs with measurements.
May contain NaNs if no measurement was observed.
controls (jax.numpy.array): Array of shape (n_obs, n_controls) with data on the
control variables.
log_mixture_weights (jax.numpy.array): Array of shape (n_obs, n_mixtures) with
the natural logarithm of the weights of each element of the mixture of
normals distribution.
debug (bool): If true, the debug_info contains the residuals of the update and
their standard deviations. Otherwise, it is an empty dict.
Returns:
states (jax.numpy.array): Same format as states.
new_states (jax.numpy.array): Same format as states.
new_upper_chols (jax.numpy.array): Same format as upper_chols
new_log_mixture_weights: (jax.numpy.array): Same format as log_mixture_weights
new_loglikes: (jax.numpy.array): 1d array of length n_obs
debug_info (dict): Empty or containing residuals and residual_sds
"""
n_obs, n_mixtures, n_states = states.shape
not_missing = jnp.isfinite(measurements)
# replace missing measurements and controls by reasonable fill values to avoid NaNs
# in the gradient calculation. All values that are influenced by this, are
    # replaced by other values later. Choosing the average expected
    # measurement without controls as fill value ensures that all numbers
# are well defined because the fill values have a reasonable order of magnitude.
# See https://github.com/tensorflow/probability/blob/main/discussion/where-nan.pdf
# and https://jax.readthedocs.io/en/latest/faq.html
# for more details on the issue of NaNs in gradient calculations.
_safe_controls = jnp.where(not_missing.reshape(n_obs, 1), controls, 0)
_safe_expected_measurements = jnp.dot(states, loadings) + jnp.dot(
_safe_controls, control_params
).reshape(n_obs, 1)
_safe_measurements = jnp.where(
not_missing, measurements, _safe_expected_measurements.mean(axis=1)
)
_residuals = _safe_measurements.reshape(n_obs, 1) - _safe_expected_measurements
_f_stars = jnp.dot(upper_chols, loadings.reshape(n_states, 1))
_m = jnp.zeros((n_obs, n_mixtures, n_states + 1, n_states + 1))
_m = _m.at[..., 0, 0].set(meas_sd)
_m = _m.at[..., 1:, :1].set(_f_stars)
_m = _m.at[..., 1:, 1:].set(upper_chols)
_r = array_qr_jax(_m)[1]
_new_upper_chols = _r[..., 1:, 1:]
_root_sigmas = _r[..., 0, 0]
_abs_root_sigmas = jnp.abs(_root_sigmas)
# it is important not to divide by the absolute value of _root_sigmas in order
# to recover the sign of the Kalman gain.
_kalman_gains = _r[..., 0, 1:] / _root_sigmas.reshape(n_obs, n_mixtures, 1)
_new_states = states + _kalman_gains * _residuals.reshape(n_obs, n_mixtures, 1)
# calculate log likelihood per individual and update mixture weights
_loglikes_per_dist = jax.scipy.stats.norm.logpdf(_residuals, 0, _abs_root_sigmas)
if n_mixtures >= 2:
_weighted_loglikes_per_dist = _loglikes_per_dist + log_mixture_weights
_loglikes = jax.scipy.special.logsumexp(_weighted_loglikes_per_dist, axis=1)
_new_log_mixture_weights = _weighted_loglikes_per_dist - _loglikes.reshape(
-1, 1
)
else:
_loglikes = _loglikes_per_dist.flatten()
_new_log_mixture_weights = log_mixture_weights
# combine pre-update quantities for missing observations with updated quantities
new_states = jnp.where(not_missing.reshape(n_obs, 1, 1), _new_states, states)
new_upper_chols = jnp.where(
not_missing.reshape(n_obs, 1, 1, 1), _new_upper_chols, upper_chols
)
new_loglikes = jnp.where(not_missing, _loglikes, 0)
new_log_mixture_weights = jnp.where(
not_missing.reshape(n_obs, 1), _new_log_mixture_weights, log_mixture_weights
)
debug_info = {}
if debug:
residuals = jnp.where(not_missing.reshape(n_obs, 1), _residuals, jnp.nan)
debug_info["residuals"] = residuals
residual_sds = jnp.where(
not_missing.reshape(n_obs, 1), _abs_root_sigmas, jnp.nan
)
debug_info["residual_sds"] = residual_sds
debug_info["log_mixture_weights"] = new_log_mixture_weights
return (
new_states,
new_upper_chols,
new_log_mixture_weights,
new_loglikes,
debug_info,
)
|
d44eb03c9b99e288abf0103abfdbf83eec7f9df2
| 3,645,584
|
import re
def sub_repeatedly(pattern, repl, term):
"""apply sub() repeatedly until no change"""
while True:
new_term = re.sub(pattern, repl, term)
if new_term == term:
return term
term = new_term
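# Example: collapsing runs of a repeated character needs several passes, because
# a single re.sub over "aaaaab" still leaves "aaab" behind.
print(sub_repeatedly(r"aa", "a", "aaaaab"))   # -> "ab"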
|
e57c648fb057f81e35e0fc2d2dc57edd0b400baf
| 3,645,585
|
from typing import Tuple
import numpy as np
def _dtw(distance_matrix: np.ndarray, gully: float = 1., additive_penalty: float = 0.,
multiplicative_penalty: float = 1.) -> Tuple[np.ndarray, np.ndarray, float]:
"""
Compute the dynamic time warping distance between two sequences given a distance matrix.
DTW score of lowest cost path through the distance matrix, including penalties.
:param distance_matrix: Distances between two sequences
:param gully: Sequences must match up to this proportion of the shorter sequence.
Default value is 1, which means that the entirety of the shorter sequence must be matched to a part of the
longer sequence.
:param additive_penalty: Additive penalty for non-diagonal moves.
Default value is 0, which means no penalty.
:param multiplicative_penalty: Multiplicative penalty for non-diagonal moves.
Default value is 1, which means no penalty.
:return: Lowest cost path through the distance matrix. Penalties are included, the score is not yet normalized.
"""
if np.isnan(distance_matrix).any():
raise ValueError('NaN values found in distance matrix.')
distance_matrix = distance_matrix.copy()
# Pre-allocate path length matrix
traceback = np.empty(distance_matrix.shape, np.uint8)
# Populate distance matrix with lowest cost path
_dtw_core(distance_matrix, additive_penalty, multiplicative_penalty, traceback)
if gully < 1.:
# Allow the end of the path to start within gully percentage of the smaller distance matrix dimension
gully = int(gully * min(distance_matrix.shape))
else:
# When gully is 1 require matching the entirety of the smaller sequence
gully = min(distance_matrix.shape) - 1
# Find the indices of the smallest costs on the bottom and right edges
i = np.argmin(distance_matrix[gully:, -1]) + gully
j = np.argmin(distance_matrix[-1, gully:]) + gully
# Choose the smaller cost on the two edges
if distance_matrix[-1, j] > distance_matrix[i, -1]:
j = distance_matrix.shape[1] - 1
else:
i = distance_matrix.shape[0] - 1
# Score is the final score of the best path
score = float(distance_matrix[i, j])
    # Pre-allocate the x and y path index arrays (np.int was removed from NumPy; use the builtin int)
    x_indices = np.zeros(sum(traceback.shape), dtype=int)
    y_indices = np.zeros(sum(traceback.shape), dtype=int)
# Start the arrays from the end of the path
x_indices[0] = i
y_indices[0] = j
# Keep track of path length
n = 1
# Until we reach an edge
while i > 0 and j > 0:
        # If the traceback matrix indicates a diagonal move...
if traceback[i, j] == 0:
i = i - 1
j = j - 1
# Horizontal move...
elif traceback[i, j] == 1:
i = i - 1
# Vertical move...
elif traceback[i, j] == 2:
j = j - 1
# Add these indices into the path arrays
x_indices[n] = i
y_indices[n] = j
n += 1
# Reverse and crop the path index arrays
x_indices = x_indices[:n][::-1]
y_indices = y_indices[:n][::-1]
return x_indices, y_indices, score
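# Illustrative call (the companion _dtw_core routine from the same module must be
# available): align two random feature sequences via a cosine distance matrix.
from scipy.spatial.distance import cdist
x = np.random.rand(100, 12)
y = np.random.rand(120, 12)
distance_matrix = cdist(x, y, metric="cosine")
path_x, path_y, score = _dtw(distance_matrix, gully=0.96,
                             additive_penalty=distance_matrix.mean())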
|
388b070d4bd2bbca42371b85d27f0807f86ae09b
| 3,645,586
|
async def _ensure_meadowgrid_security_groups() -> str:
"""
Creates the meadowgrid coordinator security group and meadowgrid agent security
group if they doesn't exist. The coordinator security group allows meadowgrid agents
and the current ip to access the coordinator, as well as allowing the current ip to
ssh. See also _ensure_meadowgrid_agent_security_group.
"""
current_ip_for_ssh = await _get_current_ip_for_ssh()
# allow meadowgrid traffic from the meadowgrid agent security group
agent_security_group_id = ensure_security_group(
_MEADOWGRID_AGENT_SECURITY_GROUP, [(22, 22, f"{current_ip_for_ssh}/32")], []
)
return ensure_security_group(
_MEADOWGRID_COORDINATOR_SECURITY_GROUP,
[
(22, 22, f"{current_ip_for_ssh}/32"),
(
DEFAULT_COORDINATOR_PORT,
DEFAULT_COORDINATOR_PORT,
f"{current_ip_for_ssh}/32",
),
],
[(DEFAULT_COORDINATOR_PORT, DEFAULT_COORDINATOR_PORT, agent_security_group_id)],
)
|
b0fc2c0e1fb767c5cfbb365d0c58cf39d327caf3
| 3,645,587
|
import numpy as np
def _create_pairs_numba(
    to_match, indexer, first_stage_cum_probs, group_codes_per_individual, seed
):
    """
    Args:
        to_match (np.ndarray): 2d boolean array with one row per individual
            and one column per sub-contact model.
indexer (numba.List): Numba list that maps id of county to a numpy array
with the row positions of all individuals from that county.
first_stage_cum_probs(numpy.ndarray): Array of shape n_group, n_groups.
cum_probs[i, j] is the probability that an individual from group i
meets someone from group j or lower.
        group_codes_per_individual (np.ndarray): 1d array with assortative matching group ids,
coded as integers.
Returns:
pairs_of_workers (np.ndarray): 2d integer array with meeting ids.
"""
np.random.seed(seed)
unique_group_codes = np.arange(len(first_stage_cum_probs))
to_match = to_match.copy()
out = np.full(to_match.shape, -1)
n_obs, n_models = to_match.shape
for m in range(n_models):
meeting_id = 0
for i in range(n_obs):
if to_match[i, m]:
group_i = group_codes_per_individual[i]
group_j = choose_other_group(
unique_group_codes, first_stage_cum_probs[group_i]
)
group_j_indices = indexer[group_j]
weights = to_match[group_j_indices, m].astype(np.float64)
j = choose_other_individual(group_j_indices, weights)
if j != -1:
to_match[i, m] = False
to_match[j, m] = False
out[i, m] = meeting_id
out[j, m] = meeting_id
meeting_id += 1
return out
|
5c7bed67a644104dc7b22b79d3858fc5e27cf14d
| 3,645,588
|
def test_set_attr():
"""
Tests that generate_schema returns a schema that has the ability to
set instance variables based on keys of different format in the
dictionary provided in schema.load(d)
"""
class TestObject(object):
def __init__(self):
super().__init__()
self.int_a = 0
self.int_b = 0
@classmethod
def get_fields(cls):
return ["int_a", "int_b"]
def __new__(cls, *args, **kwargs):
instance = super().__new__(cls, *args, **kwargs)
return instance
s = schema.generate_schema(TestObject)
# print(s)
obj = s.load({
"intA": 1,
"intB": 2
})
assert obj.int_a == 1
assert obj.int_b == 2
# TODO: test that no extraneous attributes are set
|
7b12cef9864f6e9d8e6a9d18cb486bbce8812c5e
| 3,645,589
|
import numpy as np
def filter_known_bad(orbit_points):
"""
Filter some commands that are known to be incorrect.
"""
ops = orbit_points
bad = np.zeros(len(orbit_points), dtype=bool)
bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2002:253:10:08:52.239')
bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2004:010:10:00:00.000')
return orbit_points[~bad]
|
c8f64b541be5d2f7fce3554dc83c7f36ee8bc0a4
| 3,645,590
|
def create_lexicon(word_tags):
"""
Create a lexicon in the right format for nltk.CFG.fromString() from
a list with tuples with words and their tag.
"""
# dictionary to filter the double tags
word_dict = {}
for word, tag in word_tags:
if tag not in word_dict:
word_dict[tag] = {word}
else:
word_dict[tag].add(word)
# PRO is the tag for 's, but the 's is not removed on nouns.
word_dict['NN'] = [x.replace('\'s', '') for x in word_dict['NN']]
word_dict['JJ'] = [x.replace('\'s', '') for x in word_dict['JJ']]
del word_dict[',']
word_dict['PRP'].update(word_dict['PRP$'])
del word_dict['PRP$']
word_dict['POS'] = ['"s']
# convert the dictionary to the right NLTK format
lexicon = ''
for key, val in word_dict.items():
lexicon += key + ' -> '
        # wrap every word in quotes
        val = [f'\'{v}\'' for v in val]
        # the words are separated by a pipe
lexicon += ' | '.join(val) + '\n'
return lexicon
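# Small illustration with POS-tagged (word, tag) pairs; the tags this function
# post-processes ('NN', 'JJ', ',', 'PRP', 'PRP$') all need to be present.
word_tags = [("dog", "NN"), ("cat's", "NN"), ("happy", "JJ"),
             (",", ","), ("she", "PRP"), ("her", "PRP$")]
print(create_lexicon(word_tags))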
|
3a91671d559f5924ec9326520db6e11a1672fee4
| 3,645,591
|
import glob
import os
import re
import shutil
from datetime import datetime, timezone
import requests
from rdflib import Graph
from rdflib.namespace import DC, DCAT
# D2S, parsedate, get_yaml_config, load_rdf_to_ldp and the other d2s helpers are
# project-specific and assumed to be importable from the surrounding package.
def process_datasets_metadata(input_file=None, dryrun=True, staging=True, sample=0, report=False, memory='4g', rmlstreamer_run=False):
    """Read an RDF metadata file with infos about datasets, check if the dataset exists in the project SPARQL endpoint,
    and download the data if it is new"""
# If no metadata file provided, we search for one in the current folder
if not input_file:
# Search for ttl metadata file
file_list = glob.glob('*.ttl')
if len(file_list) > 1:
raise Exception("More than 1 metadata file have been found in the current folder: " + ', '.join(file_list))
elif len(file_list) < 1:
# Search for jsonld metadata file if no ttl
jsonld_file_list = glob.glob('*.jsonld')
if len(jsonld_file_list) > 1:
raise Exception("More than 1 metadata file have been found in the current folder: " + ', '.join(jsonld_file_list))
elif len(jsonld_file_list) < 1:
raise Exception("No ttl or jsonld metadata file has been found in the current folder")
else:
input_file = jsonld_file_list[0]
else:
input_file = file_list[0]
print("🔎 Reading the metadata file " + input_file)
os.makedirs('data/input', exist_ok=True)
os.makedirs('output', exist_ok=True)
# Retrieve the infos about files to download from the dataset metadata file
g = Graph()
g.parse(input_file, format=get_parse_format(input_file))
download_file_list = []
datasets_described = set()
i = 0
for subject, download_predicate, download_files_uri in g.triples((None, D2S.downloadFiles, None)):
datasets_described.add(subject)
download_file_list.append({})
download_file_list[i]['downloadUrl'] = str(g.value(download_files_uri, DCAT.downloadURL))
if (download_files_uri, D2S.downloadScript, None) in g:
download_file_list[i]['downloadScript'] = str(g.value(download_files_uri, D2S.downloadScript))
if (download_files_uri, D2S.postProcessScript, None) in g:
download_file_list[i]['postProcessScript'] = str(g.value(download_files_uri, D2S.postProcessScript))
if (download_files_uri, D2S.processedFilename, None) in g:
download_file_list[i]['processedFilename'] = str(g.value(download_files_uri, D2S.processedFilename))
i += 1
# Retrieve the dataset URI and various params in the dataset metadata file
if len(datasets_described) < 1:
raise Exception("No dataset has been found in the metadata file")
elif len(datasets_described) > 1:
raise Exception("More than 1 dataset has been found in the metadata file")
else:
dataset_uri = datasets_described.pop()
if (dataset_uri, DC.identifier, None) in g:
dataset_id_cap = str(g.value(dataset_uri, DC.identifier))
dataset_id = dataset_id_cap.lower()
else:
raise Exception("Could not find the dc:identifier property for the dataset in the metadata file")
if (dataset_uri, D2S.processor, None) in g:
processor = str(g.value(dataset_uri, D2S.processor))
else:
processor = 'rmlmapper-java'
if processor.lower() == 'rmlstreamer':
if not rmlstreamer_run:
print('📤 Copying mappings to the RMLStreamer.')
# Make sure the YARRRML mappings on the DSRI RMLStreamer are up to date
rmlstreamer_dataset_path = '/mnt/datasets/' + dataset_id_cap + '/'
oc_cp_cmd = 'oc cp *.yarrr.yml $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name):' + rmlstreamer_dataset_path
os.system(oc_cp_cmd)
oc_cp_cmd = 'oc cp *.jsonld $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name):' + rmlstreamer_dataset_path
os.system(oc_cp_cmd)
oc_cp_cmd = 'oc cp prepare.sh $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name):' + rmlstreamer_dataset_path
os.system(oc_cp_cmd)
# Run this same function directly in the RMLStreamer
print('☁️ Running the process in the RMLStreamer.')
run_d2s_cmd = '"cd ' + rmlstreamer_dataset_path + ' && d2s run --rmlstreamer"'
rmlstreamer_cmd = 'oc exec $(oc get pod --selector app=flink --selector component=jobmanager --no-headers -o=custom-columns=NAME:.metadata.name) -- bash -c ' + run_d2s_cmd
print(rmlstreamer_cmd)
os.system(rmlstreamer_cmd)
# return process_datasets_metadata(input_file, dryrun, sample, report, memory, True)
return None
if (dataset_uri, D2S.rdfSyntax, None) in g:
rdfSyntax = str(g.value(dataset_uri, D2S.rdfSyntax))
else:
rdfSyntax = 'turtle'
if rdfSyntax == 'ntriples':
output_file_extension = '.nt'
output_file_mimetype = 'application/n-triples'
else:
output_file_extension = '.ttl'
output_file_mimetype = 'text/turtle'
if (dataset_uri, D2S.versionRegex, None) in g:
versionRegex = str(g.value(dataset_uri, D2S.versionRegex))
else:
versionRegex = None
prod_endpoint = get_yaml_config('production')['sparql-endpoint']
prod_ldp = get_yaml_config('production')['virtuoso-ldp-url']
staging_endpoint = get_yaml_config('staging')['sparql-endpoint']
if 'virtuoso-ldp-url' in get_yaml_config('staging').keys():
staging_ldp = get_yaml_config('staging')['virtuoso-ldp-url']
endpoint_user = os.getenv('DBA_USER', 'dav')
endpoint_password = os.getenv('DBA_PASSWORD')
# TODO: Get lastUpdated date and version infos from the production endpoint
# date_last_updated = None
# if prod_endpoint:
# print('Querying the SPARQL endpoint ' + prod_endpoint + ' to retrieve version infos for the dataset ' + dataset_uri)
# query = """PREFIX d2s: <https://w3id.org/d2s/vocab/>
# PREFIX pav: <http://purl.org/pav/>
# SELECT ?lastUpdated WHERE {
# <""" + str(dataset_uri) + """> pav:lastUpdateOn ?lastUpdated .
# }
# """
# # query = """SELECT * WHERE {
# # ?s ?p ?o .
# # } LIMIT 10
# # """
# sparql = SPARQLWrapper(prod_endpoint)
# sparql.setReturnFormat(JSON)
# sparql.setQuery(query)
# results = sparql.query().convert()
# print('SPARQLWrapper Results:')
# print(results["results"]["bindings"])
# last_updated = results["results"]["bindings"]["lastUpdated"]["value"]
# date_last_updated = parsedate(last_updated)
# print(results["results"]["bindings"]["lastUpdated"]["value"])
# else:
# print('No SPARQL endpoint associated, running the download without checking if the graphs need to be updated')
print('\n🗃️ Checking files to download: \n')
# Download if last modified date is later than last updated date (or if modified/updated date could not be fetched)
# file_time = datetime.fromtimestamp(os.path.getmtime(dstFile))
# if not date_last_modified or not date_last_updated or date_last_modified > date_last_updated:
# Download file in the data subfolder
os.chdir('data')
skip_global_download = True
# Check last modified date, then download and post process scripts defined for each file
for ddl_file in download_file_list:
ddl_url = ddl_file['downloadUrl']
# # Extract filename from URI:
# ddl_filename = os.path.basename(urlparse(ddl_url).path)
processed_filename = ddl_file['processedFilename']
if versionRegex:
# TODO: Extract version, then increment it
# and check if new version available
version_search = re.search(versionRegex, ddl_url, re.IGNORECASE)
if version_search:
file_version = version_search.group(1)
print(file_version)
skip_download = True
# Check Last Modified date of the URL to download
print('🔎 Checking Last Modified date of file at ' + ddl_url)
r = requests.head(ddl_url)
if 'last-modified' in r.headers.keys():
url_last_modified = r.headers['last-modified']
ddl_file['lastModified'] = parsedate(url_last_modified)
print('📅 File to download last modified on ' + url_last_modified)
## Check if last date updated from SPARQL endpoint is older than the URL Last Modified date
# if ddl_file['lastModified'] > date_last_updated:
# print('📥 According to Last Modified date, the remote file to download is newer than the existing local file (' + str(local_file_time) + '). Downloading it.')
# skip_download = False
# skip_global_download = False
# elif os.path.exists(processed_filename):
# Download only if processed file does not exist, is older than the file to ddl,
# or if the file to ddl has no LastModified date
if os.path.exists(processed_filename):
# If the file to download is newer than existing local file
if 'lastModified' in ddl_file.keys():
local_file_time = datetime.fromtimestamp(os.path.getmtime(processed_filename), timezone.utc)
if ddl_file['lastModified'] > local_file_time:
print('📥 According to Last Modified date, the remote file to download is newer than the existing local file (' + str(local_file_time) + '). Downloading it.')
skip_download = False
skip_global_download = False
else:
print('⏩️ According to Last Modified date, the remote file to download is not newer than the existing local file at data/' + processed_filename + ' (' + str(local_file_time) + '). Skipping download.')
else:
print('📥 No Last Modified date for this file. Downloading it.')
skip_download = False
skip_global_download = False
else:
print('📥 No existing local file for this file. Downloading it.')
skip_download = False
skip_global_download = False
# Run the download and preprocess scripts if download required
if not skip_download:
if 'downloadScript' in ddl_file:
execute_script(ddl_file['downloadScript'])
elif 'downloadUrl' in ddl_file:
os.system("wget -qN " + ddl_url)
if 'postProcessScript' in ddl_file:
execute_script(ddl_file['postProcessScript'])
print('')
# Run download and post process scripts defined for the whole dataset if at least one file has been downloaded
if not skip_global_download:
if (dataset_uri, D2S.downloadScript, None) in g:
execute_script(str(g.value(dataset_uri, D2S.downloadScript)))
if (dataset_uri, D2S.postProcessScript, None) in g:
execute_script(str(g.value(dataset_uri, D2S.postProcessScript)))
else:
print('⏩️ No dataset has been downloaded, skipping global post processing.')
print('')
# TODO: Create a HTML report about input CSV data with Datapane
# import datapane as dp
# if report:
# print('📋 Produce HTML report for CSV files in data folder with datapane')
# for ddl_file in download_file_list:
# processed_filename = ddl_file['processedFilename']
# if processed_filename.endswith('.csv'):
# df = pd.read_csv(processed_filename)
# dp.Report(
# dp.Text('## ' + processed_filename),
# dp.DataTable(df)
# ).save(path='report-' + processed_filename.replace('.csv', '') + '.html',
# open=True, formatting=dp.ReportFormatting(width=dp.ReportWidth.FULL))
## Automatically unzip files, to be done ad-hoc in prepare.sh?
# print("""find . -name "*.tar.gz" -exec tar -xzvf {} \;""")
# if len(glob.glob('*.zip')) > 0:
# print('Unzipping .zip files ' + ', '.join(glob.glob('*.zip')))
# os.system('unzip -o "*.zip"')
# if len(glob.glob('*.tar.gz')) > 0:
# print('Unzipping .tar.gz files ' + ', '.join(glob.glob('*.tar.gz')))
# os.system("""find . -name "*.tar.gz" -exec tar -xzvf {} \;""")
# if len(glob.glob('*.gz')) > 0:
# print('Unzipping .gz files ' + ', '.join(glob.glob('*.gz')))
# os.system('gzip -f -d *.gz')
# Check for .tsv .txt and .tab then convert to CSV (required for most RML engines)
# tab_files = glob.glob('*.tsv') + glob.glob('*.txt') + glob.glob('*.tab')
# for tsv_file in tab_files:
# csv_file = tsv_file[:-4] + '.csv'
# print('📄 Converting TSV file '+ tsv_file + ' to CSV ' + csv_file)
# try:
# tsv_to_csv_cmd = """sed -e 's/"//g' -e 's/\\t/","/g' -e 's/^/"/' -e 's/$/"/' -e 's/\\r//' """ + tsv_file + """ > """ + csv_file
# os.system(tsv_to_csv_cmd)
# # csv_table=pd.read_table(tsv_file,sep='\t')
# # csv_table.to_csv(csv_file, index=False)
# except Exception as e:
# print('Could not convert the file ' + tsv_file + ' to CSV')
# Create sample for CSV files
if sample > 0:
for csv_file in glob.glob('*.csv'):
print('✂️ Creating a sample file with ' + str(sample) + ' lines for ' + csv_file)
# if not os.path.exists(filename):
full_csv_file = csv_file + '.full'
shutil.copy(csv_file, full_csv_file)
sample_cmd = 'head -n ' + str(sample) + ' ' + full_csv_file + ' > ' + csv_file
os.system(sample_cmd)
# Go back to dataset folder to convert YARRML files
os.chdir('..')
# For each YARRRML mappings: convert to RML and run mapper
for file in glob.glob('*.yarrr.yml'):
yarrrml_filename = os.fsdecode(file)
rml_filename = yarrrml_filename.replace('.yarrr.yml', '.rml.ttl')
print('🦜 Converting YARRRML mapping '+ yarrrml_filename + ' to RML ' + rml_filename)
output_filepath = '../output/' + yarrrml_filename.replace('.yarrr.yml', output_file_extension)
os.system('yarrrml-parser -i ' + yarrrml_filename + ' -o data/' + rml_filename)
# Run RML mapper depending on processor given in the metadata file
if processor.lower() == 'rmlmapper-java':
print('☕️ Running the RML mapper with java to generate the RDF to ' + output_filepath.replace('../', ''))
init_d2s_java('rmlmapper')
# Change dir to fix issue with rmlmapper requiring to load a .dtd locally when reading DrugBank RML
os.chdir('data')
# Copy functions jar file in the same folder where we run the rmlmapper to fix issues with finding the functions
shutil.copy('../../IdsRmlFunctions.jar', 'IdsRmlFunctions.jar')
if 'memory' in get_yaml_config('resources').keys():
memory = get_yaml_config('resources')['memory']
java_opts = "-Xms" + memory + " -Xmx" + memory
rml_cmd = 'java ' + java_opts + ' -jar ' + get_base_dir('rmlmapper.jar') + ' -s ' + rdfSyntax + ' -f ../../functions_ids.ttl -m ' + rml_filename + ' -o ' + output_filepath
os.system(rml_cmd)
os.chdir('..')
# if processor.lower() == 'rmlstreamer':
if rmlstreamer_run:
print('🐿️ Running the RMLStreamer')
rmlstreamer_dataset_path = os.getcwd()
parallel_cores = str(get_yaml_config('resources')['flink-cores'])
os.chdir('data')
rmlstreamer_cmd = '/opt/flink/bin/flink run -p ' + parallel_cores + ' -c io.rml.framework.Main /opt/flink/lib/RMLStreamer.jar toFile -m ' + rmlstreamer_dataset_path + '/data/' + rml_filename + ' -o ' + rmlstreamer_dataset_path + '/output/output-' + dataset_id + '.nt --job-name "RMLStreamer Bio2KG - ' + dataset_id + '"'
os.system(rmlstreamer_cmd)
os.chdir('..')
if processor.lower() == 'rocketrml':
print('🚀 Running RocketRML with NodeJS to generate the RDF to ' + output_filepath)
os.chdir('data')
nodejs_memory='2048'
if 'nodejs-memory' in get_yaml_config('resources').keys():
nodejs_memory = str(get_yaml_config('resources')['nodejs-memory'])
# Try to increase node memory to 2G for large files with --max_old_space_size=2048
os.system(f'node --max_old_space_size={nodejs_memory} ../../rocketrml.js -m {rml_filename} -o {output_filepath}')
os.chdir('..')
# TO CHECK: concatenate produced nt files in 1 file if multiple files
list_ntriples = glob.glob('output/*.nt')
if len(list_ntriples) > 1:
print('🗃️ Concatenate ntriples files: ' + ', '.join(list_ntriples))
output_filepath = 'output/' + dataset_id +'.nt'
if os.path.exists(output_filepath):
os.system('rm ' + output_filepath)
os.system('cat output/*.nt > ' + output_filepath)
os.system('ls output/*.nt | grep -v ' + output_filepath + ' | xargs rm')
# os.system('ls *.nt | grep -v ' + dataset_id + '.nt' + ' | parallel rm')
if dryrun:
print('✅ Dry run completed: RDF generated, but not published')
else:
if staging:
print('🧪 Publishing to staging endpoint')
update_endpoint = staging_endpoint
update_ldp = staging_ldp
else:
print('📰 Publishing the processed file to the production endpoint')
update_endpoint = prod_endpoint
update_ldp = prod_ldp
raise Exception("Publishing not implemented yet")
if (dataset_uri, D2S.graph, None) in g:
dataset_graph = str(g.value(dataset_uri, D2S.graph))
else:
dataset_graph = update_ldp + '/' + dataset_id
output_metadata_file = 'output/metadata.ttl'
metadata_graph = update_ldp + '/metadata-' + dataset_id
metadata_slug = 'metadata-' + dataset_id
if os.path.exists(output_metadata_file):
os.remove(output_metadata_file)
# os.system('rm ' + output_metadata_file)
if len(glob.glob('output/*.ttl')) > 1:
raise Exception("More than 1 turtle output file found. If you produce multiple files as output, use the rdfSyntax ntriples, so the output can be concatenated in one graph per dataset")
# TODO: once RDF ouput files generated, if new version and not dry run: load to production Virtuoso
# Otherwise load to staging Virtuoso and generate metadata
# TODO: do we want 1 graph per dataset or 1 graph per file? I would say 1 per dataset to improve metadata generation per graph
# print(update_endpoint)
# print(endpoint_user)
# print(endpoint_password)
# Iterates the output file to upload them to the Virtuoso LDP triplestore
# Should be only one turtle or ntriples file because the LDP create 1 graph per file
for output_file in glob.glob('output/*'):
# Load the RDF output file to the Virtuoso LDP DAV
# Existing file is overwritten automatically at upload
load_rdf_to_ldp(output_file, output_file_mimetype, update_ldp, dataset_id, endpoint_user, endpoint_password)
# TODO: then run d2s metadata to get HCLS metadata and upload it in the dataset metadata graph
# And compare new version metadata to the current version in production
# generate_hcls_from_sparql(sparql_endpoint, rdf_distribution_uri, metadata_type, graph)
g_metadata = generate_hcls_from_sparql(update_endpoint, dataset_graph, 'hcls', dataset_graph)
g_metadata.serialize(destination=output_metadata_file, format='turtle', indent=4)
load_rdf_to_ldp(output_metadata_file, "Accept: text/turtle", update_ldp, metadata_slug, endpoint_user, endpoint_password)
# TODO: handle dataset_version
print('✅ Dataset processed and loaded to ' + update_endpoint)
# Clear graph SPARQL query
# try:
# sparql = SPARQLWrapper(update_endpoint)
# sparql.setMethod(POST)
# # sparql.setHTTPAuth(BASIC) or DIGEST
# sparql.setCredentials(endpoint_user, endpoint_password)
# query = 'CLEAR GRAPH <' + dataset_graph + '>'
# print('🗑️ Clearing previous graph')
# sparql.setQuery(query)
# query_results = sparql.query()
# print(query_results.response.read())
# except:
# print('Could not delete the graph (probably it does not exist)')
# try:
# insert_results = insert_file_in_sparql_endpoint(file_path, sparql_endpoint, username, password, graph_uri, chunks_size)
# except Exception as e:
# print('Error with INSERT of file: ' + file_path)
# print(e)
|
16d54395cf7a37bcc98a56696bd6b942979f0140
| 3,645,592
|
def display_time(seconds, granularity=2):
"""Display time as a nicely formatted string"""
result = []
if seconds == 0:
return "0 second"
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
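# The function relies on a module-level `intervals` table that is not part of
# this snippet; a commonly used definition (assumed here) is:
intervals = (('weeks', 604800), ('days', 86400), ('hours', 3600),
             ('minutes', 60), ('seconds', 1))
print(display_time(4000))      # '1 hour, 6 minutes'
print(display_time(95, 3))     # '1 minute, 35 seconds'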
|
d8fe16585a66d085a08941b7b038d448fee23570
| 3,645,593
|
def reciprocal(x):
"""
Returns the reciprocal of x.
Args:
x (TensorOp): A tensor.
Returns:
TensorOp: The reciprocal of x.
"""
return ReciprocalOp(x)
|
3678efa2d69948e85ccaae43f34492783a77cef9
| 3,645,594
|
from typing import Callable
from typing import Iterable
def get_protocol(remote_debugging_url: str, request_json: Callable[[str], Iterable]):
""" The current devtools protocol, as JSON """
return request_json(f"{remote_debugging_url}/json/protocol")
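# Example wiring with requests as the JSON fetcher; port 9222 is the default
# when Chrome is launched with --remote-debugging-port=9222.
import requests
protocol = get_protocol("http://localhost:9222",
                        lambda url: requests.get(url).json())
print(protocol["version"])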
|
eaca70f6115a9091f2e35452cae4eb6b22cfdb18
| 3,645,595
|
from argparse import ArgumentParser
def csv_saver_parser():
    """
    Csv saver parser. Returns tuple with args as dictionary
    and suffix that needs to be removed.
    :return: tuple
    """
    csv_saver_parser = ArgumentParser(description='Parser for saving data into CSV files.')
csv_saver_parser.add_argument('--F-csvsave',
help='The field separator to be used. \'\t\' can be used as well. (default: \',\')')
csv_saver_parser.add_argument('--M-csvsave', help='The string representing a missing value. (default: ?)')
csv_saver_parser.add_argument('--N-csvsave', action='store_const', const="", help='Don\'t write a header row.')
csv_saver_parser.add_argument('--decimal-csvsave',
help='The maximum number of digits to print after the decimal place for numeric values (default: 6)')
csv_saver_parser.add_argument('--i-csvsave', help='The input file')
csv_saver_parser.add_argument('--o-csvsave', help='The output file')
return vars((csv_saver_parser.parse_known_args())[0]), '-csvsave'
|
d98fd53217eafa24826df56e114720a7881f17bb
| 3,645,596
|
def get_var(expr: Expression) -> Var:
"""
Warning: this in only true for expressions captured by a match statement.
Don't call it from anywhere else
"""
assert isinstance(expr, NameExpr)
node = expr.node
assert isinstance(node, Var)
return node
|
f8bec4c919858f6aaa5126fc4e55f825f2ca677c
| 3,645,597
|
def sight(unit_type: int):
"""Return the sight range of a unit, given its unit type ID
:param unit_type: the unit type ID, according to :mod:`pysc2.lib.stats`
:type unit_type: int
:return: the unit's sight range
:rtype: float
"""
return __data['Sight'][unit_type]
|
84c3b8fdbfaaede81e7abc10cc190830df9e2c86
| 3,645,598
|
import jwt
def decode_jwt(encoded_jwt):
    """
    Decode a JWT token.
    """
    global key
    # Note: when the payload declares an "aud" (audience) claim, the expected
    # audience must also be passed to decode().
    decoded_jwt = jwt.decode(encoded_jwt, key, audience='dev', algorithms=["HS256"])
    return decoded_jwt
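# Round-trip sketch with PyJWT; the module-level `key` and the 'dev' audience are
# assumptions matching what decode_jwt expects.
key = "secret"
token = jwt.encode({"sub": "user-1", "aud": "dev"}, key, algorithm="HS256")
print(decode_jwt(token))   # {'sub': 'user-1', 'aud': 'dev'}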
|
467bfcce7c5264813ab57f420da277b7674976db
| 3,645,599
|