content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _system_message(message_contents):
    """ Create SYSTEM_MESSAGES_FILE file w/contents as specified.
    This file is displayed in the UI, and can be embedded in nginx 502 (/opt/galaxy/pkg/nginx/html/errdoc/gc2_502.html)
    """
    # First write contents to file.
    # NOTE(review): contents are only appended when the file already exists;
    # despite the docstring, a missing file is never created — confirm intended.
    if os.path.exists(SYSTEM_MESSAGES_FILE):
        # 'a+' appends, so repeated calls accumulate messages rather than replace.
        with open(SYSTEM_MESSAGES_FILE, 'a+') as f:
            f.write(message_contents)
    # Copy message to appropriate places in nginx err_502.html pages.
    # possible_nginx_paths = ['/opt/galaxy/pkg/nginx/html/errdoc/gc2_502.html',
    # '/usr/nginx/html/errdoc/gc2_502.html']
def detected(numbers, mode):
    """Report whether the last element of a numeric sequence is its extreme.

    Arguments
    - numbers: a non-empty sequence of numbers
    - mode: 'max' or 'min', selecting which extreme to test for

    Returns True/False, or None (after printing a message) for a bad mode.
    """
    selectors = {'min': min, 'max': max}
    selector = selectors.get(mode)
    if selector is None:
        print('Must specify either max or min')
        return
    return numbers[-1] == selector(numbers)
def test_delete_multiple_status(mock_event: dict) -> None:
    """
    Test that status effects get correctly removed when there are multiple
    status effects with a duration of 1 in the status effects list
    :param mock_event: Mock AWS lambda event dict
    """
    # Arrange
    mock_event["body"]["Player1"]["action"] = "disrupt"
    mock_event["body"]["Player2"]["action"] = "block"
    # Three effects with duration 1 should all expire after one round.
    mock_event["body"]["Player1"]["status_effects"] = [["disorient", 1],
                                                       ["connected", 2],
                                                       ["poison", 1],
                                                       ["lag", 1],
                                                       ["anti_area", 999]]
    mock_event["body"]["Player1"]["enhanced"] = True
    # Act
    # Perform a round of combat
    # NOTE(review): the same dict is passed twice — presumably do_combat takes
    # (event, context); confirm against do_combat's signature.
    combat_result_1 = do_combat(mock_event, mock_event)
    combat_body_1 = json.loads(combat_result_1["body"])
    # Assert Actual == Expected
    # Durations decrement by 1; "enhanced" adds an enhancement_sickness effect.
    assert combat_body_1["Player1"]["status_effects"] == [["connected", 1],
                                                          ["anti_area", 998],
                                                          ["enhancement_sickness", 1]]
    assert combat_body_1["Player2"]["status_effects"] == [["prone", 1]]
def generator_v0( samples, cfg ) :
    """Taken from section 18. Generators.

    Infinite batch generator for Keras-style training: yields
    (X_train, y_train) pairs of images and steering angles, augmenting
    each sample with its horizontal mirror (and negated angle).
    """
    num_samples = len(samples)
    batch_size = cfg["batch_size"]
    # Each sample contributes 2 images (original + mirror), so only half a
    # batch of samples is read per step to produce batch_size images.
    step_size = batch_size // 2
    while True :
        # Reshuffle once per epoch over the sample list.
        samples = sklearn.utils.shuffle(samples)
        for offset in range( 0, num_samples, step_size ) :
            batch_samples = samples[offset : offset + step_size ]
            images = []
            angles = []
            for sample in batch_samples:
                # sample[0] is the image path; keep only the basename under DATA_DIR/IMG/.
                fname = DATA_DIR + "IMG/" + sample[0].split('/')[-1]
                center_image = cv2.imread( fname )
                # sample[3] holds the steering angle for the center camera.
                center_angle = float( sample[3] )
                images.append( center_image )
                angles.append( center_angle )
                # Mirror horizontally (flip the width axis) and negate the angle.
                mirrored_img = center_image[ : ,::-1, :]
                images.append( mirrored_img )
                angles.append( -center_angle )
            X_train = np.array( images )
            y_train = np.array( angles )
            yield X_train, y_train
def calculate_compass_bearing(point_a, point_b):
    """
    Calculates the compass bearing between two points.
    The formulae used is the following:
        θ = atan2(sin(Δlong).cos(lat2),
                  cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
    :Parameters:
      - `point_a: The tuple representing the latitude/longitude for the
        first point. Latitude and longitude must be in decimal degrees
      - `point_b: The tuple representing the latitude/longitude for the
        second point. Latitude and longitude must be in decimal degrees
    :Returns:
      The bearing in degrees (0 <= bearing < 360, 0 = north, 90 = east)
    :Returns Type:
      float
    :Raises:
      TypeError if either argument is not a tuple.
    """
    # LICENSE: public domain from https://gist.github.com/jeromer/2005586
    # isinstance instead of `type(x) != tuple`: idiomatic and accepts tuple subclasses.
    if not isinstance(point_a, tuple) or not isinstance(point_b, tuple):
        raise TypeError("Only tuples are supported as arguments")
    lat1 = math.radians(point_a[0])
    lat2 = math.radians(point_b[0])
    diff_long = math.radians(point_b[1] - point_a[1])
    x = math.sin(diff_long) * math.cos(lat2)
    y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
                                           * math.cos(lat2) * math.cos(diff_long))
    initial_bearing = math.atan2(x, y)
    # Now we have the initial bearing but math.atan2 return values
    # from -180° to + 180° which is not what we want for a compass bearing
    # The solution is to normalize the initial bearing as shown below
    initial_bearing = math.degrees(initial_bearing)
    compass_bearing = (initial_bearing + 360) % 360
    return compass_bearing
def parse_instructions(instruction_list):
    """
    Parse instruction strings (e.g. 'N10') into a list of dicts with
    'direction' (single word character) and 'value' (digit string, possibly
    empty) keys. Non-matching strings are silently skipped.
    """
    # Compile once instead of re-matching the pattern string per item.
    pattern = re.compile(r"(?P<direction>\w)(?P<value>\d*)")
    parsed = []
    for raw in instruction_list:
        match = pattern.match(raw)
        if match:
            parsed.append(match.groupdict())
    return parsed
def calc_elapsed_sleep(in_num, hyp_file, fpath, savedir, export=True):
    """
    Calculate minutes of elapsed sleep from a hypnogram file & concatenate stage 2 sleep files
    Parameters
    ----------
    in_num: str
        patient identifier
    hyp_file: str (format: *.txt)
        file with hypnogram at 30-second intervals
    fpath: str
        path to EEG files cut by sleep stage
    savedir: str
        path to save EEG files cut by hrs elapsed sleep
    export: bool (default: True)
        whether to export blocked dataframes
    Returns
    -------
    .csv files with EEG data blocked in two-hour chunks (according to Purcell et al. 2017)
    OR
    tuple of pd.DataFrames (0-2h, 2-4h, 4-6h, 6-8h blocks)
    """
    # calculate elapsed sleep for each 30-second time interval
    print('Loading hypnogram...')
    sleep_scores = [1, 2, 3, 4, 5] # exclude 0 and 6 for awake and record break
    hyp = pd.read_csv(hyp_file, header=None, index_col=[0], sep='\t', names=['time', 'score'], parse_dates=True)
    # Each 30-second epoch scored as sleep contributes 0.5 minutes.
    mins_elapsed = hyp.score.isin(sleep_scores).cumsum()/2
    # get a list of all matching stage-2 files
    glob_match = f'{fpath}/{in_num}*_s2_*'
    files = glob.glob(glob_match)
    # make list of dfs for concat
    print('Reading data...')
    data = [pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True) for file in files]
    # add NaN to the end of each df so concatenated segments stay separable
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — this requires pandas < 2.
    data_blocked = [df.append(pd.Series(name=df.iloc[-1].name + pd.Timedelta(milliseconds=1))) for df in data]
    # concatenate the dfs
    print('Concatenating data...')
    s2_df = pd.concat(data_blocked).sort_index()
    # assign indices to hours elapsed sleep
    print('Assigning minutes elapsed...')
    idx0_2 = mins_elapsed[mins_elapsed.between(0, 120)].index
    idx2_4 = mins_elapsed[mins_elapsed.between(120.5, 240)].index
    idx4_6 = mins_elapsed[mins_elapsed.between(240.5, 360)].index
    idx6_8 = mins_elapsed[mins_elapsed.between(360.5, 480)].index
    # cut dataframe into blocks by elapsed sleep (0-2, 2-4, 4-6, 6-8)
    df_two = s2_df[(s2_df.index > idx0_2[0]) & (s2_df.index < idx0_2[-1])]
    df_four = s2_df[(s2_df.index > idx2_4[0]) & (s2_df.index < idx2_4[-1])]
    df_six = s2_df[(s2_df.index > idx4_6[0]) & (s2_df.index < idx4_6[-1])]
    df_eight = s2_df[(s2_df.index > idx6_8[0]) & (s2_df.index < idx6_8[-1])]
    if export:
        # export blocked data
        if not os.path.exists(savedir):
            print(savedir + ' does not exist. Creating directory...')
            os.makedirs(savedir)
        print('Saving files...')
        for df, hrs in zip([df_two, df_four, df_six, df_eight], ['0-2hrs', '2-4hrs', '4-6hrs', '6-8hrs']):
            date = df.index[0].strftime('%Y-%m-%d')
            savename = in_num + '_' + date + '_s2_' + hrs + '.csv'
            df.to_csv(os.path.join(savedir, savename))
        print(f'Files saved to {savedir}')
    else:
        # BUG FIX: the original `return df_two df_four df_six df_eight` was a
        # SyntaxError — commas were missing between the returned values.
        return df_two, df_four, df_six, df_eight
    print('Done')
def delete_file(path):
    """
    Recursively delete every .h5 and .json file under a directory.
    :param path: str, dir path
    :return: None
    """
    for entry in os.listdir(path):
        # Build the absolute path of the file or directory.
        child = os.path.join(path, entry)
        if os.path.isfile(child):
            # Only model/config artifacts are removed; other files are kept.
            if child.endswith((".h5", ".json")):
                os.remove(child)
        else:
            # Recurse into subdirectories.
            delete_file(child)
def label(ctx, name):
    """Manipulate labels.

    Stores the label name on the shared context object so that
    subcommands can read it.
    NOTE(review): presumably a click group callback (decorator not visible
    in this chunk) — confirm ctx.obj is initialized by the parent command.
    """
    ctx.obj['name'] = name
def valid_pairs(pairs, chain):
    """
    Return True iff every adjacent pair in `chain` appears in `pairs`,
    in either order (e.g. ETH_XMR counts if XMR_ETH is listed).
    An empty or single-element chain is trivially valid.
    """
    return all(
        (first, second) in pairs or (second, first) in pairs
        for first, second in zip(chain, chain[1:])
    )
def setup(sub_args, ifiles, repo_path, output_path):
    """Setup the pipeline for execution and creates config file from templates
    @param sub_args <parser.parse_args() object>:
        Parsed arguments for run sub-command
    @param ifiles <list>:
        Input files (FastQ/BAM) to validate and record in the config
    @param repo_path <str>:
        Path to installation or source code and its templates
    @param output_path <str>:
        Pipeline output path, created if it does not exist
    @return config <dict>:
        Config dictionary containing metadata to run the pipeline
    """
    # Check for mixed inputs,
    # inputs which are a mixture
    # of FastQ and BAM files
    mixed_inputs(ifiles)
    # Resolves PATH to reference file
    # template or a user generated
    # reference genome built via build
    # subcommand
    genome_config = os.path.join(repo_path,'config','genome.json')
    # if sub_args.genome.endswith('.json'):
    # Provided a custom reference genome generated by build pipline
    # genome_config = os.path.abspath(sub_args.genome)
    required = {
        # Base configuration file
        "base": os.path.join(repo_path,'config','config.json'),
        # Template for project-level information
        "project": os.path.join(repo_path,'config','containers.json'),
        # Template for genomic reference files
        # User provided argument --genome is used to select the template
        "genome": genome_config,
        # Template for tool information
        "tools": os.path.join(repo_path,'config', 'modules.json'),
    }
    # Create the global or master config
    # file for pipeline, config.json
    config = join_jsons(required.values()) # uses templates in config/*.json
    config['project'] = {}
    config = add_user_information(config)
    config = add_rawdata_information(sub_args, config, ifiles)
    # Resolves if an image needs to be pulled
    # from an OCI registry or a local SIF exists
    config = image_cache(sub_args, config, repo_path)
    # Add other runtime info for debugging
    config['project']['version'] = __version__
    config['project']['workpath'] = os.path.abspath(sub_args.output)
    git_hash = git_commit_hash(repo_path)
    config['project']['git_commit_hash'] = git_hash # Add latest git commit hash
    config['project']['pipeline_path'] = repo_path # Add path to installation
    # Add all cli options for data provenance
    for opt, v in vars(sub_args).items():
        if opt == 'func':
            # Pass over sub command's handler
            continue
        elif not isinstance(v, (list, dict)):
            # CLI value can be converted to a string
            v = str(v)
        config['options'][opt] = v
    # Save config to output directory
    # NOTE(review): output_path is assumed to exist here — confirm the caller
    # creates it before invoking setup().
    with open(os.path.join(output_path, 'config.json'), 'w') as fh:
        json.dump(config, fh, indent = 4, sort_keys = True)
    return config
def fqname_for(obj: Any) -> str:
    """
    Return the fully qualified name (``module.qualname``) of ``obj``.

    Parameters
    ----------
    obj
        The class or function we are interested in.

    Returns
    -------
    str
        The fully qualified name of ``obj``.

    Raises
    ------
    RuntimeError
        If ``obj`` was defined inside a function body (its qualified name
        contains ``<locals>``) and therefore cannot be re-imported by name.
    """
    qualname = obj.__qualname__
    if "<locals>" in qualname:
        raise RuntimeError(
            "Can't get fully qualified name of locally defined object. "
            f"{qualname}"
        )
    return ".".join((obj.__module__, qualname))
def analyzer_zipfile(platform, monitor):
    """Creates the Zip file that is sent to the Guest.

    @param platform: guest OS name used to pick the analyzer directory.
    @param monitor: monitoring component name (or "latest" pointer file).
    @return: the zip archive contents as bytes.
    @raise CuckooGuestError: if no analyzer exists for the platform.
    """
    t = time.time()
    zip_data = io.BytesIO()
    zip_file = zipfile.ZipFile(zip_data, "w", zipfile.ZIP_STORED)
    # Select the proper analyzer's folder according to the operating
    # system associated with the current machine.
    root = cwd("analyzer", platform)
    root_len = len(os.path.abspath(root))
    if not os.path.exists(root):
        log.error("No valid analyzer found at path: %s", root)
        raise CuckooGuestError(
            "No valid analyzer found for %s platform!" % platform
        )
    # Walk through everything inside the analyzer's folder and write
    # them to the zip archive. (Loop variable renamed from `root` to avoid
    # shadowing the analyzer root path.)
    for base, dirs, files in os.walk(root):
        archive_root = os.path.abspath(base)[root_len:]
        for name in files:
            path = os.path.join(base, name)
            archive_name = os.path.join(archive_root, name)
            zip_file.write(path, archive_name)
    # Include the chosen monitoring component and any additional files.
    if platform == "windows":
        dirpath = cwd("monitor", monitor)
        # Generally speaking we should no longer be getting symbolic links for
        # "latest" anymore, so in the case of a file; follow it.
        if os.path.isfile(dirpath):
            # BUG FIX: the file handle was previously leaked (opened inline
            # and never closed); use a context manager instead.
            with open(dirpath, "rb") as f:
                monitor = os.path.basename(f.read().strip())
            dirpath = cwd("monitor", monitor)
        for name in os.listdir(dirpath):
            zip_file.write(
                os.path.join(dirpath, name), os.path.join("bin", name)
            )
        # Dump compiled "dumpmem" Yara rules for zer0m0n usage.
        zip_file.write(cwd("stuff", "dumpmem.yarac"), "bin/rules.yarac")
    zip_file.close()
    data = zip_data.getvalue()
    if time.time() - t > 10:
        log.warning(
            "It took more than 10 seconds to build the Analyzer Zip for the "
            "Guest. This might be a serious performance penalty. Is your "
            "analyzer/windows/ directory bloated with unnecessary files?"
        )
    return data
def symbol_size(values):
    """ Rescale given values to reasonable symbol sizes in the plot.

    The largest value maps to ``max_size`` and the smallest to ``min_size``,
    linearly in between. A constant array (zero spread) maps every entry to
    the midpoint size instead of dividing by zero.
    """
    max_size = 50.0
    min_size = 5.0
    spread = values.max() - values.min()
    if spread == 0:
        # BUG FIX: previously this divided by zero for constant input.
        # `values * 0.0` keeps the array shape without needing numpy here.
        return values * 0.0 + (max_size + min_size) / 2.0
    # Rescale max.
    slope = (max_size - min_size)/spread
    return slope*(values - values.max()) + max_size
def test_distance_function(cosine_kmeans):
    """
    Checks that cosine kmeans uses distance_cosine as the distance calculator.

    :param cosine_kmeans: fixture providing a kmeans instance configured
        with the cosine distance (fixture defined elsewhere).
    """
    # NOTE(review): type(distance_cosine) is just `function`, so this only
    # verifies distance_func is *a* function, not distance_cosine itself —
    # consider `cosine_kmeans.distance_func is distance_cosine`.
    assert isinstance(cosine_kmeans.distance_func, type(distance_cosine))
def AddServiceAccountArg(parser):
    """Adds argument for specifying service account used by the workflow."""
    help_text = (
        'The service account that should be used as '
        'the workflow identity. "projects/PROJECT_ID/serviceAccounts/" prefix '
        'may be skipped from the full resource name, in that case '
        '"projects/-/serviceAccounts/" is prepended to the service account ID.')
    parser.add_argument('--service-account', help=help_text)
def delete(id):
    """Soft delete a patient.

    Marks the patient row as deleted (timestamp + user) instead of removing
    it, then redirects to the screener index page.
    """
    # Authorization check; raises/aborts if the current user may not act on this patient.
    check_patient_permission(id)
    # NOTE(review): Patient.query.get(id) may return None for an unknown id,
    # which would raise AttributeError below — confirm ids are pre-validated.
    patient = Patient.query.get(id)
    patient.deleted = datetime.datetime.now()
    patient.deleted_by = current_user
    db.session.commit()
    return redirect(url_for('screener.index'))
def system_temp_dir():
    """
    Return the global temp directory for the current user.

    Uses the SCANCODE_TMP environment variable when set; otherwise a
    per-user 'scancode_<username>' directory under the system temp dir.
    The directory is created if missing.
    """
    temp_dir = os.getenv('SCANCODE_TMP')
    if not temp_dir:
        # Sanitize the username so the path is a safe Python/file identifier.
        sc = text.python_safe_name('scancode_' + system.username)
        temp_dir = os.path.join(tempfile.gettempdir(), sc)
    # Idempotent: create_dir is assumed to be a no-op if the dir exists.
    create_dir(temp_dir)
    return temp_dir
def _pipeline_network_multiple_database(database: List[str], kernel_method: Callable,
                                        filter_network_omic: Union[List, str]) -> Union[Matrix, str]:
    """Process network for a multiple database.

    Resolves (or downloads/builds) a kernel or graph covering the requested
    databases, returning either a kernel Matrix or a path to a pickled one.

    :param database: list of database names (case/space-insensitive).
    :param kernel_method: callable used to derive a kernel from a graph.
    :param filter_network_omic: omic filter; if falsy, a kernel is generated
        and cached instead of returning the raw subgraph.
    """
    network = None
    # Normalize database names for lookup ('My DB' -> 'my_db').
    db_norm = frozenset([db.lower().replace(' ', '_') for db in database])
    if db_norm in list(PATHME_MAPPING.keys()):
        # Known PathMe combination: map to its canonical identifier.
        # NOTE(review): after this assignment db_norm is presumably a string
        # key, not a frozenset — confirm against PATHME_MAPPING's values.
        db_norm = PATHME_MAPPING[db_norm]
        kernels_db_path = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'pathme')
        kernels_files_list = get_or_create_dir(kernels_db_path)
        # Reuse a cached kernel file if one matches the normalized name.
        for kernel in kernels_files_list:
            if db_norm in kernel or db_norm == kernel:
                network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db', f'{db_norm}.pickle')
                break
        if not network:
            # No cached kernel: download the prebuilt one from Google Drive.
            network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db', f'{db_norm}.pickle')
            GoogleDriveDownloader.download_file_from_google_drive(file_id=DATABASE_LINKS[db_norm],
                                                                  dest_path=network,
                                                                  unzip=True)
    else:
        # Arbitrary combination: only the PathMe subset is supported.
        intersecc_db = db_norm.intersection(PATHME_DB)
        # Build a stable composite identifier like '_kegg_reactome'.
        intersecc_db_str = ''
        for db_name in intersecc_db:
            intersecc_db_str += f'_{db_name}'
        if intersecc_db:
            # 1) Try a cached kernel for this exact combination.
            kernels_db_path = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db')
            kernels_files_list = get_or_create_dir(kernels_db_path)
            for kernel_file in kernels_files_list:
                if intersecc_db_str == kernel_file:
                    network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db',
                                           f'{intersecc_db_str}.pickle')
                    break
            if not network:
                # 2) Try a cached graph for this combination.
                graph_db_path = os.path.join(DEFAULT_DIFFUPATH_DIR, 'graphs', 'by_db')
                graphs_files_list = get_or_create_dir(graph_db_path)
                if graphs_files_list:
                    for graph_file in graphs_files_list:
                        if f'{intersecc_db_str}.pickle' == graph_file:
                            network = os.path.join(DEFAULT_DIFFUPATH_DIR, 'graphs', 'by_db',
                                                   f'{intersecc_db_str}.pickle')
                            break
                if not network:
                    # 3) Build the subgraph from the full graph and cache it.
                    graph = process_graph_from_file(GRAPH_PATH)
                    network = get_subgraph_by_annotation_value(graph,
                                                               'database',
                                                               intersecc_db
                                                               )
                    to_pickle(network, os.path.join(DEFAULT_DIFFUPATH_DIR, 'graphs', 'by_db',
                                                    f'{intersecc_db_str}.pickle'))
                if not filter_network_omic:
                    # Turn the (sub)graph into a kernel and cache it for reuse.
                    click.secho(f'{EMOJI}Generating kernel from {GRAPH_PATH} {EMOJI}')
                    network = get_kernel_from_graph(network, kernel_method)
                    click.secho(f'{EMOJI}Kernel generated {EMOJI}')
                    to_pickle(network, os.path.join(DEFAULT_DIFFUPATH_DIR, 'kernels', 'by_db',
                                                    f'{db_norm}.pickle'))
        else:
            raise ValueError(
                'Subgraph filtering by database only supported for PathMe network (KEGG, Reactome and Wikipathways).')
    return network
def _to_tensor(args, data):
    """Convert ``data`` to a tensor of the active backend.

    :param args: parsed arguments; ``args.device == "GPU"`` moves the torch
        tensor to CUDA.
    :param data: array-like data to convert.
    :return: a torch or tensorflow tensor.
    :raises RuntimeError: if neither the torch nor the tf backend is active
        (previously this fell through and silently returned None).
    """
    if vega.is_torch_backend():
        import torch
        data = torch.tensor(data)
        if args.device == "GPU":
            return data.cuda()
        else:
            return data
    elif vega.is_tf_backend():
        import tensorflow as tf
        data = tf.convert_to_tensor(data)
        return data
    # BUG FIX: make the unsupported-backend case explicit instead of
    # returning None implicitly.
    raise RuntimeError("Unsupported backend: expected torch or tf.")
def materialize_jupyter_deployment(
    config: ClusterConfig,
    uuid: str,
    definition: DeploymentDefinition) -> JupyterDeploymentImpl:  # noqa
    """Materializes the Jupyter deployment definition.

    Thin wrapper that deserializes the stored definition value into a
    concrete JupyterDeploymentImpl bound to the given cluster.

    :param config: Cluster to materialize the Jupyter deployment with.
    :param uuid: Unique deployment id.
    :param definition: Deployment definition to materialize.
    :return: The deserialized Jupyter deployment implementation.
    """
    jupyter_deployment = deserialize_jupyter_deployment_impl(
        config=config,
        uuid=uuid,
        serialized=definition.value)
    return jupyter_deployment
async def test_filter_matching_past_event(mock_now, hass, calendar):
    """Test that the matching past event is not returned.

    :param mock_now: fixture freezing "now" so the event is in the past.
    :param hass: Home Assistant test instance fixture.
    :param calendar: mocked CalDAV calendar fixture.
    """
    config = dict(CALDAV_CONFIG)
    # Custom calendar whose search pattern matches only the past event.
    config["custom_calendars"] = [
        {"name": "Private", "calendar": "Private", "search": "This is a normal event"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    # "off" means no matching upcoming event — the past event was filtered out.
    assert state.state == "off"
def show_M(N):
    """Print the N x N outer-product matrix M with M[k, n] = k * n.

    N: int
    """
    col = np.arange(N)
    # Reshape to a column vector so broadcasting yields the full matrix.
    row = col.reshape((N, 1))
    print("M:", row * col)
def update_weekly_downloads():
    """Update the weekly "downloads" from the users_install table.

    Counts per-addon installs over the last 7 days and fans the work out
    as async celery subtasks in chunks of 1000 addons.
    """
    # Abort if a search reindex is running to avoid conflicting writes.
    raise_if_reindex_in_progress()
    interval = datetime.datetime.today() - datetime.timedelta(days=7)
    # One row per addon with its install count over the interval.
    counts = (Installed.objects.values('addon')
              .filter(created__gte=interval,
                      addon__type=amo.ADDON_WEBAPP)
              .annotate(count=Count('addon')))
    ts = [webapp_update_weekly_downloads.subtask(args=[chunk])
          for chunk in chunked(counts, 1000)]
    TaskSet(ts).apply_async()
def _CreateLSTMPruneVariables(lstm_obj, input_depth, h_depth):
    """Function to create additional variables for pruning.

    Creates the non-trainable bookkeeping variables (mask, threshold, and
    the weight-history/gradient slots used for gradient-based pruning) on
    the given LSTM object.
    """
    weight_shape = [input_depth + h_depth, 4 * h_depth]

    def _aux_variable(name, shape, initializer):
        # Every pruning variable is non-trainable and shares the cell dtype.
        return lstm_obj.add_variable(
            name=name,
            shape=shape,
            initializer=initializer,
            trainable=False,
            dtype=lstm_obj.dtype)

    mask = _aux_variable("mask", weight_shape, tf.ones_initializer())
    threshold = _aux_variable("threshold", [], tf.zeros_initializer())
    # Add old_weights, old_old_weights, gradient for gradient
    # based pruning.
    old_weight = _aux_variable("old_weight", weight_shape,
                               tf.zeros_initializer())
    old_old_weight = _aux_variable("old_old_weight", weight_shape,
                                   tf.zeros_initializer())
    gradient = _aux_variable("gradient", weight_shape,
                             tf.zeros_initializer())
    return mask, threshold, old_weight, old_old_weight, gradient
def get_index_fredkin_gate(N, padding = 0):
    """Get parameters for log2(N) Fredkin gates.

    Args:
        - N (int): dimension of the state space (a power of two)
        - padding (int, optional): offset added to every index. Defaults to 0.

    Returns:
        - list of int: indices 2**i + padding for i in 0..log2(N)-1,
          used as params for the second and third Fredkin gates
    """
    num_gates = int(np.log2(N))
    return [2 ** i + padding for i in range(num_gates)]
def import_by_name(name):
    """
    Dynamically import an object by its dotted path.

    E.g. ``import_by_name("os.path.join")`` returns the ``join`` function
    from the ``os.path`` module.
    """
    # Split "pkg.module.obj" into module path and final attribute name.
    module_name, _, obj_name = name.rpartition(".")
    module = __import__(module_name, globals(), locals(), [obj_name])
    return getattr(module, obj_name)
def fyolo_vgg_voc(backbone="vgg16", num_layers=13, pretrained_base=True,
pretrained=False, num_sync_bn_devices=-1, **kwargs):
"""FYOLO of VGG on VOC dataset
Parameters
----------
backbone : str
Use the imagenet pretrained backbone ("vgg11", "vgg13" or "vgg16") for initialization.
num_layers : int
Keep the first num_layers of pretrained darknet to build an fnet.
pretrained_base : boolean
Whether fetch and load pretrained weights for base network.
pretrained : boolean
Whether fetch and load pretrained weights for the entire network.
num_sync_bn_devices : int
Number of devices for training. If `num_sync_bn_devices < 2`, SyncBatchNorm is disabled.
Returns
-------
mxnet.gluon.HybridBlock
Fully hybrid yolo3 network.
"""
from gluoncv.data import VOCDetection
from .vgg import get_vgg_lsf
pretrained_base = False if pretrained else pretrained_base
base_net = get_vgg_lsf(backbone=backbone, keep_layers=num_layers, pretrained=pretrained_base,
num_sync_bn_devices=num_sync_bn_devices, **kwargs)
classes = VOCDetection.CLASSES
# TODO @ xyutao: Implemenet vgg-based single-scale yolo. | 31,527 |
def f1_score(y_true, y_pred):
    """F-measure: harmonic mean of precision and recall.

    Relies on `precision` and `true_positive_rate` defined elsewhere in
    this module.
    NOTE(review): divides by (p + r) with no guard — when both are zero
    (no positive predictions and no positives) this raises
    ZeroDivisionError (or yields NaN for tensor inputs); confirm callers
    handle that case.
    """
    p = precision(y_true, y_pred)
    r = true_positive_rate(y_true, y_pred)
    return 2 * (p * r) / (p + r)
def hexColorToInt(rgb):
    """Convert an 'RRGGBB' hex color string to an STK integer color code.

    STK stores colors as 0xBBGGRR, i.e. with the channel order reversed.
    """
    r, g, b = (int(rgb[i:i + 2], 16) for i in range(0, 6, 2))
    # Pack as BGR: blue in the high byte, red in the low byte.
    return (b << 16) | (g << 8) | r
def reset_speed():
    """
    reset vertical speed as score achieves certain level
    :return: float, new vertical speed

    Reads the module-level `score` and rescales the global `y_speed` at
    score milestones 50, 70 and 120, then pushes the new velocity to the
    graphics layer.
    """
    global y_speed
    # when score achieves 50
    if score == 50:
        # ball moves faster
        y_speed = y_speed * 1.2
        # NOTE(review): `dy` is not defined in this function — presumably a
        # module-level value, but it looks like this should pass the freshly
        # updated `y_speed`; confirm.
        graphics.reset_vertical_velocity(dy)
    # when score achieves 70
    elif score == 70:
        # ball moves faster
        y_speed = y_speed * 1.2
        graphics.reset_vertical_velocity(dy)
    elif score == 120:
        # ball moves faster
        y_speed = y_speed * 1.3
        graphics.reset_vertical_velocity(dy)
def test(model, X, model_type, test_type, counter=False):
    """Test functions.

    Evaluates `model` on `X`, records the MSE in the module-level `metrics`
    dict under a key built from model/test type, and returns the MSE.

    :param model: trained model (torch module or project model with .test()).
    :param X: input data; list of arrays for the notear variants.
    :param model_type: one of 'notear-mlp', 'notear-castle', 'ISL'.
    :param test_type: 'ID' (in-distribution) or 'OOD'.
    :param counter: whether this is a counterfactual evaluation (changes the
        metrics key prefix only).
    """
    # NOTE(review): the 'notear-mlp' and 'notear-castle' branches are
    # byte-identical — possibly intentional, confirm.
    if model_type == 'notear-mlp':
        X = np.vstack(X)
        y = model(torch.from_numpy(X))
        y = y.cpu().detach().numpy()
        # Compare the model's first output column against the first feature.
        mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
    elif model_type == 'notear-castle':
        X = np.vstack(X)
        y = model(torch.from_numpy(X))
        y = y.cpu().detach().numpy()
        mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
    elif model_type == 'ISL':
        y = model.test(X)
        mse = mean_squared_loss(y.shape[0] * y.shape[1], y, X[:, :, 0][:, :,
                                                                      np.newaxis])
    # Record the result under a descriptive metrics key.
    if not counter:
        if test_type == 'ID':
            metrics[f'{model_type}_testID_MSE'] = mse
        elif test_type == 'OOD':
            metrics[f'{model_type}_testOOD_MSE'] = mse
    else:
        if test_type == 'ID':
            metrics[f'{model_type}_counter_testID_MSE'] = mse
        elif test_type == 'OOD':
            metrics[f'{model_type}_counter_testOOD_MSE'] = mse
    return mse
def scale():
    """
    Returns class instance of `Scale`.
    For more details, please have a look at the implementations inside `Scale`.
    Returns
    -------
    Scale :
        Class instance implementing all 'scale' processes.
    """
    # Factory function: a fresh Scale instance per call (no caching).
    return Scale()
def imread_rgb(filename):
    """Read the image at *filename* and return it as an RGB numpy array.

    OpenCV loads images in BGR channel order, so the channels are swapped
    before returning.
    """
    return cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
def insert_at_index(rootllist, newllist, index):
    """ Insert newllist in the llist following rootllist such that newllist is at the provided index in the resulting llist

    Nodes are linked via their `.child` attribute; `newllist.last()` is
    assumed to return the final node of the inserted sub-list.
    NOTE(review): no bounds checking — an index past the end of the list
    raises AttributeError on a None child; confirm callers validate index.
    """
    # At start: the new list becomes the head.
    if index == 0:
        newllist.child = rootllist
        return newllist
    # Walk through the list to the node just before the insertion point.
    curllist = rootllist
    for i in range(index-1):
        curllist = curllist.child
    # Insert: splice newllist between curllist and its old child.
    newllist.last().child=curllist.child
    curllist.child=newllist
    return rootllist
def _save_observables(
        file_name: str,
        file_override: bool,
        observable_names: List[str],
        observable_defs: List[Union[str, Callable]],
) -> None:
    """
    Save observable properties into a HDF5 data file
    Parameters
    ----------
    file_name: str
        HDF5 file name to save observables properties into
    file_override: bool
        Whether to override HDF5 file contents or not
    observable_names : list
        List of observable names
    observable_defs: list
        List of observable string-encoded or callable function definitions
    Returns
    -------
    None
    """
    observable_names = _encode_strings(observable_names)
    # Filter out callable definitions when saving into HDF5 file
    # (callables cannot be serialized; they are replaced by EMPTY_EXPR).
    observable_defs = [d if isinstance(d, str) else EMPTY_EXPR for d in observable_defs]
    observable_defs = _encode_strings(observable_defs)
    # Append if file exists, otherwise create
    with h5py.File(file_name, "a") as file:
        if file_override:
            # Drop the existing group if present; suppress if absent.
            with suppress(KeyError):
                del file["observables"]
        # NOTE(review): dtype "S256" silently truncates entries longer than
        # 256 bytes — confirm that is acceptable for definitions.
        file.create_dataset("observables/names", data=observable_names, dtype="S256")
        file.create_dataset("observables/definitions", data=observable_defs, dtype="S256")
def kpi_value(request, body):
    """KPI value endpoint: dispatch to a handler based on `body.indicator`.

    The factory selects the handler registered for the requested indicator;
    the handler's result is wrapped in a DashboardResult response.
    (Original docstring translated from Chinese.)
    """
    params = {
        "indicator": body.indicator
    }
    handler = KpiFactory().create_handler(params["indicator"])
    result = handler(params=params)
    return DashboardResult(content=result)
def safeReplaceOrder( references ):
    """
    When inlining a variable, if multiple instances occur on the line, then the
    last reference must be replaced first. Otherwise the remaining intra-line
    references will be incorrect.

    Returns a new list of the references sorted by column number, descending.
    """
    # BUG FIX: the original used a Python-2-only comparator (`cmp` builtin
    # and the cmp argument to list.sort), which raises NameError/TypeError
    # on Python 3. A descending key sort is equivalent.
    return sorted(references, key=lambda ref: ref.colno, reverse=True)
def broken_link_finder(urls: Union[str, list, tuple, set],
                       print_to_console: bool = False,
                       file_out = None,
                       viewer = DEFAULT_CSV_VIEWER,
                       open_results_when_done = True,
                       exclude_prefixes: Iterable = EXCLUDE_LINKS_STARTING_WITH):
    """
    Checks for broken links on a specific web page(s) as specified by the urls argument.
    :param urls: the url or urls to check.
    :param print_to_console: True / False -- print each link to console while checking.
    :param file_out: if not None, name of file to which to write broken link
    checker output
    :param viewer: program to use to open and view the results (csv file)
    :param open_results_when_done: True/False
    :param exclude_prefixes: list-like prefixes of links to skip
    :return: namedtuple of (processed_urls, broken_urls, run_time)
    """
    start_time = time.time()
    working_urls: List[Any] = []
    broken_urls: List[Any] = []
    # Normalize exclude_prefixes to a list and always skip mailto: links.
    if type(exclude_prefixes) == str:
        exclude_prefixes = [exclude_prefixes]
    if 'mailto' not in exclude_prefixes:
        exclude_prefixes = list(exclude_prefixes)
        exclude_prefixes.append('mailto')
    if type(urls) == str:
        urls = [urls]
    # Collect every outbound link from each page to check.
    links = []
    for url in urls:
        lst = get_links_from_webpage(url, full_links = True, exclude_prefixes = exclude_prefixes)
        links += lst['urls']
    # remove duplicates
    links = set(links)
    tot = len(links)
    cnt = 0
    for link in links:
        if print_to_console:
            cnt += 1
            print(f'Checking link {cnt} of {tot}: {link}')
        try:
            # TODO: should probably leverage link_check instead of repeating code.
            # compare the code in this function and link_check to see what's up.
            head = requests.head(link)
            success = head.ok
            # BUG FIX: these two status strings were missing the f-prefix and
            # reported the literal text "{link}" instead of the URL.
            status = f"Retrieved header from: {link}"
            try:
                response = requests.get(link)
                success = response.ok
                status = f"Received response from: {link}"
            except Exception as e:
                success = False
                status = f"{e}. Retrieved header but failed to open page: {link}"
        except Exception as e:
            success = False
            status = f"{e}. Failed to retrieved header from: {link}"
        if link.startswith('ftp:'):
            # FTP links cannot be checked over HTTP: get stats from ftp server.
            stats = ez_ftp.stats(link)
            filename = stats.basename
            file_size = stats.size
            # filename, file_size = ftp_file_size(link)
            if not filename:
                success = False
                status = f"FTP file not found: {link}"
            if type(filename) == str:
                if file_size > 0:
                    success = True
                    status = f"FTP file found: {link}"
                else:
                    success = False
                    status = f"FTP file is empty: {link}"
            else:
                success = False
                status = f"{file_size}. FTP file: {link}"
        if success:
            # link works
            working_urls.append((link, success, status))
        else:
            # found a broken link
            broken_urls.append((link, success, status))
    processed_urls = working_urls + broken_urls
    if file_out:
        df = pd.DataFrame(data = processed_urls, index = None,
                          columns = ['link', 'success', 'header'])
        df.to_csv(path_or_buf = file_out, sep = ',', header = True)
        if open_results_when_done:
            # open result file in viewer (text editor).
            view_file(filename = file_out, viewer = viewer)
    # Done checking loop. Report results.
    stop_time = time.time()
    run_time = stop_time - start_time
    if print_to_console:
        print(f'\n\nChecked: {len(processed_urls)} links in {run_time} seconds')
        print(f'\nFound {len(broken_urls)} BROKEN LINKS: \n', broken_urls)
    # Return results.
    ReturnTuple = namedtuple('ReturnTuple', 'processed_urls broken_urls run_time')
    return ReturnTuple(processed_urls, broken_urls, run_time)
def test_compute_reproject_roi_issue1047():
    """ `compute_reproject_roi(geobox, geobox[roi])` sometimes returns
    `src_roi != roi`, when `geobox` has (1) tiny pixels and (2) oddly
    sized `alignment`.
    Test this issue is resolved.
    """
    # Geobox with ~1e-4 degree pixels and a non-round origin (the conditions
    # that triggered issue 1047).
    geobox = GeoBox(3000, 3000,
                    Affine(0.00027778, 0.0, 148.72673054908861,
                           0.0, -0.00027778, -34.98825802556622), "EPSG:4326")
    src_roi = np.s_[2800:2810, 10:30]
    rr = compute_reproject_roi(geobox, geobox[src_roi])
    # Same CRS/scale: the reprojection must reduce to a simple sub-setting
    # (scale-translate) and round-trip the source ROI exactly.
    assert rr.is_st is True
    assert rr.roi_src == src_roi
    assert rr.roi_dst == np.s_[0:10, 0:20]
def clean_value(value: str) -> t.Union[int, float, str]:
    """Return the given value as an int or float if possible, otherwise as the original string.

    int is tried before float, so "3" becomes 3 while "3.5" becomes 3.5.
    """
    for caster in (int, float):
        try:
            return caster(value)
        except ValueError:
            continue
    return value
def check_matching_unit_dimension(
    ureg: UnitRegistry, base_units: str, units_to_check: List[str]
) -> None:
    """
    Check if all units_to_check have the same Dimension like the base_units

    :param ureg: pint unit registry used to resolve unit names.
    :param base_units: name of the reference unit (attribute on ureg).
    :param units_to_check: unit names that must share the base dimension.
    If not
    :raise DimensionalityError on the first mismatching unit.
    """
    base_unit = getattr(ureg, base_units)
    for unit_string in units_to_check:
        unit = getattr(ureg, unit_string)
        if unit.dimensionality != base_unit.dimensionality:
            raise DimensionalityError(base_unit, unit)
def sumstat(*L):
    """
    Sum the given numbers, returned as a float.

    Accepts either separate numeric arguments (``sumstat(1, 2, 3)``) or a
    single list/tuple of numbers (``sumstat([1, 2, 3])``).
    Modified from pg 80 of Web Programming in Python.
    """
    # BUG FIX: the original used types.ListType/types.TupleType, which were
    # removed in Python 3 — isinstance with the builtin types is equivalent.
    if len(L) == 1 and isinstance(L[0], (list, tuple)):
        L = L[0]
    s = 0.0
    for k in L:
        s = s + k
    return s
def test_cases_by_pinned_gene_query(app, case_obj, institute_obj):
    """Test cases filtering by providing the gene of one of its pinned variants

    :param app: Flask test app fixture.
    :param case_obj: case document fixture.
    :param institute_obj: institute document fixture.
    """
    # GIVEN a test variant hitting POT1 gene (hgnc_id:17284)
    suspects = []
    test_variant = store.variant_collection.find_one({"genes.hgnc_id": {"$in": [17284]}})
    assert test_variant
    with app.test_client() as client:
        resp = client.get(url_for("auto_login"))
        assert resp.status_code == 200
        # GIVEN a case with this variant pinned
        form = {
            "action": "ADD",
        }
        client.post(
            url_for(
                "cases.pin_variant",
                institute_id=institute_obj["internal_id"],
                case_name=case_obj["display_name"],
                variant_id=test_variant["_id"],
            ),
            data=form,
        )
        # The pinned variant should now appear in the case's suspects list.
        updated_case = store.case_collection.find_one({"suspects": {"$in": [test_variant["_id"]]}})
        assert updated_case
        # WHEN the case search is performed using the POT1 gene
        slice_query = f"pinned:POT1"
        resp = client.get(
            url_for(
                "overview.cases",
                query=slice_query,
                institute_id=institute_obj["internal_id"],
            )
        )
        # THEN it should return a page with the case
        assert resp.status_code == 200
        assert case_obj["display_name"] in str(resp.data)
def halref_to_data_url(halref: str) -> str:
    """
    Given a HAL or HAL-data document URIRef, returns the corresponding HAL-data URL

    halref: str
        HAL document URL

    Examples:
    (Most important!) https://hal.archives-ouvertes.fr/hal-02371715v2 -> https://data.archives-ouvertes.fr/document/hal-02371715v2
    https://data.archives-ouvertes.fr/document/hal-02371715v2.rdf -> https://data.archives-ouvertes.fr/document/hal-02371715v2.rdf
    https://data.archives-ouvertes.fr/document/hal-02371715 -> https://data.archives-ouvertes.fr/document/hal-02371715
    """
    parts = urlparse(halref)
    assert "archives-ouvertes.fr" in parts.netloc, "Expected HAL (or HAL-data) document URL"
    # Already a HAL-data URL: return it unchanged (round-tripped).
    if "hal.archives-ouvertes.fr" not in parts.netloc:
        return urlunparse(parts)
    # Rewrite the host and prefix the path with /document.
    rewritten = parts._replace(netloc="data.archives-ouvertes.fr",
                               path=f"/document{parts.path}")
    return urlunparse(rewritten)
def find_best_rate():
    """
    Bisection search for the savings rate needed to afford a down payment.

    Prompts for an annual salary, then assumes a $1,000,000 home, a 25%
    down payment, a 7% semi-annual raise, a 4% annual return, zero starting
    savings and a 36-month horizon.  Searches for the savings rate (in
    steps of 1/10000) that lands within $100 of the down payment.

    Prints and returns the best rate found, or returns an explanatory
    string when no rate can reach the target in time.
    """
    annual_salary = float(input("Enter your annual salary: "))
    total_cost = float(1000000)
    semi_annual_raise = float(0.07)
    r = 0.04                          # annual rate of return on savings
    down_payment = 0.25 * total_cost
    months = 36
    epsilon = 100                     # acceptable distance from the target
    lo, hi = 0, 10000                 # rate bounds, expressed in 1/10000ths
    rate = (lo + hi) // 2
    steps = 0
    savings = 0
    while abs(savings - down_payment) >= epsilon:
        salary = annual_salary        # working copy, bumped every 6 months
        savings = 0
        portion_saved = rate / 10000  # integer rate -> decimal fraction saved
        for month in range(1, months + 1):
            if month % 6 == 0:
                salary += annual_salary * semi_annual_raise
            monthly_savings = (salary / 12) * portion_saved
            # Monthly deposit plus return on the current balance.
            savings += monthly_savings + savings * r / 12
        # Bisection step on the savings rate.
        if savings < down_payment:
            lo = rate
        else:
            hi = rate
        rate = (lo + hi) // 2
        steps += 1
        if steps > 15:  # log2(10000) ~ 13.3; further halving is pointless
            break
    if steps < 15:
        print("Best Savings Rate: {} or {}%".format(portion_saved, portion_saved * 100))
        print("Steps in bisection Search: {}".format(steps))
        return portion_saved
    else:
        return "It is not possible to pay the down payment in three years"
def export_secret_key(ctx, account_name):
    """Print the secret key of the given own account to stdout."""
    # Resolve the named account from the CLI context.
    account = get_account(ctx, account_name)
    # NOTE(review): this echoes the raw secret key in clear text — intended
    # for backup/export; avoid running in shared/logged terminals.
    data = account.export_secret_key()
    click.echo(data)
def q_inv(a):
    """Return the conjugate of quaternion ``a`` = [w, x, y, z].

    NOTE(review): the conjugate equals the inverse only for unit-norm
    quaternions (the general inverse divides by the squared norm) —
    callers presumably pass unit quaternions; confirm.
    """
    return [a[0]] + [-component for component in a[1:4]]
def divide_hex_grid_flower(points, hex_radius=None):
    """Partition a hexagonal grid into a flower pattern (used for the final product).

    Points within the central disc get partition 0; the remaining annulus is
    split into six angular wedges numbered 1..6.  Returns an array of
    partition indices (floats), one per point.
    """
    if hex_radius is None:  # copied from build_mirror_array()
        hex_radius = ((10 * 2.5 / 2) + 1) * 1.1
    pts = np.array(points)
    # Radius of the central "flower centre" region.
    inner_limit = 3 * (2 * hex_radius + 1) * np.sqrt(3) / 2
    labels = np.full(len(pts), -1.0)
    for idx, (x, y, z) in enumerate(pts):
        if np.sqrt(x**2 + y**2) <= inner_limit:
            labels[idx] = 0
        else:
            # Shift the angle into [0, 2*pi); the -1e-10 keeps the upper
            # boundary from spilling into a seventh bucket.
            angle = np.arctan2(x, y) + pi - 1e-10
            labels[idx] = 1 + np.floor(6 * angle / (2 * pi))
    return labels
def fis_gauss2mf(x: float, s1: float, c1: float, s2: float, c2: float):
    """Two-sided (split) Gaussian membership function.

    Below ``c1`` the left Gaussian (sigma ``s1``) applies; above ``c2`` the
    right Gaussian (sigma ``s2``) applies; between the two centres the
    membership is 1.0.
    """
    left = fis_gaussmf(x, s1, c1) if x < c1 else 1.0
    right = fis_gaussmf(x, s2, c2) if x > c2 else 1.0
    return left * right
def _is_trigonal_prism(vectors, dev_cutoff=15):
  """
  Triangular prisms are defined by 3 vertices in a triangular pattern on two
  aligned planes. Unfortunately, the angles are dependent on the length and
  width of the prism. Need more examples to come up with a better way of
  detecting this shape.
  For now, this code is experimental.
  Parameters
  ----------
  vectors : list scitbx.matrix.col
  dev_cutoff : float, optional
  Returns
  -------
  bool
  """
  # A trigonal prism geometry requires exactly six coordinating vectors.
  if len(vectors) != 6:
    return
  angles = _bond_angles(vectors)
  # Sort each observed angle into the bucket of its nearer ideal value
  # (~85 deg within a triangular face, ~135 deg across faces), keeping
  # the signed deviation from that ideal.
  a_85s, a_135s = [], []
  for angle in angles:
    if abs(angle[-1] - 85) < abs(angle[-1] - 135):
      a_85s.append(angle[-1] - 85)
    else:
      a_135s.append(angle[-1] - 135)
  # NOTE(review): an ideal prism has nine ~85 deg and six ~135 deg angles;
  # this guard uses `and`, so it only rejects when BOTH counts are wrong —
  # `or` may have been intended.  TODO confirm before changing.
  if len(a_85s) != 9 and len(a_135s) != 6:
    return
  # RMS deviation of all angles from their ideal values.
  deviation = sqrt(sum(i ** 2 for i in a_85s + a_135s) / len(angles))
  if deviation < dev_cutoff:
    # NOTE(review): len(vectors) is always 6 here, so the second tuple
    # element is always 0 — presumably a vertex-count placeholder.
    return deviation, 6 - len(vectors)
def get_sza(times, rad, mask=None):
    """
    Fetch sza at all range cell in radar FoV
    times: list of datetimes to evaluate
    rad: Radar code used to locate the geolocation file
    mask: mask metrix (unused here — presumably consumed by callers; TODO confirm)
    NOTE(review): ``get_altitude`` appears to return solar *elevation*;
    confirm whether zenith angle (90 - altitude) was intended by "sza".
    """
    # The geolocation grid is stored gzipped next to the simulation data;
    # decompress in place, read it, then re-compress to leave the tree as found.
    fname = "data/sim/{rad}.geolocate.data.nc.gz".format(rad=rad)
    os.system("gzip -d " + fname)
    fname = fname.replace(".gz","")
    data = Dataset(fname)
    lat, lon = data["geo_lat"], data["geo_lon"]
    sza = []
    for d in times:
        sza.append(get_altitude(lat, lon, d))
    sza = np.array(sza)
    os.system("gzip " + fname)
    return sza
def load_results(path):
    """
    Load a CSV-style results log into a dictionary of columns.
    Can't use genfromtxt because of the weird format used for arrays
    (space-separated values packed inside a single CSV field).
    :param path: path to the results log file
    :return: defaultdict mapping column name -> list of per-row values
    """
    data = defaultdict(list)
    # Columns with known scalar types; every other column stays a string.
    column_casts = {
        "epoch": float,
        "env_name": str,
        "game_counter": int,
        "game_length": int,
        "score_red": float,
        "score_green": float,
        "score_blue": float,
        "wall_time": float,
        "date_time": float
    }
    # load in data
    step_counter = 0      # cumulative env steps, used to infer epoch if missing
    player_count = None   # derived lazily from the first parsed row
    with open(path, "r") as f:
        header = f.readline()
        column_names = [name.strip() for name in header.split(",")]
        # Older logs lack an explicit epoch column; derive it from step counts.
        infer_epoch = "epoch" not in column_names
        for line in f:
            row = line.split(",")
            for name, value, in zip(column_names, row):
                if name in column_casts:
                    value = column_casts[name](value)
                else:
                    value = str(value)
                data[name] += [value]
            # fix a bug with a specific version of rescue game
            if "stats_voted_offplayer_count" in data:
                data["player_count"] = data["stats_voted_offplayer_count"]
            if player_count is None:
                player_count = sum([int(x) for x in data["player_count"][0].split(" ")])
            step_counter += data["game_length"][-1] * player_count
            # convert the team stats to single columns
            # (vs_order is a module-level ordering of the team-vs-team labels)
            for i, hit in enumerate(int(x) for x in str(data["stats_player_hit"][-1]).split(" ")):
                if vs_order[i] not in data:
                    data[vs_order[i]] = []
                data[vs_order[i]] += [hit]
            # convert the team stats to single columns ("_ww" = with witness)
            if "stats_player_hit_with_witness" in data:
                for i, hit in enumerate(int(x) for x in str(data["stats_player_hit_with_witness"][-1]).split(" ")):
                    key = vs_order[i]+"_ww"
                    if key not in data:
                        data[key] = []
                    data[key] += [hit]
            # convert the team stats to single columns (one per R/G/B team)
            for stat in ["deaths", "kills", "general_shot", "general_moved", "general_hidden", "tree_harvested"]:
                stats_name = f"stats_{stat}"
                if stats_name not in data:
                    continue
                for team, value in zip("RGB", (int(x) for x in str(data[stats_name][-1]).split(" "))):
                    field_name = f"{team}_{stat}"
                    data[field_name] += [value]
            # convert the team stats to single columns (votes also have a "T" total)
            for stat in ["votes"]:
                stats_name = f"stats_{stat}"
                if stats_name not in data:
                    continue
                for team, value in zip("RGBT", (int(x) for x in str(data[stats_name][-1]).split(" "))):
                    field_name = f"{team}_{stat}"
                    data[field_name] += [value]
            if infer_epoch:
                data["epoch"].append(float(step_counter)/1e6)
            # round epoch so rows group better downstream
            data["epoch"][-1] = round(data["epoch"][-1], 1)
    return data
def save_tf(model, folder, filename):
    """Save model in Tensorflow format
    Freezes the live Keras session graph (TF1-style) and writes it as a
    binary GraphDef.
    Args:
        model {graph_def} -- classification model
            NOTE(review): unused — the graph is taken from the live Keras
            session instead; confirm intent.
        folder {string} -- folder name
        filename {string} -- model filename
    """
    filepath = os.path.join(folder, filename)
    sess = K.get_session()
    # Graph node names to keep when folding variables into constants.
    outputs = ["input_1", "dense_2/Softmax"]
    constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), outputs)
    tf.train.write_graph(constant_graph,folder,filename,as_text=False)
    print('saved the graph definition in tensorflow format at: ', filepath)
def priority(n=0):
    """
    Class decorator that tags a plugin with a priority.
    Higher values indicate a higher priority.
    This should be used as a decorator.
    Returns a decorator function.
    :param n: priority (higher values = higher priority)
    :type n: int
    :rtype: function
    """
    def tag(cls):
        # Stash the priority on the class itself; the plugin loader reads it.
        cls._plugin_priority = n
        return cls
    return tag
def vecangle(u, v):
    """
    Calculate as accurately as possible the angle between two 3-component vectors u and v.
    This formula comes from W. Kahan's advice in his paper "How Futile are Mindless Assessments
    of Roundoff in Floating-Point Computation?" (https://www.cs.berkeley.edu/~wkahan/Mindless.pdf),
    section 12 "Mangled Angles."
        theta = 2 * atan2(|| ||v||u - ||u||v ||, || ||v||u + ||u||v ||)
    Accurate even for nearly parallel or nearly opposite vectors, where the
    naive acos(dot/(|u||v|)) formula loses precision.
    """
    # math.hypot computes the Euclidean norm accurately in one C call
    # (replaces the hand-rolled modvec helper).
    modu = math.hypot(u[0], u[1], u[2])
    modv = math.hypot(v[0], v[1], v[2])
    vmodu = [modu * v[0], modu * v[1], modu * v[2]]
    umodv = [modv * u[0], modv * u[1], modv * u[2]]
    mod_diff = math.hypot(umodv[0] - vmodu[0], umodv[1] - vmodu[1], umodv[2] - vmodu[2])
    mod_sum = math.hypot(umodv[0] + vmodu[0], umodv[1] + vmodu[1], umodv[2] + vmodu[2])
    return 2.0 * math.atan2(mod_diff, mod_sum)
def _setup_sensor(hass, humidity):
    """Set up the test sensor by seeding the entity's state with *humidity*."""
    hass.states.async_set(ENT_SENSOR, humidity)
def rm_empty_dir(path, rmed_empty_dirs):
    """Depth-first removal of empty directories at and below ``path``.

    Every directory actually deleted is appended (as ``str``) to
    ``rmed_empty_dirs``, children before parents.  Non-directories and
    directories that still contain files are left untouched.
    """
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            rm_empty_dir(child, rmed_empty_dirs)
    # Re-list: removing children above may have emptied this directory.
    if not os.listdir(path):
        rmed_empty_dirs.append(str(path))
        os.rmdir(path)
def sanitize_option(option):
    """
    Drop the trailing parenthesised token from an option label.
    eg. Auckland City (123) -> Auckland City
    :param option: String to be formatted
    :return: The option text without its final space-separated token
    """
    tokens = option.split(' ')
    return ' '.join(tokens[:-1]).strip()
def node_values_for_tests():
    """Provide sample node values used to parameterise tests.

    Returns:
        List[Any]: possible node values (mixed ints and a string)
    """
    sample_values = [1, 3, 5, 7, "hello"]
    return sample_values
def computeGramMatrix(A, B):
    """
    Linear-kernel (Gram) matrix between two batches of feature vectors.
    Each row in A and B represents a d-dimensional feature vector.
    Parameters:
      A: a (n_batch, n, d) Tensor.
      B: a (n_batch, m, d) Tensor.
    Returns: a (n_batch, n, m) Tensor of pairwise inner products.
    """
    assert A.dim() == 3
    assert B.dim() == 3
    assert A.size(0) == B.size(0) and A.size(2) == B.size(2)
    # Batched matrix product A @ B^T over the shared feature dimension.
    return torch.bmm(A, B.transpose(1, 2))
def parse_config(config):
    """Backwards compatible parsing.
    :param config: ConfigParser object initilized with nvp.ini.
    :returns: A tuple consisting of a control cluster object and a
        plugin_config variable.
    raises: In general, system exceptions are not caught but are propagated
        up to the user. Config parsing is still very lightweight.
        At some point, error handling needs to be significantly
        enhanced to provide user friendly error messages, clean program
        exists, rather than exceptions propagated to the user.
    NOTE(review): legacy Python 2 syntax (``except Exc, e``) — this function
    cannot run under Python 3 without porting.
    """
    # Extract plugin config parameters, falling back to defaults when absent.
    try:
        failover_time = config.get('NVP', 'failover_time')
    except ConfigParser.NoOptionError, e:
        failover_time = str(DEFAULT_FAILOVER_TIME)
    try:
        concurrent_connections = config.get('NVP', 'concurrent_connections')
    except ConfigParser.NoOptionError, e:
        concurrent_connections = str(DEFAULT_CONCURRENT_CONNECTIONS)
    plugin_config = {
        'failover_time': failover_time,
        'concurrent_connections': concurrent_connections,
    }
    LOG.info('parse_config(): plugin_config == "%s"' % plugin_config)
    cluster = NVPCluster('cluster1')
    # Extract connection information.
    try:
        # New style: a whitespace-separated list of connection keys, each
        # naming a colon-separated parameter string.
        defined_connections = config.get('NVP', 'NVP_CONTROLLER_CONNECTIONS')
        for conn_key in defined_connections.split():
            args = [config.get('NVP', 'DEFAULT_TZ_UUID')]
            args.extend(config.get('NVP', conn_key).split(':'))
            try:
                cluster.add_controller(*args)
            except Exception, e:
                LOG.fatal('Invalid connection parameters: %s' % str(e))
                sys.exit(1)
        return cluster, plugin_config
    except Exception, e:
        LOG.info('No new style connections defined: %s' % e)
        # Old style controller specification (fixed CONFIG_KEYS list).
        args = [config.get('NVP', k) for k in CONFIG_KEYS]
        try:
            cluster.add_controller(*args)
        except Exception, e:
            LOG.fatal('Invalid connection parameters.')
            sys.exit(1)
    return cluster, plugin_config
def example(tracker,img_path,dest,scr,disp,log,kb_obj,kb_chc):
    """ Describe Demonstration Example
    Walks the participant through two guided examples (exp1) followed by
    three unguided practice trials (task), with on-screen instructions and
    a beep between stages.  Each stage advances on a key press.
    """
    # Guided example 1.
    txt_task = ("Scientist at work in a laboratory.")
    img_name = "scientist.jpg"
    exp1(tracker,dest,img_path,img_name,txt_task,scr,disp,log,kb_obj,kb_chc)
    scr.clear()
    scr.draw_text(text= "Great work! Here's another example sentence. Press spacebar to continue.", fontsize=20)
    disp.fill(scr)
    _ =disp.show()
    response, t_1 = kb_obj.get_key()
    beep(1)
    # Guided example 2.
    txt_task = ("Man playing with a dog.")
    img_name = "man_dog.jpg"
    exp1(tracker,dest,img_path,img_name,txt_task,scr,disp,log,kb_obj,kb_chc)
    scr.clear()
    scr.draw_text(text= "Great work! This time, without the written instructions. Feel free to ask any questions. Press spacebar to continue.", fontsize=20)
    disp.fill(scr)
    _ =disp.show()
    response, t_1 = kb_obj.get_key()
    # Unguided practice trials.
    txt_task = ("The phone says 'pass me the pizza'")
    img_name = "phone.jpg"
    task(tracker,dest,img_path,img_name,txt_task,scr,disp,log,kb_obj,kb_chc)
    scr.clear()
    beep(1)
    scr.draw_text(text= "Great work! Press spacebar to continue.", fontsize=20)
    disp.fill(scr)
    _ =disp.show()
    response, t_1 = kb_obj.get_key()
    txt_task = ("Two young women are playing a game of cards.")
    img_name = "music1.jpg"
    task(tracker,dest,img_path,img_name,txt_task,scr,disp,log,kb_obj,kb_chc)
    scr.clear()
    beep(1)
    scr.draw_text(text= "Great work! Press spacebar to continue.", fontsize=20)
    disp.fill(scr)
    _ =disp.show()
    response, t_1 = kb_obj.get_key()
    txt_task = ("The president would arrive here within two hours.")
    img_name = "control.jpg"
    task(tracker,dest,img_path,img_name,txt_task,scr,disp,log,kb_obj,kb_chc)
def test_private_median(example_private_table: PrivateTable):
    """check private median implementation using Age in adult dataset."""
    # A very large budget (1e4) makes the DP noise negligible, so the noisy
    # median should land within 1 of the true median age (37).
    noisy_median = example_private_table.median('Age', PrivacyBudget(10000.))
    check_absolute_error(noisy_median, 37., 1.)
    # Explicit cleanup of the local; not strictly necessary.
    del noisy_median
def _generate_output_data_files_threaded(
    short_topic,
    output_template,
    plaintext_key,
    output_folder,
    encryption_json_text_output,
    output_iv_full,
    job_id,
):
    """Generates required historic data files from the files in the given folder using multiple threads.
    Yields each worker's result once all futures have completed.
    Keyword arguments:
    short_topic -- the short topic
    output_template -- the name and location for the output template json file
    plaintext_key -- the plaintext data key for encrypting the data file
    output_folder -- the folder to store the generated output files in
    encryption_json_text_output -- the encryption text
    output_iv_full -- the iv used to encrypt
    job_id -- job id for the messages
    """
    global keys
    # Snapshot the module-level key list so concurrent mutation can't skew the loop.
    local_keys = keys
    output_base_content = file_helper.get_contents_of_file(output_template, False)
    with ThreadPoolExecutor() as executor_output:
        future_results_output = []
        for key_number in range(0, len(local_keys)):
            future_results_output.append(
                executor_output.submit(
                    generate_output_file,
                    output_base_content,
                    output_folder,
                    local_keys[key_number],
                    encryption_json_text_output,
                    short_topic,
                    plaintext_key,
                    output_iv_full,
                    job_id,
                    key_number + 1,
                )
            )
        # Block until every worker has finished before yielding any result.
        wait(future_results_output)
        for future in future_results_output:
            try:
                yield future.result()
            except Exception as ex:
                # Surface worker failures as assertion errors for the test harness.
                raise AssertionError(ex)
def deletable_proxy_user(request, onefs_client):
    """Get the name of an existing proxy user that it is ok to delete."""
    # Delegates to the shared helper so multiple fixtures reuse the same logic.
    return _deletable_proxy_user(request, onefs_client)
def get_from_module(identifier, module_params, module_name,
                    instantiate=False, kwargs=None):
    """Resolve ``identifier`` against a registry of module objects.

    (Adapted from keras.utils.generic_utils.)

    ``identifier`` may be:
      * a string key into ``module_params`` — optionally instantiated,
        with ``kwargs`` when given;
      * a dict with a ``'name'`` key naming the entry, the remaining items
        being constructor keyword arguments;
      * anything else, which is returned unchanged.

    Raises:
        Exception: when the name is not present in ``module_params``.
    """
    if isinstance(identifier, str):
        res = module_params.get(identifier)
        # `is None` rather than truthiness, so falsy-but-valid entries resolve.
        if res is None:
            raise Exception('Invalid ' + str(module_name) + ': ' +
                            str(identifier))
        if instantiate:
            return res(**kwargs) if kwargs else res()
        return res
    if isinstance(identifier, dict):
        # Work on a copy: popping 'name' must not mutate the caller's dict.
        config = dict(identifier)
        name = config.pop('name')
        res = module_params.get(name)
        if res is None:
            raise Exception('Invalid ' + str(module_name) + ': ' +
                            str(identifier))
        return res(**config)
    return identifier
def test_mnist_model_register_and_scale_using_non_existent_handler():
    """ Bug - Following code block will result in "Buggy" behaviour. If a non-existent handler is used,
    then ideally we should not be able to scale up workers anytime, but currently Torchserve scales up
    background workers. Uncomment it after the Bug is fixed
    """
    # Intentionally disabled until the upstream TorchServe bug is fixed.
    # mnist_model_register_and_scale_using_non_existent_handler_synchronous()
    # mnist_model_register_and_scale_using_non_existent_handler_asynchronous()
def color_lerp(c1, c2, a):
    """Return the linear interpolation between two colors.
    ``a`` is the interpolation value, with 0 returing ``c1``,
    1 returning ``c2``, and 0.5 returing a color halfway between both.
    Args:
        c1 (Union[Tuple[int, int, int], Sequence[int]]):
            The first color. At a=0.
        c2 (Union[Tuple[int, int, int], Sequence[int]]):
            The second color. At a=1.
        a (float): The interpolation value,
    Returns:
        Color: The interpolated Color.
    """
    # Delegates the blend to the libtcod C implementation via cffi.
    return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a))
def get_equations(points):
    """Compute affine line equations (y = ax + b) from endpoint pairs.

    Input : 1
        points : list of list
            Each element holds FOUR points
            [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]: the start/end of the
            two separation lines of one identified element.
    Output : 2
        columns_a : list of [a1, a2] slope pairs, in input order
        columns_b : list of [b1, b2] intercept pairs, in input order

    NOTE(review): vertical lines (equal x coordinates) raise
    ZeroDivisionError — confirm upstream guarantees non-vertical input.
    """
    columns_a, columns_b = [], []
    for quad in points:
        p1, p2, p3, p4 = quad[0], quad[1], quad[2], quad[3]
        # Slopes of the start and end separation lines.
        slope1 = (p1[1] - p2[1]) / (p1[0] - p2[0])
        slope2 = (p3[1] - p4[1]) / (p3[0] - p4[0])
        columns_a.append([slope1, slope2])
        # Intercepts via b = y - a*x at one known point of each line.
        columns_b.append([p1[1] - slope1 * p1[0], p3[1] - slope2 * p3[0]])
    return (columns_a, columns_b)
def Temple_Loc(player, num):
    """Temple location: spend ``num`` coins, gaining equal score and donation.

    Mutates the player in place and returns it for chaining.
    """
    donation = num
    player.coins -= donation
    player.score += donation
    player.donation += donation
    # player = temple_bonus_check(player) for acheivements
    return player
def save_crowdin(data):
    """
    Save crowdin `data`.
    Serialises ``data`` to YAML and overwrites CROWDIN_FILE under REPO_ROOT.
    """
    fpath = os.path.join(REPO_ROOT, CROWDIN_FILE)
    with open(fpath, "w") as fh:
        fh.write(yaml.safe_dump(data))
def indexGenomeFile(input, output):
    """Index STAR genome index file
    `input`: Input probes fasta file
    `output`: SAindex file to check the completion of STAR genome index
    NOTE(review): Python 2 print statements — this function cannot run under
    Python 3 without porting.  ``input`` also shadows the builtin.
    """
    #print input
    #print output
    # Derive the matching GTF annotation path from the fasta basename.
    base = splitext(input)[0]
    base = base + ".gtf"
    #print base
    gtfFile = base
    # proDir / cpuNum are module-level configuration globals.
    outputDir = proDir + "/result/Genome"
    print colored("Stage 4: Creating genome index file from the probe fasta file ....", "green")
    print input
    #print cpuNum
    result = tasks.index_db_file(input, outputDir, cpuNum, gtfFile)
    return result
def assert_allclose(
    actual: numpy.ndarray,
    desired: Tuple[float, float, float],
    rtol: numpy.float64,
    atol: numpy.float64,
    err_msg: Literal["driver: None"],
):
    """
    Auto-generated API-usage stub (signature only; body intentionally empty).
    usage.scipy: 2
    """
    ...
def test_list_id_max_length_1_nistxml_sv_iv_list_id_max_length_2_1(mode, save_output, output_format):
    """
    Type list/ID is restricted by facet maxLength with value 6.
    """
    # Round-trips the NIST schema/instance pair through the binding generator
    # and validates the produced bindings for the given mode/format.
    assert_bindings(
        schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-maxLength-2.xsd",
        instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-maxLength-2-1.xml",
        class_name="Out",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def performance(origin_labels, predict_labels, deci_value, bi_or_multi=False, res=False):
    """Evaluate classification predictions.

    :param origin_labels: true labels of the data set.
    :param predict_labels: predicted labels of the data set.
    :param deci_value: decision values used for ROC and AUC (binary case only).
    :param bi_or_multi: False for binary classification, True for multi-class.
    :param res: residue flag; when True the negative class is labelled 0,
        otherwise -1.
    :returns: (acc, mcc, auc, balance_acc, sn, sp, p, r, f1) for binary
        classification; accuracy alone for multi-class.
    :raises ValueError: when the two label lists differ in length.
    """
    if len(origin_labels) != len(predict_labels):
        raise ValueError("The number of the original labels must equal to that of the predicted labels.")
    if bi_or_multi:
        # Multi-class: plain accuracy.
        correct_labels = sum(1.0 for truth, pred in zip(origin_labels, predict_labels) if truth == pred)
        return correct_labels / len(origin_labels)
    # Binary: the negative class label depends on residue mode.  This single
    # counting loop replaces the two near-identical loops that differed only
    # in the negative label (0 vs -1).
    neg = 0 if res else -1
    tp = tn = fp = fn = 0.0
    for truth, pred in zip(origin_labels, predict_labels):
        if truth == 1 and pred == 1:
            tp += 1.0
        elif truth == 1 and pred == neg:
            fn += 1.0
        elif truth == neg and pred == 1:
            fp += 1.0
        elif truth == neg and pred == neg:
            tn += 1.0
    # Each metric degrades to 0.0 when its denominator is empty.
    try:
        sn = tp / (tp + fn)
        r = sn
    except ZeroDivisionError:
        sn, r = 0.0, 0.0
    try:
        sp = tn / (fp + tn)
    except ZeroDivisionError:
        sp = 0.0
    try:
        acc = (tp + tn) / (tp + tn + fp + fn)
    except ZeroDivisionError:
        acc = 0.0
    try:
        mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    except ZeroDivisionError:
        mcc = 0.0
    try:
        auc = roc_auc_score(origin_labels, deci_value)
    except ValueError:  # e.g. only one class present in origin_labels
        auc = 0.0
    try:
        p = tp / (tp + fp)
    except ZeroDivisionError:
        p = 0.0
    try:
        f1 = 2 * p * r / (p + r)
    except ZeroDivisionError:
        f1 = 0.0
    balance_acc = (sn + sp) / 2
    return acc, mcc, auc, balance_acc, sn, sp, p, r, f1
def group_emoves(emoves, props):
    """Elementary moves with the same scale ratio can be combined. All emoves are from the same group, so they
    share the same transformation distance. We can combine all emoves that also share the same scale ratio.
    Yields (up, down) pairs of move-application sets, one pair per viable
    allocation of sources/targets to the grouped emoves.
    """
    # group the emoves by ratio (sort first: groupby needs adjacent equal keys)
    emoves.sort(key=lambda x: x.n_sources/x.n_targets)
    emovemap = {ratio: list(emove) for ratio, emove in groupby(emoves, key=lambda x: x.n_sources/x.n_targets)}
    ratios = list(emovemap.keys())
    # consider non-scales first -> ratio of 1 first
    if props.low_scale_first:
        ratios.sort(key=lambda x: abs(log2(x)))
    for ratio in ratios:
        # Down-scaling ratios (< 1) are skipped; presumably covered by the
        # reciprocal ratio's pass — TODO confirm.
        if ratio < 1:
            continue
        ratio_emoves = emovemap[ratio]
        # we generate the a list of possible allocations to the emoves, that work to form a possible pair
        line_gen = _group_emoves_lines(ratio_emoves, ratio)
        exhaust_gen = _group_emoves_exhaust(ratio_emoves, 0, set(), props)
        # Choose the allocation strategy per the search properties.
        if props.exhaustive and props.line:
            gen = chain(line_gen, exhaust_gen)
        elif props.exhaustive:
            gen = exhaust_gen
        else:
            gen = line_gen
        # generate the actual pair out of the emove allocation
        for emove_alloc in gen:
            if emove_alloc is None:
                continue
            up, down = set(), set()
            for (emove, sources, targets) in emove_alloc:
                # Each emove carries a (low, high) move description.
                (lx, ly, lneg), (hx, hy, hneg) = emove.move
                up |= {A(nr, lx, ly, lneg) for nr in sources}
                down |= {A(nr, hx, hy, hneg) for nr in targets}
            yield (up, down)
def rotkehlchen_instance(
    uninitialized_rotkehlchen,
    database,
    blockchain,
    accountant,
    start_with_logged_in_user,
    start_with_valid_premium,
    function_scope_messages_aggregator,
    db_password,
    rotki_premium_credentials,
    accounting_data_dir,
    username,
    etherscan,
):
    """A partially mocked rotkehlchen instance
    Wires the fixture-provided collaborators into the uninitialized instance
    and returns it ready for use in tests.
    """
    initialize_mock_rotkehlchen_instance(
        rotki=uninitialized_rotkehlchen,
        start_with_logged_in_user=start_with_logged_in_user,
        start_with_valid_premium=start_with_valid_premium,
        msg_aggregator=function_scope_messages_aggregator,
        accountant=accountant,
        blockchain=blockchain,
        db_password=db_password,
        rotki_premium_credentials=rotki_premium_credentials,
        data_dir=accounting_data_dir,
        database=database,
        username=username,
        etherscan=etherscan,
    )
    return uninitialized_rotkehlchen
def enable_dashboard(cls, config):
    """Method to enable the dashboard module.
    if user bootstrap with skip-dashboard option
    then enabling the dashboard module.
    Args:
        cls (CephAdmin object) : cephadm instance object.
        config (Dict): Key/value pairs passed from the test suite.
    Example::
        args:
            username: admin123
            password: admin@123
    """
    user = config.get("username")
    pwd = config.get("password")
    # To create password text file (ac-user-create reads the password from a
    # file rather than the command line)
    temp_file = tempfile.NamedTemporaryFile(suffix=".txt")
    passwd_file = cls.installer.node.remote_file(
        sudo=True, file_name=temp_file.name, file_mode="w"
    )
    passwd_file.write(pwd)
    passwd_file.flush()
    # To enable dashboard module
    DASHBOARD_ENABLE_COMMANDS = [
        "ceph mgr module enable dashboard",
        "ceph dashboard create-self-signed-cert",
    ]
    for cmd in DASHBOARD_ENABLE_COMMANDS:
        out, err = cls.shell(args=[cmd])
        LOG.info("STDOUT:\n %s" % out)
        LOG.error("STDERR:\n %s" % err)
    # command to create username and password to access dashboard as administrator
    cmd = [
        "ceph",
        "dashboard",
        "ac-user-create",
        user,
        "-i",
        temp_file.name,
        "administrator",
    ]
    # /tmp is mounted into the cephadm shell so the password file is visible.
    out, err = cls.shell(
        args=cmd,
        base_cmd_args={"mount": "/tmp:/tmp"},
    )
    LOG.info("STDOUT:\n %s" % out)
    LOG.error("STDERR:\n %s" % err)
    validate_enable_dashboard(cls, user, pwd)
def store_inspection_outputs_df(backend, annotation_iterators, code_reference, return_value, operator_context):
    """
    Stores the inspection annotations for the rows in the dataframe and the
    inspection annotations for the DAG operators in a map
    """
    dag_node_identifier = DagNodeIdentifier(operator_context.operator, code_reference,
                                            backend.code_reference_to_description.get(code_reference))
    annotations_df = build_annotation_df_from_iters(backend.inspections, annotation_iterators)
    # 1-based row index used to align annotations with dataframe rows.
    annotations_df['mlinspect_index'] = range(1, len(annotations_df) + 1)
    inspection_outputs = {}
    for inspection in backend.inspections:
        inspection_outputs[inspection] = inspection.get_operator_annotation_after_visit()
    backend.dag_node_identifier_to_inspection_output[dag_node_identifier] = inspection_outputs
    # Wrap the result so the annotations/backend travel with the dataframe.
    return_value = MlinspectDataFrame(return_value)
    return_value.annotations = annotations_df
    return_value.backend = backend
    # Strip helper index columns ("_x"/"_y" variants appear after merges).
    if "mlinspect_index" in return_value.columns:
        return_value = return_value.drop("mlinspect_index", axis=1)
    elif "mlinspect_index_x" in return_value.columns:
        return_value = return_value.drop(["mlinspect_index_x", "mlinspect_index_y"], axis=1)
    assert "mlinspect_index" not in return_value.columns
    assert isinstance(return_value, MlinspectDataFrame)
    return return_value
def picard_bedtointervallist(bed, refdict, out_path, no_header_out_path):
    """Starts a Picard BedToIntervalList process that writes to out_path
    Also writes a headerless copy (all "@" lines stripped) to
    no_header_out_path.
    NOTE(review): paths are interpolated into shell=True command strings;
    a path containing quotes or shell metacharacters could break or inject —
    consider argument lists with shell=False.
    """
    cmd = f'{GATK_PATH} BedToIntervalList -SD "{refdict}" --INPUT "{bed}" --OUTPUT "{out_path}"'
    run_ext_process(cmd, shell=True)
    # Some GATK-Picard modules do not currently report an accurate exit
    # status this is indended to catch errors with this GATK command
    if not os.path.exists(out_path):
        msg = f'Expected output ({out_path}) not found for command: {cmd}'
        raise ChildProcessError(msg)
    cmd = f'grep -v "@" "{out_path}" > {no_header_out_path}'
    run_ext_process(cmd, shell=True)
    if not os.path.exists(no_header_out_path):
        msg = f'Expected output ({no_header_out_path}) not found for command: {cmd}'
        raise ChildProcessError(msg)
def breadth_first_search(g, s):
    """
    Breadth First Search (CLR-style).

    Starting from vertex ``s``, annotates every reachable node with its
    ``distance`` from the start, its BFS-tree ``parent``, and a ``color``
    (0 = WHITE/unvisited, 1 = GRAY/queued, 2 = BLACK/done).  Assumes all
    nodes start unvisited.

    @param g: input graph, assume all unvisited
    @type g: Graph
    @param s: starting vertex name
    """
    # g.reset()
    start = g[s]  # get Node from name
    start.color = 1  # GRAY: discovered, not yet expanded
    start.distance = 0
    start.parent = None
    frontier = deque([start])
    while frontier:
        node = frontier.popleft()
        for nxt in node.child:  # for each edge (node, nxt)
            if not nxt.color:  # WHITE: first time seen
                nxt.color = 1
                nxt.distance = node.distance + 1
                nxt.parent = node
                frontier.append(nxt)
        node.color = 2  # BLACK: fully expanded
def draw_overlay(image, overlay, alpha):
    """Draws an overlay over an image at a specified alpha.
    The blend is written back into ``image`` in place (last cv2 argument).
    :param image: The base image.
    :param overlay: The overlay image.
    :param alpha: The opacity, from 0 to 1.
    :type image: ndarray
    :type overlay: ndarray
    :type alpha: float
    """
    cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)
def switched (decorator):
    """decorator transform for switched decorations.
    adds start_fun and stop_fun methods to class to control fun"""
    @simple_decorator
    def new_decorator (fun):
        # One shared Event per decorated function gates all of its calls.
        event = new_event()
        def inner_fun (self, *args):
            if args:
                # Block until start_<fun> has been called (event set).
                event.wait()
                # Skip the call entirely during shutdown.
                if threads_alive():
                    return fun(self, *args)
        def new_fun (self, *args):
            # Expose start/stop toggles for this function on the instance.
            setattr(self, 'start_%s' % fun.__name__, event.set)
            setattr(self, 'stop_%s' % fun.__name__, event.clear)
            decorator(inner_fun)(self, *args)
        return new_fun
    return new_decorator
def logistic_embedding0(k=1, dataset='epinions'):
    """using random embedding to train logistic
    Baseline: evaluates the downstream logistic-regression task on
    uniformly random node embeddings instead of learned ones.
    Keyword Arguments:
    k {int} -- [folder] (default: {1})
    dataset {str} -- [dataset] (default: {'epinions'})
    Returns:
    [type] -- [pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score]
    """
    print('random embeddings')
    # DATASET_NUM_DIC / EMBEDDING_SIZE are module-level configuration globals.
    embeddings = np.random.rand(DATASET_NUM_DIC[dataset], EMBEDDING_SIZE)
    pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score = common_logistic(dataset, k, embeddings, 'random')
    return pos_ratio, accuracy, f1_score0, f1_score1, f1_score2, auc_score
def process_plus_glosses(word):
    """
    Find all glosses with a plus inside. They correspond
    to one-phoneme affix sequences that are expressed by
    the same letter due to orthographic requirements.
    Replace the glosses and the morphemes.
    """
    # rxPartsGloss is a module-level compiled regex; the substitution callback
    # rewrites each matched gloss/morpheme pair.
    return rxPartsGloss.sub(process_plus_glosses_ana, word)
def replication():
    """
    1) Connect to the PostgreSQL database server
    2) Create and start logical replication with Postgres Server.
    3) Call the Pub/sub function to push to Pub/Sub
    """
    conn = None
    try:
        # read connection parameters
        dsn = config()
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(dsn,
                                connection_factory=psycopg2.extras.LogicalReplicationConnection)
        print('Connection created')
        # create a cursor
        cur = conn.cursor()
        # replication slot name
        replication_slot = 'postgres_replication'
        try:
            # Start replication slot if available
            cur.start_replication(slot_name=replication_slot, decode=True)
        except psycopg2.ProgrammingError:
            # Create replication slot only the first time, with wal2json decoding.
            cur.create_replication_slot(replication_slot, output_plugin='wal2json')
            cur.start_replication(slot_name=replication_slot, decode=True)
        print('calling streaming')
        democonsumer = DemoConsumer()
        print("Starting streaming, press Control-C to end...", file=sys.stderr)
        try:
            # consuming the stream msgs; blocks until interrupted.
            cur.consume_stream(democonsumer)
        except KeyboardInterrupt:
            cur.close()
            conn.close()
            print("The slot " + replication_slot + "still exists. Drop it by running following command on Postgres: "
                  "SELECT pg_drop_replication_slot('" + replication_slot + "'); if no longer needed.", file=sys.stderr)
            print("WARNING: Transaction logs will accumulate in pg_xlog "
                  "until the slot is dropped.", file=sys.stderr)
        # close the communication with the PostgreSQL
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
def check_in_the_past(value: datetime) -> datetime:
    """
    Validate that a timestamp is explicit-UTC and lies in the past.

    Returns the value unchanged on success; raises AssertionError otherwise
    (validator-style contract, e.g. for pydantic).
    """
    assert value.tzinfo == timezone.utc, "date must be an explicit UTC timestamp"
    now = datetime.now(timezone.utc)
    assert value < now, "date must be in the past"
    return value
def fixture(filename):
    """
    Get the handle / path to the test data folder.
    :param filename: file name inside the fixtures directory
    :return: path joined onto the module-level ``fixtures_dir``
    """
    return os.path.join(fixtures_dir, filename)
def character_count_helper(results: Dict) -> int:
    """
    Count the characters (spaces excluded) across OCR results for one image.

    Splits each result's ``text`` on single spaces and sums the lengths of
    the resulting tokens, so spaces contribute nothing to the total.

    Parameters
    ----------
    results: Dict
        (OCR results from a clapperboard instance)

    Returns
    -------
    Int
        Number of characters computed from OCR results
    """
    return sum(
        len(token)
        for entry in results
        for token in entry["text"].split(" ")
    )
def from_pickle(input_path):
    """Deserialise and return the object stored in the pickle file at ``input_path``.

    NOTE(review): pickle should only be used on trusted files — loading
    untrusted data can execute arbitrary code.
    """
    with open(input_path, 'rb') as handle:
        return pickle.load(handle)
def PromptForRegion(available_regions=constants.SUPPORTED_REGION):
  """Prompt for region from list of available regions.

  This method is referenced by the declaritive iam commands as a fallthrough
  for getting the region.

  Args:
    available_regions: list of the available regions to choose from

  Returns:
    The region specified by the user (str), or None when prompting is not
    possible.
  """
  if not console_io.CanPrompt():
    return None
  choices = list(available_regions)
  selection = console_io.PromptChoice(
      choices, message='Please specify a region:\n', cancel_option=True)
  chosen_region = choices[selection]
  log.status.Print('To make this the default region, run '
                   '`gcloud config set ai/region {}`.\n'.format(chosen_region))
  return chosen_region
def update(x, new_x):
    """Update the value of `x` to `new_x`.

    # Arguments
        x: A `Variable`.
        new_x: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated (the TF assign op).
    """
    assign_op = tf.assign(x, new_x)
    return assign_op
def r1r2_to_bp(r1, r2, pl=0.01, pu=0.25):
    """
    Convert uniform sampling of r1 and r2 to impact parameter b and radius ratio p
    following Espinoza 2018, https://iopscience.iop.org/article/10.3847/2515-5172/aaef38/meta

    Parameters:
    -----------
    r1, r2: float;
        uniform parameters in from u(0,1)

    pl, pu: float;
        lower and upper limits of the radius ratio

    Return:
    -------
    b, p: tuple;
        impact parameter and radius ratio

    Raises:
    -------
    ValueError
        If array inputs mix r1 values above and below the split point Ar:
        the two analytic branches cannot then be applied uniformly.
    """
    assert np.all(0<r1) and np.all(r1<=1) and np.all(0<r2) and np.all(r2<=1), f"r1 and r2 needs to be u(0,1) but r1={r1}, r2={r2}"
    # Split point between the two analytic branches of the mapping.
    Ar = (pu-pl)/(2+pu+pl)
    if np.all(r1 > Ar):
        b = (1+pl) * (1 + (r1-1)/(1-Ar) )
        p = (1-r2)*pl + r2*pu
    elif np.all(r1 <= Ar):
        q1 = r1/Ar
        b = (1+pl) + q1**0.5 * r2*(pu-pl)
        p = pu + (pl-pu)* q1**0.5*(1-r2)
    else:
        # Previously a mixed array fell through both branches and crashed with
        # an opaque UnboundLocalError; fail explicitly instead.
        raise ValueError(
            "array inputs must lie entirely on one side of Ar={:g}; "
            "got mixed r1 values".format(Ar))
    return b, p
def is_utc_today(utc):
    """
    Returns true if the UTC (unix) timestamp falls on the current UTC day.

    :param utc: unix timestamp in seconds (same scale as `unix_time` output)
    :return: True when `utc` lies within [today's UTC midnight, +24h)
    """
    current_time = datetime.datetime.utcnow()
    # Truncate to midnight directly. The previous timedelta-based computation
    # left the current microseconds in day_start, shifting "midnight" by up to
    # a second.
    day_start = current_time.replace(hour=0, minute=0, second=0, microsecond=0)
    day_start_utc = unix_time(day_start)
    # Bound on both sides: without the upper bound, any future timestamp was
    # reported as "today", contradicting the function's name.
    return 0 <= (utc - day_start_utc) < 24 * 60 * 60
def test_run_completed(mock_job, mock_queue, mock_driver):
    """Verify that a successful run marks the job as complete."""
    # Setup: stub the renderer and the storage backend.
    def fake_render(*args, **kwargs):
        return

    class StubStorage:
        def __init__(self):
            pass

        def load(self, *args, **kwargs):
            return 'blah'

        def save(self, *args, **kwargs):
            return True

    # Execute
    render.run(
        sleep=5,
        job_queue=mock_queue,
        job=mock_job,
        render=fake_render,
        storage=StubStorage(),
        driver=mock_driver,
    )

    # Verify
    assert mock_job.status is StatusEnum.complete
async def get_telegram_id(phone_number, user_mode=False):
    """
    Tries to get a telegram ID for the passed in phone number.

    :param phone_number: phone number to look up (format expected by Telegram;
        presumably E.164 — TODO confirm with callers)
    :param user_mode: when True, use the user-account contact-import API
        (result is only printed; the function then returns None implicitly).
        When False (default), use the bot send-contact workaround.
    :return: in bot mode, the Telegram user id or None when the user is
        unknown to the bot; in user_mode, always None.
    """
    async with start_bot_client() as bot:
        if user_mode:
            # just leaving this code here in case it proves useful.
            # It only works if you use a user, not a bot.
            # more details: https://stackoverflow.com/a/51196276/8207
            # https://tl.telethon.dev/methods/contacts/import_contacts.html#examples
            contact = InputPhoneContact(client_id=0, phone=phone_number, first_name="a", last_name="")
            result = await bot(ImportContactsRequest([contact]))
            # NOTE(review): result is only printed, never returned — this
            # branch falls through and yields None.
            print(result)
        else:
            # this only works if you have already messaged the contact, so only will allow looking
            # up "known" users.
            # more details: https://stackoverflow.com/a/41696457/8207
            # Fall back to the first GroupChat when no lookup room is configured.
            # NOTE(review): synchronous Django ORM call inside an async
            # function — confirm this runs outside the event-loop thread.
            room_id = settings.MPACT_CONTACT_LOOKUP_ROOM_ID or GroupChat.objects.all()[0].id
            print('room id', room_id)
            receiver = await bot.get_entity(PeerChat(room_id))
            # Sending a contact card into the room makes Telegram resolve the
            # phone number to a user id on the returned message's media.
            msg_inst = await bot.send_file(
                receiver,
                InputMediaContact(
                    phone_number=phone_number,
                    first_name='Jane',
                    last_name='Doe',
                    vcard='',
                ))
            # "unknown" users return "0" instead of the actual ID
            return msg_inst.media.user_id if msg_inst.media.user_id != 0 else None
def QuadRemesh(thisMesh, parameters, multiple=False):
    """
    Quad remesh this mesh.
    """
    url = "rhino/geometry/mesh/quadremesh-mesh_quadremeshparameters"
    if multiple:
        # Batch mode: pair each mesh with its parameter set.
        url += "?multiple=true"
        args = list(zip(thisMesh, parameters))
    else:
        args = [thisMesh, parameters]
    response = Util.ComputeFetch(url, args)
    return Util.DecodeToCommonObject(response)
def part_2_helper():
    """PART TWO

    Run the sled simulation for each of the five required slopes and multiply
    the tree counts together.

    Returns:
        The product of sled_down_hill(down, right) over the slopes
        (1,1), (1,3), (1,5), (1,7) and (2,1).
    """
    from math import prod  # local import: keeps the module's import block untouched

    # Data-driven over the slope list instead of five copy-pasted locals.
    slopes = [(1, 1), (1, 3), (1, 5), (1, 7), (2, 1)]
    return prod(sled_down_hill(down, right) for down, right in slopes)
def test_shuffle_each_shard():
  """Test that shuffle_each_shard works."""
  num_rows = 100
  num_tasks = 10
  num_features = 10
  X = np.random.rand(num_rows, num_features)
  y = np.random.randint(2, size=(num_rows, num_tasks))
  w = np.random.randint(2, size=(num_rows, num_tasks))
  ids = np.arange(num_rows)
  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  dataset.reshard(shard_size=10)
  dataset.shuffle_each_shard()
  X_s, y_s, w_s, ids_s = dataset.X, dataset.y, dataset.w, dataset.ids
  # Shapes are preserved by a shuffle.
  for shuffled, original in ((X_s, X), (y_s, y), (w_s, w), (ids_s, ids)):
    assert shuffled.shape == original.shape
  # The order itself must have changed.
  assert not (ids_s == ids).all()
  # The ids should now store the performed permutation. Check that the
  # original dataset is recoverable.
  for row in range(num_rows):
    np.testing.assert_array_equal(X_s[row], X[ids_s[row]])
    np.testing.assert_array_equal(y_s[row], y[ids_s[row]])
    np.testing.assert_array_equal(w_s[row], w[ids_s[row]])
    np.testing.assert_array_equal(ids_s[row], ids[ids_s[row]])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.