content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import json
def load_config(folder):
    """
    Loads the configuration file for the Canvas API and returns it.
    Params:
        folder (String) : The folder containing the config.
    Returns:
        json : The config file as a JSON object.
    Raises:
        FileNotFoundError : if folder/config.json does not exist.
    """
    try:
        with open(f'{folder}/config.json', 'r') as f:
            return json.load(f)
    except FileNotFoundError as err:
        # Chain the original error so the missing path stays visible in tracebacks.
        raise FileNotFoundError(
            "Make sure to run setup.py before running the scraper."
        ) from err
def find_max_consecutive_ones(given_string):
    """Return the length of the longest run of consecutive '1' characters.

    :param given_string: string of '0'/'1' characters.
    :return: length of the longest '1' run (0 for empty or all-zero input).
    """
    # The original pairwise scan started its counter at 1, which wrongly
    # reported 1 for inputs such as "0". A single pass tracking the current
    # run length fixes that and handles the empty string naturally.
    longest = 0
    current = 0
    for char in given_string:
        if char == "1":
            current += 1
            if current > longest:
                longest = current
        else:
            current = 0
    return longest
def is_file_list(coll):
    """
    Checks whether a collection is a list of files.
    :param coll (list): collection to check.
    :return: True if the first element looks like a file path or URL,
             else False (an empty list is not a file list).
    :raises Exception: if coll is not a list.
    """
    if not isinstance(coll, list):
        raise Exception(f"Expected collection as a list, have received {type(coll)}")
    # Guard: the original indexed coll[0] and raised IndexError on [].
    if not coll:
        return False
    # A path ("/...") or URL ("http...") prefix marks a file list.
    return coll[0].startswith(("/", "http"))
def read_input(in_file, header=True):
    """Read and return all GO terms that are found in an open file.
    Parameters
    ----------
    in_file : an open file object
    header : bool, optional
        If the file contains a header that should be stripped
    Returns
    -------
    A list with all GO-term id's that are present in the given file.
    """
    if header:
        # next(..., None) avoids StopIteration when the file is empty.
        next(in_file, None)
    return [line.rstrip() for line in in_file]
def split_digits(number, digits_after_dp=1):
    """Return digits before and after a decimal point"""
    rounded = round(float(number), digits_after_dp)
    before_dp, _, after_dp = str(rounded).partition('.')
    return [before_dp, after_dp] if after_dp else [before_dp]
import argparse
def get_input_args():
    """Build and parse the command line arguments for prediction.

    Returns:
        argparse.Namespace holding the parsed command line values.
    """
    parser = argparse.ArgumentParser()
    # Positional arguments: the image to classify and the saved checkpoint.
    parser.add_argument('test_image_path', type=str,
                        default='/flowers/test', help='test_image_path')
    parser.add_argument('checkpoint', type=str,
                        default='checkpoint_flower.pth', help='model_save')
    # Optional tuning flags.
    parser.add_argument('--top_k', type=int, default=3,
                        help='top probability')
    parser.add_argument('--category_names', type=str,
                        default='cat_to_name.json', help='file of cat_to_name')
    parser.add_argument('--gpu_usage', type=str, default='gpu',
                        help='using gpu')
    return parser.parse_args()
def format_references(section):
    """Format the "References" section."""
    formatted = [' - **[{0}]** {1}'.format(entry[0], entry[1].strip())
                 for entry in section]
    return '!!! attention "References"\n{0}'.format('\n'.join(formatted))
def convert_string(value, value_type):
    """
    Converts a string to a value according to a given type.
    :param value: the value to convert
    :type value: str
    :param value_type: the destination between 'integer', 'real' and 'text'
        (the docstring previously said 'string', but the code checks 'text')
    :type value_type: str
    :return: the value converted, or None for an unrecognised value_type
    :rtype: object
    """
    if value_type == 'integer':
        return int(value)
    elif value_type == 'real':
        return float(value)
    elif value_type == 'text':
        return value
def ff(items, targets):
    """First-Fit bin packing heuristic.

    Packs each item into the first bin that still has enough remaining
    capacity; items that fit nowhere are collected separately.
    Complexity O(n^2).
    """
    bins = [(capacity, []) for capacity in targets]
    leftovers = []
    for item in items:
        placed = False
        for capacity, content in bins:
            if sum(content) + item <= capacity:
                content.append(item)
                placed = True
                break
        if not placed:
            leftovers.append(item)
    return bins, leftovers
import argparse
def get_parser():
    """Build the CLI argument parser for pre-segmentation image processing."""
    parser = argparse.ArgumentParser(
        description='Image processing prior to segmentation')
    # Location of the raw data for a single field of view (mandatory).
    parser.add_argument('--path', dest='path', required=True,
                        help='Path to raw data of one field of view')
    # Boolean flag, False unless supplied on the command line.
    parser.add_argument('--image_directory_generated',
                        dest='image_directory_generated', action='store_true')
    return parser
def _get_backdrop_error_message(response):
"""
Backdrop should return an error as response with a JSON body like
{'status': 'error', 'message': 'Some error message'}
This attempts to extract the 'Some error message' string. If that fails,
return the raw JSON string.
"""
try:
return response.json()['message']
except Exception:
return response.content | bcb6cd58327807bd1ed8ae4671301f5678ad9370 | 48,073 |
import argparse
def parse():
    """Parse CLI arguments"""
    description = ("Generate Hive script to load selected Avro data stores "
                   "into Hive metastore.")
    parser = argparse.ArgumentParser(description=description)
    input_help = ('Input data store of the form "table_name=HDFS_data_store_path", '
                  'e.g. user=some/cluster/path')
    parser.add_argument('inputs', metavar='input', nargs='+', help=input_help)
    return parser.parse_args()
from typing import Optional
def get_time_limit(env, current_max_episode_length: Optional[int]) -> int:
    """
    Get time limit from environment.
    :param env: Environment from which we want to get the time limit.
    :param current_max_episode_length: Current value for max_episode_length.
    :return: max episode length
    :raises ValueError: when no limit was passed and none can be inferred.
    """
    # An explicitly supplied value always wins.
    if current_max_episode_length is not None:
        return current_max_episode_length
    try:
        inferred = env.get_attr("spec")[0].max_episode_steps
        if inferred is None:
            # Attribute exists but is unset; treat the same as missing.
            raise AttributeError
    except AttributeError:
        raise ValueError(
            "The max episode length could not be inferred.\n"
            "You must specify a `max_episode_steps` when registering the environment,\n"
            "use a `gym.wrappers.TimeLimit` wrapper "
            "or pass `max_episode_length` to the model constructor"
        )
    return inferred
def _device_category_to_string(category_id):
"""
:param category_id: Category ID to convert to a string
:return: Category description
"""
if category_id < 50:
return 'Reserved'
if category_id < 1000:
return 'Temporary'
if category_id < 2000:
return 'Administrative Tools'
if category_id < 3000:
return 'Alarms'
if category_id < 4000:
return 'Analytics'
if category_id < 5000:
return 'Appliances'
if category_id < 6000:
return 'Audio'
if category_id < 7000:
return 'Cameras'
if category_id < 8000:
return 'Climate Control'
if category_id < 9000:
return 'Displays'
if category_id < 10000:
return 'Environmental'
if category_id < 11000:
return 'Health'
if category_id < 12000:
return 'Lighting'
if category_id < 13000:
return 'Locks'
if category_id < 14000:
return 'Media'
if category_id < 15000:
return 'Meters'
if category_id < 16000:
return 'Perimeter Monitoring'
if category_id < 17000:
return 'Remote Controls'
if category_id < 18000:
return 'Robotics'
if category_id < 19000:
return 'Routers and Gateways'
if category_id < 20000:
return 'Security'
if category_id < 21000:
return 'Sensors'
if category_id < 22000:
return 'Shades'
if category_id < 23000:
return 'Social'
if category_id < 24000:
return 'Switches'
if category_id < 25000:
return 'Toys'
if category_id < 26000:
return 'Transportation'
if category_id < 27000:
return 'Videos'
if category_id < 28000:
return 'Water' | 0c9dc1b753f3415173a34c425fe457671724efc9 | 48,078 |
import numpy
def onehot_to_categories(y):
    """
    Transform one-hot vectors to categorical labels (the previous docstring
    described the inverse operation).
    :param y: list of one-hot vectors, ex. [[0, 0, 1], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], ...]
    :return: array of category indices, ex. [2, 0, 2, 1, 2, ...]
    """
    return numpy.asarray(y).argmax(axis=-1)
def config_valid():
    """Valid config dictionary."""
    database = {
        "dir": "data/",
        "file": "faddr-db.json",
    }
    rancid = {
        "dir": "tests/fixtures/rancid_dir/",
        "mapping": {
            "cisco-mf": "cisco_ios",
        },
    }
    return {"database": database, "rancid": rancid}
def sign(x: float) -> float:
    """
    Sign function: returns 1.0 when x is greater than or equal to zero,
    -1.0 otherwise.
    Args:
        x (float): value to compare against zero.
    Returns:
        float: 1.0 or -1.0, depending on the value of x.
    """
    return 1.0 if x >= 0.0 else -1.0
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)] | 08698adc3a54e233c5e70524670aa74c4da0535c | 48,083 |
def add_coord_attrs(xds):
    """Ensures that files are Georeferened for Panoply"""
    lat_attrs = {
        "standard_name": "latitude",
        "units": "degrees_north",
        "axis": "Y",
    }
    lon_attrs = {
        "standard_name": "longitude",
        "units": "degrees_east",
        "axis": "X",
    }
    xds["lat"].attrs = lat_attrs
    xds["lon"].attrs = lon_attrs
    return xds
def get_reconstruction_config():
    """Gets reconstruction config.

    Builds a plain dict of settings for the progressive reconstruction
    training pipeline: input file patterns and TF Record schema, per-resolution
    batch/epoch schedules, growth-step counts derived from those schedules,
    and summary/checkpoint knobs. Order matters: the derived entries near the
    end read earlier entries of the same dict.

    Returns:
        Dictionary of reconstruction configs.
    """
    reconstruction_dict = dict()
    # Whether using multiple resolutions across a list of TF Records.
    reconstruction_dict["use_multiple_resolution_records"] = True
    # GCS locations to read reconstruction training data.
    reconstruction_dict["train_file_patterns"] = [
        "data/cifar10_car/train_{0}x{0}_*.tfrecord".format(4 * 2 ** i)
        for i in range(4)
    ]
    # GCS locations to read reconstruction evaluation data.
    reconstruction_dict["eval_file_patterns"] = [
        "data/cifar10_car/test_{0}x{0}_*.tfrecord".format(4 * 2 ** i)
        for i in range(4)
    ]
    # Which dataset to use for reconstruction training:
    # "mnist", "cifar10", "cifar10_car", "tf_record"
    reconstruction_dict["dataset"] = "tf_record"
    # TF Record Example feature schema for reconstruction.
    reconstruction_dict["tf_record_example_schema"] = [
        {
            "name": "image_raw",
            "type": "FixedLen",
            "shape": [],
            "dtype": "str"
        },
        {
            "name": "label",
            "type": "FixedLen",
            "shape": [],
            "dtype": "int"
        }
    ]
    # Name of image feature within schema dictionary.
    reconstruction_dict["image_feature_name"] = "image_raw"
    # Encoding of image: raw, png, or jpeg.
    reconstruction_dict["image_encoding"] = "raw"
    # Height of predownscaled image if NOT using multiple resolution records.
    reconstruction_dict["image_predownscaled_height"] = 32
    # Width of predownscaled image if NOT using multiple resolution records.
    reconstruction_dict["image_predownscaled_width"] = 32
    # Depth of image, number of channels.
    reconstruction_dict["image_depth"] = 3
    # Name of label feature within schema dictionary.
    reconstruction_dict["label_feature_name"] = "label"
    # Schedule list of number of epochs to train for reconstruction.
    reconstruction_dict["num_epochs_schedule"] = [1] * 9
    # Number of examples in one epoch of reconstruction training set.
    reconstruction_dict["train_dataset_length"] = 400
    # Schedule list of number of examples in reconstruction training batch for each resolution block.
    reconstruction_dict["train_batch_size_schedule"] = [4] * 9
    # Schedule list of number of examples in reconstruction evaluation batch for each resolution block.
    reconstruction_dict["eval_batch_size_schedule"] = [4] * 9
    # Number of steps/batches to evaluate for reconstruction.
    reconstruction_dict["eval_steps"] = 1
    # List of number of examples until block added to networks.
    reconstruction_dict["num_examples_until_growth_schedule"] = [
        epochs * reconstruction_dict["train_dataset_length"]
        for epochs in reconstruction_dict["num_epochs_schedule"]
    ]
    # List of number of steps/batches until block added to networks.
    reconstruction_dict["num_steps_until_growth_schedule"] = [
        ex // bs
        for ex, bs in zip(
            reconstruction_dict["num_examples_until_growth_schedule"],
            reconstruction_dict["train_batch_size_schedule"]
        )
    ]
    # Whether to autotune input function performance for reconstruction datasets.
    reconstruction_dict["input_fn_autotune"] = True
    # How many steps to train before writing steps and loss to log.
    reconstruction_dict["log_step_count_steps"] = 10
    # How many steps to train before saving a summary.
    reconstruction_dict["save_summary_steps"] = 10
    # Whether to write loss summaries for TensorBoard.
    reconstruction_dict["write_loss_summaries"] = False
    # Whether to write generator image summaries for TensorBoard.
    reconstruction_dict["write_generator_image_summaries"] = False
    # Whether to write encoder image summaries for TensorBoard.
    reconstruction_dict["write_encoder_image_summaries"] = False
    # Whether to write variable histogram summaries for TensorBoard.
    reconstruction_dict["write_variable_histogram_summaries"] = False
    # Whether to write gradient histogram summaries for TensorBoard.
    reconstruction_dict["write_gradient_histogram_summaries"] = False
    # How many steps to train reconstruction before saving a checkpoint.
    reconstruction_dict["save_checkpoints_steps"] = 10000
    # Max number of reconstruction checkpoints to keep.
    reconstruction_dict["keep_checkpoint_max"] = 10
    # Whether to save checkpoint every growth phase.
    reconstruction_dict["checkpoint_every_growth_phase"] = True
    # Whether to save checkpoint every epoch.
    reconstruction_dict["checkpoint_every_epoch"] = True
    # Checkpoint growth index to restore checkpoint.
    reconstruction_dict["checkpoint_growth_idx"] = 0
    # Checkpoint epoch index to restore checkpoint.
    reconstruction_dict["checkpoint_epoch_idx"] = 0
    # The checkpoint save path for saving and restoring.
    reconstruction_dict["checkpoint_save_path"] = ""
    # Whether to store loss logs.
    reconstruction_dict["store_loss_logs"] = True
    # Whether to normalize loss logs.
    reconstruction_dict["normalized_loss_logs"] = True
    # Whether to print model summaries.
    reconstruction_dict["print_training_model_summaries"] = False
    # Initial growth index to resume training midway.
    reconstruction_dict["initial_growth_idx"] = 0
    # Initial epoch index to resume training midway.
    reconstruction_dict["initial_epoch_idx"] = 0
    # Max number of times training loop can be restarted such as for NaN losses.
    reconstruction_dict["max_training_loop_restarts"] = 10
    # Whether to scale layer weights to equalize learning rate each forward pass.
    reconstruction_dict["use_equalized_learning_rate"] = True
    # Whether to normalize reconstruction losses by number of pixels.
    reconstruction_dict["normalize_reconstruction_losses"] = True
    return reconstruction_dict
def get_version(opts, app):
    """Get version of a specific app
    Args:
        opts (dict): Nephos options dict.
        app (str): Helm application name.
    Returns:
        str: Desired version of Helm app, if specified. Defaults to None.
    """
    # Chained .get() collapses the nested membership checks.
    return opts.get("versions", {}).get(app)
import math
def extract_euler_angles(mat):
    """
    Extract Euler angles from a 3x3 rotation matrix.

    Algorithm taken from "Extracting Euler Angles from a Rotation Matrix",
    Mike Day, Insomniac Games, mday@insomniacgames.com
    https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf
    The authors follow the notational conventions of Shoemake's "Euler Angle
    Conversion", Graphics Gems IV, pp. 222-9, except that their vectors are
    row vectors instead of column vectors; all their matrices are therefore
    transposed relative to Shoemake's, and rotation sequences read left to
    right.
    """
    (r00, r01, r02), (r10, r11, r12), (r20, r21, r22) = mat
    ax = math.atan2(r12, r22)
    ay = math.atan2(-r02, math.sqrt(r00 ** 2 + r01 ** 2))
    sin_x = math.sin(ax)
    cos_x = math.cos(ax)
    az = math.atan2(sin_x * r20 - cos_x * r10, cos_x * r11 - sin_x * r21)
    # Negate each angle: this re-interprets vectors as columns instead of
    # rows, i.e. back to the common convention that vectors are columns.
    return -ax, -ay, -az
def tagged_struct_columns(typegraph, struct_id):
    """ Return a dict mapping the struct column names to versions tagged with the id.
    """
    tag = str(struct_id)
    columns = ('struct_id',) + tuple(typegraph.fields['Struct' + tag])
    return {name: "t{:s}_{:s}".format(tag, name) for name in columns}
def get_winner(player_one, player_two):
    """Searches through each player's weapon's comparison dictionary to match
    the other players weapon id to their weapon id, then prints the comparison.
    Also updates player scores, clears player weapons and returns the winning
    player.

    Returns None when either player has no weapon, the string "tie" when the
    weapons are equal, otherwise the winning player object (whose ``wins``
    counter is incremented). Both players' weapons are reset to None as a
    side effect.
    """
    # No contest unless both players have chosen a weapon.
    if player_one.weapon is None or player_two.weapon is None:
        return None
    if player_one.weapon == player_two.weapon:
        return "tie"
    winner = None
    # A ``compares`` entry whose other_gesture_id matches the opponent's
    # weapon id means this player's weapon wins the round.
    for comparison in player_one.weapon.compares:
        if int(comparison["other_gesture_id"]) == player_two.weapon.id:
            print(player_one.weapon.title, comparison["verb"][0],
                  player_two.weapon.title)
            player_one.wins += 1
            winner = player_one
    for comparison in player_two.weapon.compares:
        if int(comparison["other_gesture_id"]) == player_one.weapon.id:
            print(player_two.weapon.title, comparison["verb"][0],
                  player_one.weapon.title)
            player_two.wins += 1
            winner = player_two
    # Clear both weapons ready for the next round.
    player_one.weapon = None
    player_two.weapon = None
    return winner
def maybe_tuple(value):
    """Return `value` as a tuple. If it is already a tuple, return it
    unchanged. Otherwise return a 1-element tuple containing `value`."""
    return value if isinstance(value, tuple) else (value,)
def config_identifier(converter, model_name):
    """Create identifier of configuration based on data `converter` and `model_name`"""
    normalized = model_name.lower().replace('-', '_')
    return '{}_{}'.format(normalized, converter)
def get_node_placement(node_id, model_structure):
    """
    get location of node based on node_id

    Walks cumulative layer sizes (taken from ``i[0][-1]`` of each entry in
    ``model_structure``) and returns ``(layer_index, offset)`` for the layer
    containing ``node_id``, or -1 when ``node_id`` is past the last layer.

    NOTE(review): the offset ``abs(prev-(node_id % shape))`` looks
    suspicious -- the conventional within-layer offset would be
    ``node_id - prev``; confirm the intended semantics before relying on
    the second tuple element.
    """
    prev = 0
    for n,i in enumerate(model_structure):
        # Layer width presumably lives at i[0][-1] -- TODO confirm shape of
        # model_structure entries.
        shape = i[0][-1]
        # shape becomes the cumulative node count up to and including layer n.
        shape = shape+prev
        if node_id < shape:
            return (n,abs(prev-(node_id % shape)))
        prev=shape
    return -1
def get_context(result):
    """
    Return the dictionary of values that will be sent to the template.

    result: the data to be placed into the document
    """
    context = {'dates': result}
    return context
import torch
def create_rotations_labels(batch_size, device):
    """Creates the rotation labels: batch_size copies of each class 0..3,
    concatenated in order (0...0, 1...1, 2...2, 3...3)."""
    classes = torch.arange(4, device=device)
    return classes.repeat_interleave(batch_size)
def _get_id_field_from_input_field_name(input_field_name: str) -> str:
"""
Map plural input fields like children to the appropriate field child_ids in this
case.
"""
if input_field_name == "children":
return "child_ids"
return input_field_name.rstrip("s") + "_ids" | 7ce6368f6d9462718d0acf6b46099c8ae80e0e54 | 48,098 |
def test_function(x):
    """Test Function Docstring.

    Returns ``x`` incremented by one.
    """
    return x + 1
def are_points_with_errors_adjacent(points, errs):
    """Returns whether a given set of points are adjacent when taking their errors into account.

    errs[0] holds each point's right-side error and errs[1] its left-side
    error. Consecutive points touch when the right edge of one matches the
    left edge of the next within 1% of their combined errors.
    """
    for idx in range(len(points) - 1):
        nxt = idx + 1
        right_error = errs[0][idx]
        left_error = errs[1][nxt]
        upper_edge = points[idx] + right_error
        lower_edge = points[nxt] - left_error
        tolerance = (right_error + left_error) / 100.0
        if abs(lower_edge - upper_edge) > tolerance:
            return False
    return True
def valid_benefit_data(promotion):
    """ Valid JSON data for creating a new Benefit object """
    return {'promotion': promotion.pk}
from typing import Tuple
from typing import List
import os
def get_list_of_files_and_directories(directory: str) -> Tuple[List[str], List[str]]:
    """returns [[files],[directories]]"""
    entries = os.listdir(directory)
    directories = [name for name in entries
                   if os.path.isdir(os.path.join(directory, name))]
    files = [name for name in entries
             if not os.path.isdir(os.path.join(directory, name))]
    return files, directories
def presale_freeze_ends_at(web3) -> int:
    """How long presale funds stay frozen until refund."""
    pending_timestamp = web3.eth.getBlock('pending').timestamp
    return pending_timestamp + 120
def convert_string_to_pymath(strin):
    """ Convert a string to a 'math' format.

    Wraps non-blank input in '$...$' and replaces spaces with the LaTeX
    escaped space '\\ '; blank (or whitespace-only) input is returned
    unchanged.
    """
    # The original compared `strin.strip() is not ''` -- identity instead of
    # equality, which is a SyntaxWarning on modern Python; truthiness is the
    # correct test. The replacement string is written with an explicit
    # escaped backslash instead of relying on the unrecognised '\ ' escape.
    if strin.strip():
        return '$' + strin.replace(' ', '\\ ') + '$'
    return strin
def _any_all_none(series, values, func_name):
""" Returns the index of rows from series containing any/all of the
given values as requested by func_name.
Parameters
----------
series : pandas.Series
The data to be queried
values : list-like
The values to be tested
func_name : string ('any' or 'all' ONLY)
The name of the logic being used
Returns
-------
index : pandas.index
The index of series for rows containing any/all of the given values.
"""
if series.dtype=='object':
# Get the dichotomous version of series
dummies = series.str.get_dummies(';')
# Slice the dummies column-wise for only the targeted values
values = [str(v) for v in values]
cols = [col for col in dummies.columns if col in values]
# If not valid columns are availabe, the result is no rows
if not cols:
return []
else:
dummies = dummies[cols]
# Slice the dummies row-wise for only rows with any/all/none of
# the targeted responses
if func_name=='any':
# Apply 'any' logic
dummies = dummies[(dummies.T!=0).any()]
elif func_name=='all':
# Apply 'all' logic
dummies = dummies[(dummies.T!=0).all()]
else:
# Apply 'none' logic
dummies = dummies[(dummies.T==0).all()]
# Return the index
return dummies.index
elif series.dtype in ['int64', 'float64']:
# Slice the series row-wise for only rows with any/all of the
# targets responses
if func_name=='any' or (func_name=='all' and len(values)==1):
series = series[series.isin(values)].dropna()
elif func_name=='none':
series = series[~series.isin(values)]
else:
# has_all() for multiple values is being requested on a
# single-type variable, so the result will be none
return []
# Return the index
return series.index
else:
raise TypeError(
"The dtype '%s' of series is incompatible with has_%s()" %
series.dtype,
func_name
) | 6d86241bd7657a39736f701a7637939854b8b3ca | 48,113 |
def conj(pary):
    """
    Return the conjugation of a 2D PitchArray.

    Same as PitchArray.conj(), but creates a new copy instead of modifying
    the input in place.

    Parameter:
    ----------
    pary: PitchArray
    """
    return pary.conj(inplace=False)
def assignUniqueName(df, columnName="building_name", groupingCol=""):
    """
    Assign comprehensible unique names based on a column of building or room
    names. If columnName is not a valid header, the first column is used as
    the name column instead. groupingCol is also a header value; when given,
    its value is prefixed (with an underscore) to each name.

    Example:
        code: columnName = "room_name", groupingCol = "building_name"
        csv file:
        room_name, building_name --> outputs (modified name)
        dorm, DormA --> DormA_dorm
        dorm, DormA --> DormA_dorm2
        dorm, DormB --> DormB_dorm

    (The previous docstring example showed "DormA_dorm_1"/"DormA_dorm_2",
    which did not match the code: the first occurrence keeps no suffix and
    the counter is appended without an underscore.)
    """
    headerVal, nameCount = list(df.columns.values), dict()
    # Fall back to the first column when the requested one is absent.
    columnName = headerVal[0] if columnName not in headerVal else columnName
    grouping = groupingCol != ""
    # Names that are intentionally allowed to repeat without a suffix.
    shared_names = ["transit_space_hallway", "transit_space_hub", "transit_space"]
    for index, rowVal in df.iterrows():
        itemName = str(rowVal[columnName])
        groupName = str(rowVal[groupingCol]) + "_" if grouping else ""
        newName = (groupName + itemName).replace(" ", "_")
        # Count the occurrence of unique names.
        nameCount[newName] = nameCount.get(newName, 0) + 1
        entry = newName
        if nameCount[newName] > 1 and newName not in shared_names:
            entry += str(nameCount[entry])
        df.loc[index, columnName] = entry
    return df
def to_iso639_part1(language_code):
    """
    Convert codes like "en-us" to "en"
    """
    primary, _, _region = language_code.partition('-')
    return primary
import random
def rand_int():
    """
    052
    Display a random integer between 1 and 100 inclusive.
    """
    # randrange(1, 100) excluded 100, contradicting the docstring;
    # randint's upper bound is inclusive.
    return random.randint(1, 100)
def get_descendants(cur, term_id):
    """Return a set of descendants for a given term ID.

    Recursively follows rdfs:subClassOf / rdfs:subPropertyOf statements in
    the ``statements`` table, starting from term_id, via a recursive CTE.
    The returned set includes term_id itself (it is seeded into the CTE).

    NOTE(review): term_id is spliced into the SQL with an f-string; if it
    can ever come from untrusted input this is an SQL injection vector --
    prefer a parameterized query for the DB driver in use.
    """
    cur.execute(
        f"""WITH RECURSIVE descendants(node) AS (
    VALUES ('{term_id}')
    UNION
     SELECT stanza AS node
    FROM statements
    WHERE predicate IN ('rdfs:subClassOf', 'rdfs:subPropertyOf')
      AND stanza = '{term_id}'
    UNION
    SELECT stanza AS node
    FROM statements, descendants
    WHERE descendants.node = statements.object
      AND statements.predicate IN ('rdfs:subClassOf', 'rdfs:subPropertyOf')
    )
    SELECT * FROM descendants""",
    )
    return set([x[0] for x in cur.fetchall()])
def digit(n, d):
    """Count occurrences of digit ``d`` in the squares of 0..n inclusive.

    :param n: upper bound (inclusive) of the numbers to square.
    :param d: digit (0-9) to count.
    :return: total number of times str(d) appears across all squares.
    """
    # The original kept an unused ``res`` list alongside the counter;
    # str.count does the per-square tally directly.
    target = str(d)
    return sum(str(num * num).count(target) for num in range(n + 1))
import types
def good_decorator_accepting_args(decorator):
    """This decorator makes decorators behave well wrt to decorated
    functions names, doc, etc.
    Differently from good_decorator, this accepts decorators possibly
    receiving arguments and keyword arguments. It can be used indifferently
    with class methods and functions."""
    def new_decorator(*args, **kwargs):
        wrapped = decorator(*args, **kwargs)
        # When invoked directly on a plain function (no extra arguments),
        # copy that function's metadata onto the wrapper it produced.
        if 1 == len(args) and isinstance(args[0], types.FunctionType):
            wrapped.__name__ = args[0].__name__
            wrapped.__doc__ = args[0].__doc__
            wrapped.__dict__.update(args[0].__dict__)
        return wrapped
    new_decorator.__name__ = decorator.__name__
    new_decorator.__doc__ = decorator.__doc__
    new_decorator.__dict__.update(decorator.__dict__)
    # Required for Sphinx' automodule.
    new_decorator.__module__ = decorator.__module__
    return new_decorator
def is_step(cur, prev):
    """
    Checks whether pairs cur (current) and prev (previous) are consecutive tracks.
    Works if disc_num or track_num is a single letter
    :param cur: [disc_num, track_num]
    :param prev: [disc_num, track_num]
    :return: bool. True if cur comes after prev, False otherwise
    """
    def to_numbers(pair):
        # Empty fields count as '0'; single letters map A->1, B->2, ...
        disc, track = (field if len(field) > 0 else '0' for field in pair[:2])
        return [int(disc) if disc.isdigit() else ord(disc) - 64,
                int(track) if track.isdigit() else ord(track) - 64]

    c = to_numbers(cur)
    p = to_numbers(prev)
    disc_delta = c[0] - p[0]
    if disc_delta == 0:
        # Same disc: must be the immediately following track.
        return c[1] - p[1] == 1
    if disc_delta == 1:
        # Next disc: must start at the first track.
        return c[1] < 2
    # Any other disc jump cannot be consecutive.
    return False
import six
def check(pkt, pktType, keyMatches=None, **valMatches):
    """This function takes an object that was expected to come from a packet (after it has been JSONized)
    and compares it against the arg requirements so you don't have to have 10 if() statements to look for keys in a dict, etc..
    Args:
        @pkt : object to look at
        @pktType : object type expected (dict, list, etc..)
        @keyMatches : a list of minimum keys found in parent level of dict, expected to be an array
        @valMatches : a dict of key:value pairs expected to be found in the parent level of dict
                      the value can be data (like 5) OR a type (like this value must be a @list@).
    Returns:
        None if everything matches, otherwise it returns a string as to why it failed."""
    # First check that the pkt type is equal to the input type
    if(type(pkt) is not pktType):
        return 'expected %s' % str(pktType)
    if(keyMatches):
        # Convert the keys to a set
        keyMatches = set(keyMatches)
        # The keyMatches is expected to be an array of the minimum keys we want to see in the pkt if the type is dict
        if(type(pkt) is dict):
            if(not keyMatches.issubset(pkt.keys())):
                return 'missing, "%s"' % ', '.join(list(keyMatches - set(pkt.keys())))
            # BUG FIX: previously this returned None as soon as the key check
            # passed, silently skipping any valMatches supplied alongside
            # keyMatches. Now we fall through to the value checks.
    # Finally for anything in the valMatches find those values
    if(valMatches):
        # Pull out the dict object from the "valMatches" key
        if('valMatches' in valMatches.keys()):
            matchObj = valMatches['valMatches']
        else:
            matchObj = valMatches
        # Plain .items() replaces the unnecessary six.iteritems (Python 3).
        for k, v in matchObj.items():
            # Check for the key
            if(k not in pkt.keys()):
                return 'key missing "%s"' % k
            # See how we should be comparing it:
            if(type(v) is type):
                if(type(pkt[k]) is not v):
                    return 'key "%s", bad value type, "%s", expected "%s"' % (k, type(pkt[k]), v)
            else:
                # If key exists check value
                if(v != pkt[k]):
                    return 'key "%s", bad value data, "%s", expected "%s"' % (k, pkt[k], v)
    return None
def calc_kl_div(a, b):
    """
    Calculates the KL-divergence with ref tensor a, comparing to a new tensor b.
    source of formula: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
    """
    log_ratio = (a / b).log()
    return (a * log_ratio).sum()
def mask_results(result_array, value_if_fits, settings):
    """
    Determines if each position in one array is walkable. If so, assigns the specified float
    value to that position; otherwise assigns settings["unwalkable_value"].
    This is useful when combining results later: get a set of scores for
    each pose, then stack those score matrices.

    :param result_array: array of collision percentages (not modified).
    :param value_if_fits: value written where collision <= max_collision_pct.
    :param settings: dict with "max_collision_pct" and "unwalkable_value".
    :return: a new array with both values filled in.
    """
    # BUG FIX: the original re-tested the array *after* the first masked
    # assignment, so whenever value_if_fits > max_collision_pct the freshly
    # written values were immediately overwritten as "unwalkable". Compute
    # the mask once, before any mutation.
    fits = result_array <= settings["max_collision_pct"]
    result_array = result_array.copy()
    result_array[fits] = value_if_fits
    result_array[~fits] = settings["unwalkable_value"]
    return result_array
import sys
def unittest_command(suite):
    """Get new command for unittest suite."""
    base = [sys.executable, "-m", "unittest", "discover"]
    options = ["-v", "-s", suite, "-p", "*_test.py"]
    return base + options
def _get_instance_list(mig, field='name', filter_list=None):
"""
Helper to grab field from instances response.
:param mig: Managed Instance Group Object from libcloud.
:type mig: :class: `GCEInstanceGroupManager`
:param field: Field name in list_managed_instances response. Defaults
to 'name'.
:type field: ``str``
:param filter_list: list of 'currentAction' strings to filter on. Only
items that match a currentAction in this list will
be returned. Default is "['NONE']".
:type filter_list: ``list`` of ``str``
:return: List of strings from list_managed_instances response.
:rtype: ``list``
"""
filter_list = ['NONE'] if filter_list is None else filter_list
return [x[field] for x in mig.list_managed_instances()
if x['currentAction'] in filter_list] | efcb7c948583e7433ff30030cd934903f9953632 | 48,133 |
def comp_nthoctave_axis(noct, freqmin, freqmax):
    """Build the 1/n-octave band frequency axis between freqmin and freqmax.

    Parameters
    ----------
    noct: int
        octave fraction (3 for third-octave bands, etc.)
    freqmin: float
        minimum frequency
    freqmax: float
        maximum frequency

    Returns
    -------
    list of band centre frequencies
    """
    if noct == 3:
        # Standard normalized third-octave centre frequencies.
        standard = (
            10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80,
            100, 125, 160, 200, 250, 315, 400, 500, 630, 800,
            1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000,
            10000, 12500, 16000, 20000,
        )
        return [f for f in standard if freqmin <= f <= freqmax]
    # General case: grow geometrically from the 1 kHz reference in both
    # directions, then trim the overshoot at each end.
    f0 = 1000
    axis = [f0]
    step = 1
    while axis[-1] <= freqmax:
        axis.append(f0 * 2.0 ** (step / noct))
        step += 1
    axis = axis[:-2]
    step = -1
    while axis[0] > freqmin:
        axis.insert(0, f0 * 2.0 ** (step / noct))
        step -= 1
    return axis[1:]
import six
import socket
import ssl
import hashlib
def get_cert_fingerprint(url, log=None):
    """Return the SHA-1 hex fingerprint of a TLS server's certificate.

    Host and port come from *url*; when the port is omitted, the default
    port for the URL scheme is looked up via ``getservbyname``.
    """
    parsed = six.moves.urllib.parse.urlparse(url)
    host, _, port = parsed.netloc.partition(':')
    port = int(port) if port else socket.getservbyname(parsed.scheme)
    if log:
        log.info('retrieving certificate from server %s, port %i', host, port)
    # NOTE(review): PROTOCOL_TLSv1 is deprecated and may be rejected by
    # modern servers — confirm whether a newer protocol should be used.
    pem_cert = ssl.get_server_certificate((host, port),
                                          ssl_version=ssl.PROTOCOL_TLSv1)
    fprint = hashlib.sha1(ssl.PEM_cert_to_DER_cert(pem_cert)).hexdigest()
    if log:
        log.info('found thumbprint %s', fprint)
    return fprint
def get_clauses(id_dict, db):
    """Build sqlalchemy ``in_`` clauses from a dict of ids keyed by id type.

    ``trid`` and ``tcid`` keys are mapped onto the primary keys of the
    TextRef and TextContent tables respectively; every other key is treated
    as a TextRef column name.  Note that an id_dict with no ids yields an
    empty clause list, which in a query selects everything rather than
    nothing.

    Parameters
    ----------
    id_dict : dict {id_type: [int or str]}
        Lists of ids indexed by their type.
    db : indra.db.DatabaseManager instance
        Used only to reference table columns; never queried.

    Returns
    -------
    clause_list : list [sqlalchemy clauses]
        Suitable for ``db.filter_query(<table>, <other clauses>, *clause_list)``.
    """
    special = ('trid', 'tcid')
    clauses = []
    # Generic id types map directly onto TextRef columns.
    for id_type, id_list in id_dict.items():
        if not id_list or id_type in special:
            continue
        clauses.append(getattr(db.TextRef, id_type).in_(id_list))
    # trid/tcid map to the integer primary keys of their tables.
    for id_type, table in [('trid', db.TextRef), ('tcid', db.TextContent)]:
        id_list = id_dict.get(id_type)
        if id_list:
            clauses.append(table.id.in_([int(i) for i in id_list]))
    return clauses
def check_boolean(value):
    """Validate that *value* is a python bool.

    Args:
        value: The value to evaluate.

    Returns:
        A string error message when the value is not a bool; ``None``
        otherwise.
    """
    if isinstance(value, bool):
        return None
    return "Value must be either True or False, not %s %s" % (type(value), value)
def sublists_index_avg(list, index):
    """ (list of list of number) -> float

    Return the mean of the values found at position *index* in each sublist.

    >>> sublists_index_avg([[1, 2, 3], [2, 3, 4]], 2)
    3.5
    """
    # NOTE: the parameter name shadows the builtin ``list``; kept for
    # backward compatibility with keyword callers.  The original also
    # shadowed the builtin ``sum`` with a local accumulator — use the
    # builtin directly instead.
    return sum(item[index] for item in list) / len(list)
def add_asset(patrowl_api, title, description):
    """Create a 'domain' asset in PatrOwl and return the API response.

    The asset is created with low criticity, external exposure and the
    'All' tag.  Returns ``None`` when the API call fails for any reason.
    """
    try:
        return patrowl_api.add_asset(
            title,
            title,
            'domain',
            description,
            'low',
            'external',
            tags=['All'])
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt and
        # SystemExit; catch only ordinary errors and treat them as
        # "asset not created".
        return None
def frontiers_from_bar_to_time(seq, bars):
    """Convert frontiers expressed as bar indexes into absolute times.

    A frontier is taken to be the *end* time of its bar.  Duplicate times
    are dropped while preserving order.

    Parameters
    ----------
    seq : list of integers
        The frontiers, in bar indexes.
    bars : list of tuple of floats
        The bars, as (start time, end time) tuples.

    Returns
    -------
    list of float
        The frontiers, converted to time.
    """
    times = []
    for bar_idx in seq:
        end_time = bars[bar_idx][1]
        if end_time not in times:
            times.append(end_time)
    return times
import sys
def get_to():
    """Return the message destination (a channel, or you privately).

    The destination is the third command-line argument.
    """
    destination = sys.argv[2]
    return destination
def bin2string(arr):
    """Convert the binary number array *arr* to its string representation."""
    return ''.join(str(int(bit)) for bit in arr)
def nodes_to_edges(nodes, edges_dict):
    """Map a path of node ids onto the ids of the edges joining them.

    :param nodes: sequence of node ids (anything ``int()`` accepts)
    :param edges_dict: mapping of (from_node, to_node) -> edge id
    :return: list of edge ids, ``None`` where no such edge exists
    """
    consecutive_pairs = zip(nodes, nodes[1:])
    return [edges_dict.get((int(a), int(b))) for a, b in consecutive_pairs]
def get_form_data(form):
    """Collect the submitted data of an HTML form.

    :param form: an xpath-capable selector wrapping the form element
    :return: dict mapping each named ``<input>``'s name to its value
    """
    data = {}
    for field in form.xpath(".//input"):
        name = field.xpath("./@name").extract_first()
        if name:
            data[name] = field.xpath("./@value").extract_first()
    return data
import copy
def remove_root_duplicate(result):
    """
    Remove, from every nested sub-dict of ``result``, any key that also
    exists at the top level of ``result``.  Copy-on-write: ``result`` is
    deep-copied only once a duplicate is actually found; otherwise the
    original object is returned unchanged.

    COW enabled while modifing result.
    :param result: dict
    e.g
    original dict:
    {'A': 1, 'B': {'A': 3, 'C': {'B': {'A': 4}}, 'D': 4}}
    remote root duplicate dict:
    {'A': 1, 'B': {'D': 4}}
    """
    cow_result = {}        # lazily-populated deep copy (the COW target)
    root_keys = result.keys()
    key_path_queue = []    # path of keys from the root to the dict being scanned
    def current_layer_check(current_result):
        # Delete, from the COW copy, every key of this layer that shadows a root key.
        duplicate_keys = [k for k in current_result if k in root_keys]
        if duplicate_keys:
            if not cow_result:
                # First duplicate found: materialize the writable copy.
                cow_result.update(copy.deepcopy(result))
            # Walk the recorded key path down into the copy to locate the
            # dict that corresponds to current_result.
            key_path_index = 0
            c_result = cow_result
            while key_path_index < len(key_path_queue):
                if not c_result:
                    break
                current = c_result
                c_result = c_result.get(
                    key_path_queue[key_path_index], None)
                if key_path_index == len(key_path_queue) - 1:
                    # Reached the target layer: drop the duplicates there.
                    if c_result:
                        for k in duplicate_keys:
                            del c_result[k]
                        # Prune the sub-dict entirely if it became empty.
                        if not c_result:
                            del current[key_path_queue[key_path_index]]
                key_path_index += 1
    def loop_dict(inner_result, first_layer=True):
        # Depth-first walk of nested dicts; the root layer itself is never checked.
        if not first_layer:
            current_layer_check(inner_result)
        for k in inner_result:
            if isinstance(inner_result[k], dict):
                key_path_queue.append(k)
                loop_dict(inner_result[k], False)
                key_path_queue.pop()
    loop_dict(result)
    return cow_result if cow_result else result | 05da39ee04a732e1767c8e17966e9b06e65a9f1e | 48,153 |
def is_app_code(code: int) -> bool:
    """
    Check whether a code is part of the app range.

    :param code: Code to check.
    :return: True when ``code`` lies strictly between 0 and 0x10.
    """
    above_lower = code > 0
    below_upper = code < 0x10
    return above_lower and below_upper
def generateAbsCompUFromRelaCompUTex(var, mean, relativeU, cu, unit):
    """Render the absolute combined-uncertainty equation as display LaTeX.

    :param var: variable name (LaTeX fragment)
    :param mean: Decimal-like mean value (must provide ``to_eng_string``)
    :param relativeU: Decimal-like relative uncertainty
    :param cu: Decimal, should only have 1 effective digit
    :param unit: unit string
    :return: LaTeX display-math string ``\\[u(var)=...\\]``
    """
    # Raw f-string: the original concatenated non-raw pieces containing the
    # invalid escape sequences '\[', '\ ' and '\]' (DeprecationWarning, a
    # SyntaxError in future Python).  The produced text is identical.
    return (
        rf'\[u({var})=\bar{{{var}}}\cdot E_{{{var}}} = '
        rf'{mean.to_eng_string()}\times{relativeU.to_eng_string()}'
        rf'={cu.to_eng_string()}\ ({unit})\]'
    )
import torch
def label_smoothing(y_pred, y_true, eta):
    """Return the label-smoothed one-hot target tensor.

    The true class receives ``1 - eta + eta / n_classes`` and every other
    class receives ``eta / n_classes``.
    """
    n_classes = y_pred.size(1)
    # One-hot encode the integer class targets.
    one_hot = torch.zeros_like(y_pred)
    one_hot.scatter_(1, torch.unsqueeze(y_true, 1).to(dtype=torch.long), 1)
    # Blend the hard targets with a uniform distribution.
    return one_hot * (1 - eta) + eta / n_classes
def get_module_params_subsection(module_params, tms_config, resource_key=None):
    """
    Return the subsection of ``module_params`` relevant to one TMS config.

    For ``TMS_GLOBAL`` the global keys are copied over; for the group
    configs only the entries whose ``id`` equals *resource_key* are kept.
    """
    mp = {}
    if tms_config == "TMS_GLOBAL":
        for key in ("certificate", "compression", "source_interface", "vrf"):
            mp[key] = module_params[key]
        return mp
    # Each remaining config selects one list-valued key of module_params.
    group_keys = {
        "TMS_DESTGROUP": "destination_groups",
        "TMS_SENSORGROUP": "sensor_groups",
        "TMS_SUBSCRIPTION": "subscriptions",
    }
    key = group_keys.get(tms_config)
    if key is not None:
        mp[key] = [item for item in module_params[key]
                   if item["id"] == resource_key]
    return mp
from typing import Tuple
def get_nonhybrid_data(qdata, args) -> Tuple:
    """Assemble train/valid/test loaders from the pre-trained autoencoder
    latent space for non-hybrid VQC testing.

    The train loader is built only when *args* carries a ``batch_size``;
    otherwise it is ``None``.
    """
    train_loader = None
    if "batch_size" in args:
        batch = args["batch_size"]
        train_loader = [
            qdata.batchify(qdata.get_latent_space("train"), batch),
            qdata.batchify(qdata.ae_data.trtarget, batch),
        ]
    valid_loader = [qdata.get_latent_space("valid"), qdata.ae_data.vatarget]
    test_loader = [qdata.get_latent_space("test"), qdata.ae_data.tetarget]
    return train_loader, valid_loader, test_loader
def get_group_lists(self, sym_grouping):
    """Return the index lists for the left and right symmetry groups.

    ``sym_grouping`` is a (left_size, right_size) pair; the right group's
    indices continue where the left group's indices end.
    """
    left_size, right_size = sym_grouping[0], sym_grouping[1]
    left_idx = list(range(left_size))
    right_list = [left_size + k for k in range(right_size)]
    return left_idx, right_list
def actions(board):
    """
    Return the set of all (row, col) positions still empty (None) on the board.
    """
    return {(r, c)
            for r, row in enumerate(board)
            for c, cell in enumerate(row)
            if cell is None}
def chromatic_induction_factors(n):
    """
    Return the chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.

    Parameters
    ----------
    n : numeric
        Function of the luminance factor of the background :math:`n`.

    Returns
    -------
    tuple
        Chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`
        (identical values).

    Examples
    --------
    >>> chromatic_induction_factors(0.2)  # doctest: +ELLIPSIS
    (1.0003040..., 1.0003040...)
    """
    factor = 0.725 * (1 / n) ** 0.2
    return factor, factor
import torch
def distance2bbox(points, distance, max_shape=None):
    """Decode distance predictions into bounding boxes.

    Args:
        points (Tensor): Shape (n, 2), [x, y] anchor points.
        distance (Tensor): Per-point distances to the four boundaries
            (left, top, right, bottom).
        max_shape (tuple): Optional image shape (h, w) used to clip boxes.

    Returns:
        Tensor: Decoded bboxes of shape (n, 4), [x1, y1, x2, y2].
    """
    xs, ys = points[:, 0], points[:, 1]
    left, top, right, bottom = (distance[:, i] for i in range(4))
    x1, y1 = xs - left, ys - top
    x2, y2 = xs + right, ys + bottom
    if max_shape is not None:
        height, width = max_shape[0], max_shape[1]
        x1 = x1.clamp(min=0, max=width)
        y1 = y1.clamp(min=0, max=height)
        x2 = x2.clamp(min=0, max=width)
        y2 = y2.clamp(min=0, max=height)
    return torch.stack([x1, y1, x2, y2], -1)
def can_self_enroll_in_course(course_key):
    """
    Return True when a user may enroll themselves in the course.

    CCX courses (course keys carrying a ``ccx`` attribute) are the
    exception: there a user can only be enrolled by a CCX coach.
    """
    return not hasattr(course_key, 'ccx')
import subprocess
def swig_works(swig: str, verbose: bool = True) -> bool:
    """Check whether *swig* behaves like a working SWIG executable by
    running ``swig -version`` and inspecting the exit status."""
    ok = False
    try:
        # No `capture_output=True` for python3.6 compatibility.
        proc = subprocess.run([swig, '-version'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        ok = proc.returncode == 0
    except (FileNotFoundError, PermissionError):
        ok = False
    if verbose:
        outcome = 'SUCCEEDED' if ok else 'FAILED'
        print(f'Testing SWIG executable {swig}... {outcome}.')
    return ok
import re
def textProtect(s):
    """Replace characters illegal in our strings (- . / : ?) with underscores."""
    unsafe = re.compile(r'[-./:?]')
    return unsafe.sub('_', s)
def fair_rations(B):
    """Hackerrank 'Fair Rations': minimum loaves to distribute so everyone
    holds an even count, handing loaves out in adjacent pairs.

    https://www.hackerrank.com/challenges/fair-rations/problem

    Rules: each loaf given to person i must be accompanied by a loaf to a
    neighbour (i-1 or i+1); afterwards every count must be even.

    Args:
        B (list): starting loaf counts per person (modified in place).

    Returns:
        int or str: the minimum number of loaves, or "NO" when impossible.
    """
    # An odd total can never be split into all-even holdings.
    if sum(B) % 2 != 0:
        return "NO"
    # Sweep left to right: whenever someone holds an odd count, give one
    # loaf to them and one to their right neighbour (2 loaves per fix).
    handed_out = 0
    for i in range(len(B) - 1):
        if B[i] % 2 != 0:
            B[i] += 1
            B[i + 1] += 1
            handed_out += 2
    return handed_out
import numpy
def makeScheduleMatrix(input_schedules, team_people_set):
    """
    Build a boolean availability matrix from the input schedule format.

    ``input_schedules`` is a list of days, each day a list of hour slots,
    each slot a collection of names available in that slot.  The result
    has one row per person and one column per (day, hour) slot; the shape
    is printed before returning.
    """
    n_days = len(input_schedules)
    n_hours = len(input_schedules[0])
    matrix = numpy.zeros([len(team_people_set), n_days * n_hours], dtype=bool)
    for row, person in enumerate(team_people_set):
        for day_num, day in enumerate(input_schedules):
            for hour_num, slot in enumerate(day):
                if person in slot:
                    matrix[row, day_num * n_hours + hour_num] = True
    print(matrix.shape)
    return matrix
def is_seq_dual_coding(seq, frame2, gencode):
    """Check whether *seq* is dual coding, i.e. it translates without inner
    stop codons both in the main frame and in the shifted reading frame
    *frame2* (+1 or -1).  Stop codons at the sequence ends are allowed.
    """
    if frame2 == +1:
        shifted = seq[2:]  # skip the first 2 nt
    elif frame2 == -1:
        shifted = seq[1:]  # skip the first nt
    else:
        raise Exception("Unknown frame = '%d'" % frame2)
    # Trim both frames to a whole number of codons to avoid Biopython warnings.
    main = seq[0:(3 * int(len(seq) / 3))]
    shifted = shifted[0:(3 * int(len(shifted) / 3))]
    prot_main = main.translate(gencode).strip('*')
    prot_shifted = shifted.translate(gencode).strip('*')
    return '*' not in prot_main + prot_shifted
from typing import TextIO
def read_content(content: TextIO) -> str:
    """Read and return the whole metadata stream.

    Refuses an interactive terminal: metadata must be piped or redirected in.
    """
    if content.isatty():
        raise Exception('Need an input to metadata!')
    data = content.read()
    return data
def depth_to_col_name(depth):
    """
    Derive the location column name for a given nesting depth:
    "location" at depth 0, otherwise "sub_" repeated *depth* times + "lctn".
    """
    return "location" if depth == 0 else "sub_" * depth + "lctn"
def apply_rules(user, subscription, subscription_rules, rule_logic):
    """
    Decide whether *user* is authorized to see *subscription*.

    Each rule is checked against the user's ``meta_data``: list-valued
    metadata must contain the rule value, scalar metadata must equal it.
    ``rule_logic`` (``all`` or ``any``) combines the individual outcomes.

    user: models.User()
    subscription: models.MeetingSubscription()
    subscription_rules: models.Rule()
    rule_logic: all(), any()

    Returns the subscription when authorized, otherwise ``None``.
    """
    outcomes = set()
    for rule in subscription_rules:
        user_value = user.meta_data[rule.name]
        if type(user_value) is list:
            outcomes.add(rule.value in user_value)
        else:
            outcomes.add(user_value == rule.value)
    return subscription if rule_logic(outcomes) else None
def _determine_sentence_id(sentence, new_id, id_name, old_id):
"""
Determine the appropriate id for this sentence.
Ids here means doc id or par id.
Args:
sentence: The sentence whose ids to check.
new_id: The key that the id can come up as without the id key word.
id_name: The id in the comments to modify. One of 'newpar id', or
'newdoc id'.
old_id: The id of the previous sentence.
Returns:
The value of the id of the sentence.
"""
if sentence.meta_present(id_name):
return sentence.meta_value(id_name)
elif sentence.meta_present(new_id):
return None
else:
return old_id | 95a5f1987a9037df1fb7e73b2b47c8da2b265e1c | 48,179 |
def string_slice(strvar, slicevar):
    """Slice a string using a '[first]:[last]' specification,
    e.g. for use as |string_slice:'1:3'.
    """
    first, last = slicevar.partition(':')[::2]
    if first == '':
        return strvar[:int(last)]
    if last == '':
        return strvar[int(first):]
    return strvar[int(first):int(last)]
def sum_math(n=100_000_000):
    """
    Closed-form sum of the integers 0..n-1, i.e. n*(n-1)/2.

    :param n: exclusive upper bound
    :return: the sum as an int
    """
    return n * (n - 1) // 2
def get_cumulative_rewards(rewards, gamma=0.99):
    """Compute the discounted return G_t for every step.

    G_t = r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ...

    rewards: rewards at each step
    gamma: discount for reward

    Returns:
        list of discounted returns, one per step.
    """
    # Accumulate from the back using G_t = r_t + gamma * G_{t+1}.  This is
    # O(n) instead of the O(n^2) forward double loop, with identical math.
    discounted = [0.0] * len(rewards)
    running = 0.0
    for t in range(len(rewards) - 1, -1, -1):
        running = rewards[t] + gamma * running
        discounted[t] = running
    return discounted
def is_array_type(name):
    """
    Tell whether *name* spells an array type ``array(...)``.

    >>> is_array_type('array(T)')
    True
    >>> is_array_type('array')
    False
    """
    return name.startswith("array(") and name.endswith(")")
def _entity_selector_AQW_ (AQW) :
    """Entity selector for `AQW._attr.E_Type`."""
    # AQ is the attribute-query object of the E_Type wrapped by this AQW.
    AQ = AQW._attr.E_Type.AQ
    result = AQ.ES
    # NOTE(review): apparently polymorphic selectors must not be shared
    # across wrappers, so a fresh instance bound to this AQW is created —
    # confirm against the AQ class.
    return result.__class__ (AQ, AQW = AQW) if AQ._polymorphic else result | d9376fee12dffa75a04ec30bb56ea833f29b2715 | 48,185 |
def binarize_predictions(relevant_list, predicted_list):
    """Score each prediction against the relevant items.

    Returns a list of 500 scores: 2 where the prediction equals the first
    relevant item, 1 where it matches any other relevant item, 0 otherwise.
    Missing predictions (``None``, or fewer than 500 entries — most likely
    in case of solr) are padded with 0.
    """
    # If there are no recommendations, binarize to all 0s.
    if predicted_list is None:
        return [0] * 500
    if len(predicted_list) < 500:
        # BUG FIX: the original returned list.extend(...)'s result, which
        # is always None.  Pad to 500 entries and fall through to scoring.
        predicted_list = list(predicted_list) + [0] * (500 - len(predicted_list))
    return [2 if entry == relevant_list[0]
            else 1 if entry in relevant_list[1:]
            else 0
            for entry in predicted_list]
def getnamedargs(*args, **kwargs):
    """Merge positional dict arguments and keyword arguments into one dict.

    Lets callers mix styles, so ``getnamedargs({'a': 5, 'b': 3}, c=8)``
    yields ``dict(a=5, b=3, c=8)``.  Non-dict positional arguments are
    ignored; keyword args win on key collisions.
    """
    merged = {}
    for positional in args:
        if isinstance(positional, dict):
            merged.update(positional)
    merged.update(kwargs)
    return merged
import unicodedata
def strip_accents(s):
    """Drop combining accent marks from *s*, approximating it in plain text.

    The string is NFD-normalized and every combining mark (Unicode
    category 'Mn') is removed.

    Parameters
    ----------
    s : str
        Input string

    Returns
    -------
    str
        The transliterated string.
    """
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
def validate_prod_timestamp(logger, energy_dt, request_dt):
    """Check that production data was captured at most 2 hours after the
    requested datetime.

    Both arguments are Arrow objects in local HST time; a warning is
    logged (key US-HI-OA) and False returned when the data is too old.
    """
    age_seconds = (energy_dt - request_dt).total_seconds()
    if age_seconds > 7200:
        logger.warning(
            (
                "Hawaii data is too old to use, "
                "parsed data timestamp was {0}."
            ).format(energy_dt),
            extra={"key": "US-HI-OA"},
        )
        return False
    return True
from typing import Any
def is_number(val: Any) -> bool:
    """
    Check whether *val* is a numeric value (int, float or complex).

    Note that ``bool`` is a subclass of ``int``, so booleans count too.
    """
    numeric_types = (int, float, complex)
    return isinstance(val, numeric_types)
def increment_ipv4_segments(segments):
    """
    Increment an IP address given as four quad segments, with carry
    propagation from the last octet upward (ipv4 rules).

    :param segments: IPv4 segments to increment.
    :type segments: ``list`` or ``tuple``

    :return: Incremented segments.
    :rtype: ``list``
    """
    octets = [int(segment) for segment in segments]
    octets[3] += 1
    # Propagate the carry from the least significant octet upward.  As in
    # the original, an overflow of the first octet is not wrapped.
    for pos in (3, 2, 1):
        if octets[pos] == 256:
            octets[pos] = 0
            octets[pos - 1] += 1
    return octets
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures | f67a6b1fa6a5aefc19bfe7e99ed77473801e6b83 | 48,193 |
def user_rating_ratio(uid, userid_grouped, flag):
    """Return the fraction of this user's ratings equal to 1, 2, 3, 4, 5.

    A ``(-1, -1, -1, -1, -1)`` sentinel is returned when *flag* is 0 or
    when the user has no rating rows.
    """
    if flag == 0:
        return -1, -1, -1, -1, -1
    df = userid_grouped[uid]
    n_rows = df.shape[0]
    if n_rows == 0:
        return -1, -1, -1, -1, -1
    ratings = df['rating'].tolist()
    return tuple(float(ratings.count(score)) / n_rows
                 for score in (1, 2, 3, 4, 5))
def test_request_response(connection, receiver):
"""
Test request/response messaging pattern.
"""
def endpoint_callback(message):
return message.payload + "-pong"
connection.register_async_endpoint(endpoint_callback, "test.request")
connection.call_async(receiver.create_cbf(), "test.request", "ping")
assert ["ping-pong"] == receiver.wait_for_messages() | 200dd1c9dcf752130b7748323ea9d269a9d53eab | 48,196 |
from datetime import datetime
import logging
def get_now_time(format="%Y-%m-%d %H:%M:%S", show=False):
    """
    Return the current local time formatted with *format*.

    :param format: strftime format string.
    :param show: when truthy, also log the formatted time.
    :return: the formatted time string.
    """
    now = datetime.now().strftime(format)
    if show:
        # Truthiness test instead of the non-idiomatic `show == True`;
        # lazy %-formatting so the message is only built when emitted.
        logging.info("Current time: %s", now)
    return now
def warning_formatter(msg, category, filename, lineno, line=None):
    """Format a warning as just "(unknown):L<lineno>: <message>".

    Parameters
    ----------
    msg
        warning message
    category
        warning category (unused)
    filename
        filename of the file where the warning was raised (unused)
    lineno
        line number where the warning was raised
    line
        source line containing the warning (unused)

    Returns
    -------
    str
        formatted warning message, newline-terminated
    """
    return "(unknown):L{}: {}\n".format(lineno, msg)
def get_newick(node, parent_dist, leaf_names, newick='') -> str:
    """
    Convert scipy.cluster.hierarchy.to_tree() output to Newick format.

    :param node: output of scipy.cluster.hierarchy.to_tree()
    :param parent_dist: the parent node's distance (pass the root's own
        ``dist`` for the top-level call)
    :param leaf_names: list of leaf names, indexed by node id
    :param newick: accumulator used by the recursion; leave empty
    :returns: tree in Newick format
    """
    branch_len = parent_dist - node.dist
    if node.is_leaf():
        return "%s:%.2f%s" % (leaf_names[node.id], branch_len, newick)
    # Internal node: start with the closing suffix (branch length or the
    # terminating ";"), then prepend both children — right first so it
    # ends up after the comma, left last so it comes first in the output.
    suffix = "):%.2f%s" % (branch_len, newick) if newick else ");"
    suffix = get_newick(node.get_left(), node.dist, leaf_names, newick=suffix)
    suffix = get_newick(node.get_right(), node.dist, leaf_names,
                        newick=",%s" % suffix)
    return "(%s" % suffix
def parse_etraveler_response(rsp, validate):
    """ Extract (slot_name, child_esn) from one eTraveler response entry.

    Parameters
    ----------
    rsp : dict
        One element of the array returned by
        eTraveler.clientAPI.connection.Connection.getHardwareHierarchy,
        describing a 'child' of a particular hardware element.
    validate : dict
        Expected values for selected keys of the response; a value may be
        a list of acceptable alternatives.  Used purely for sanity
        checking, for example requiring that the parent element matches
        the input element of the request.

    Returns
    ----------
    slot_name : str
        The name given to the particular 'slot' for the child.
    child_esn : str
        The sensor id of the child, e.g., E2V-CCD250-104.

    Raises
    ------
    KeyError
        when a validated key is missing from the response.
    ValueError
        when a validated value does not match expectation.
    """
    for key, expected in validate.items():
        try:
            actual = rsp[key]
        except KeyError:
            raise KeyError("eTraveler response does not include expected key %s" % (key))
        if isinstance(expected, list):
            if actual not in expected:
                raise ValueError(
                    "eTraveler response does not match expectation for key %s: " % (key)
                    + "%s not in %s" % (actual, expected))
        elif actual != expected:
            raise ValueError(
                "eTraveler response does not match expectation for key %s: " % (key)
                + "%s != %s" % (actual, expected))
    return rsp['slotName'], rsp['child_experimentSN']
import logging
def get_all_loggers():
    """Return the ``dict`` of all loggers that have been accessed.

    .. versionadded:: 0.3.0
    """
    manager = logging.Logger.manager
    return manager.loggerDict
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.