content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_captions_from_dict(img_to_cap_dict):
    """Collect every caption stored in an image-to-captions mapping.

    Args:
        img_to_cap_dict: dict mapping an image name to a list of captions.
    Returns:
        list: all captions from every image, flattened into a single list
        (in dictionary insertion order).
    """
    all_captions = []
    for caption_list in img_to_cap_dict.values():
        all_captions.extend(caption_list)
    return all_captions
|
2378e8233f6ff5e9d8a96d8773d5fab779eace33
| 61,821
|
def int32_to_octets(value):
    """ Given an int or long, return a 4-byte array of 8-bit ints."""
    # Most-significant octet first, matching big-endian byte order.
    return [int((value >> shift) & 0xFF) for shift in (24, 16, 8, 0)]
|
06ea4d1c47b4a99ef9a8ff530b4e5b2d00abd54d
| 61,822
|
def generate_base_code(naval_base: bool,
                       scout_base: bool,
                       pirate_base: bool) -> str:
    """Returns a base code, one of:
    A -- Naval Base and Scout Base/Outpost
    G -- Scout Base/Outpost and Pirate Base
    N -- Naval Base
    P -- Pirate Base
    S -- Scout Base/Outpost
    (a single space when no base is present)
    """
    # A naval base dominates: combined with a scout base it is "A",
    # otherwise plain "N" (pirate presence is ignored in that case).
    if naval_base:
        return "A" if scout_base else "N"
    if scout_base:
        return "G" if pirate_base else "S"
    if pirate_base:
        return "P"
    return " "
|
48ea0037fa2dc3e1b1efa9a5ae9b9678ece78551
| 61,825
|
def compDict2List(compDict):
    """
    Split a component dictionary into parallel lists.

    Returns a tuple ``(keys, values)`` where both lists are in the
    dictionary's insertion order.
    """
    keys = list(compDict.keys())
    values = list(compDict.values())
    return (keys, values)
|
d10baa3bc10b755c024b58067fcbdaff1d7d2775
| 61,828
|
def concatenate(value, arg):
    """Concatenate the string forms of *value* and *arg*."""
    return "{}{}".format(value, arg)
|
3245fb025a39e0a3ad64d0434b1ec5324ff14f5b
| 61,830
|
def extract_pob(loaded_df):
    """
    Extracts the place of birth of each row. The coded values are the following:
    -1 - Invalid value
    1 - Born inside Canada
    2 - Born outside Canada
    :param loaded_df: The dataframe loaded from the file
    :return: A list containing the new values of the place of birth
    """
    pobs = []
    columns = loaded_df.columns
    col_name = ""
    print('Extracting Place of Birth')
    # Prefer "POBP" when both candidate columns exist.
    if "POBP" in columns:
        col_name = "POBP"
    elif "POB" in columns:
        col_name = "POB"
    else:
        # NOTE(review): col_name stays "" here, so loaded_df[col_name] below
        # will raise a KeyError -- confirm this branch is unreachable for the
        # expected input files.
        print("No Place of Birth Column(s) Detected")
    # The count of distinct codes is used to infer which coding scheme the
    # file uses; the numeric cut-offs below depend on that scheme.
    num_uniq = loaded_df[col_name].nunique()
    for pob in loaded_df[col_name].tolist():
        if col_name == "POBP":
            # presumably a 41-code scheme where codes <=10 and 33-35 are
            # Canadian -- TODO confirm against the survey codebook.
            if num_uniq == 41:
                if pob <= 10 or (pob >= 33 and pob <= 35): # Inside Canada
                    pobs.append(1)
                elif pob <= 40: # Outside Canada
                    pobs.append(2)
                else: # invalid value
                    pobs.append(-1)
            elif num_uniq == 13:
                if pob < 6: # Inside Canada
                    pobs.append(1)
                elif pob <= 12: # Outside Canada
                    pobs.append(2)
                else: # invalid value
                    pobs.append(-1)
        else:
            # "POB" column: two further schemes, keyed by distinct-code count.
            # NOTE(review): if num_uniq matches none of the handled values,
            # the row is silently skipped (no entry appended) -- confirm.
            if num_uniq == 28:
                if pob == 1: # Inside Canada
                    pobs.append(1)
                elif pob <= 27: # Outside Canada
                    pobs.append(2)
                else: # invalid value
                    pobs.append(-1)
            elif num_uniq == 33:
                if pob == 1: # Inside Canada
                    pobs.append(1)
                elif pob <= 32: # Outside Canada
                    pobs.append(2)
                else: # invalid value
                    pobs.append(-1)
    print('Place of Birth extracted')
    return pobs
|
a5a40b5cef4ecc288429f71723fc0277c695c537
| 61,831
|
import calendar
def datetime_to_unix(timestamp, milliseconds=False):
    """Converts a datetime object to a unix timestamp.

    Args:
        timestamp: A datetime object (interpreted as UTC via timegm).
        milliseconds: Bool, default False; return milliseconds instead.
    Returns:
        An integer unix timestamp in seconds (or milliseconds).
    """
    seconds = calendar.timegm(timestamp.timetuple())
    scale = 1000 if milliseconds else 1
    return int(seconds * scale)
|
3e8cac46c5d458202eacc82ef150e51b48cc55d7
| 61,835
|
def edge_threshold(grad_mag, thresh):
    """
    Suppress gradient-magnitude pixels that fall below a threshold.

    grad_mag: gradient-magnitude array, same shape as the original image.
    thresh: threshold below which pixels are zeroed.
    return: a new array with sub-threshold values set to 0 (input untouched).
    """
    suppressed = grad_mag.copy()
    below = suppressed < thresh
    suppressed[below] = 0
    return suppressed
|
9182c7d16fbb0f0e22c89ed64de77ebe4f8e899e
| 61,840
|
def get_object_map(id_offset: int) -> dict:
    """
    Returns ID to communication object mapping of a given CAN node ID.
    Map taken from https://en.wikipedia.org/wiki/CANopen#Predefined_Connection_Set[7]
    """
    # (base COB-ID, add node-id offset?, object name), in insertion order.
    entries = (
        (0x000, False, "NMT_CONTROL"),
        (0x001, False, "FAILSAFE"),
        (0x080, False, "SYNC"),
        (0x080, True,  "EMERGENCY"),
        (0x100, False, "TIMESTAMP"),
        (0x180, True,  "TPDO1"),
        (0x200, True,  "RPDO1"),
        (0x280, True,  "TPDO2"),
        (0x300, True,  "RPDO2"),
        (0x380, True,  "TPDO3"),
        (0x400, True,  "RPDO3"),
        (0x480, True,  "TPDO4"),
        (0x500, True,  "RPDO4"),
        (0x580, True,  "TSDO"),
        (0x600, True,  "RSDO"),
        (0x700, True,  "NMT_MONITORING"),
        (0x7E4, False, "TLSS"),
        (0x7E5, False, "RLSS"),
    )
    return {base + (id_offset if shifted else 0): name
            for base, shifted, name in entries}
|
7f7ecdb7b6620eb5cde3682865fccd3c927c711f
| 61,841
|
def is_number(str):
    """
    Checks whether a string is a number or not.

    :param str: value to test (name kept for backward compatibility even
        though it shadows the builtin).
    :type str: string
    :returns: True if `str` can be converted to a float.
    :rtype: bool
    :Example:
    >>> is_number('10')
    True
    """
    try:
        float(str)
    except (TypeError, ValueError):
        # TypeError covers inputs that float() cannot even accept (e.g. None,
        # lists); previously these propagated out, contradicting the docstring.
        return False
    return True
|
f8391667115f09f90fcdd6593883f10e5c6d2597
| 61,842
|
def construct_error_message(files_dict):
    """Build an error message listing where bad latin phrases appear.

    Arguments:
        files_dict {dictionary} -- Dictionary of failing files containing the
            bad latin phrases and offending lines
    Returns:
        {string} -- The error message to be raised
    """
    message_lines = ["Bad latin found in the following files:\n"]
    for filename, details in files_dict.items():
        message_lines.append(
            f"{filename}:\t{details['latin_type']}\tfound in line\t[{details['line']}]\n"
        )
    return "\n".join(message_lines)
|
b2d1f1f0cc677f1797a2706a1c2ed249253f8fda
| 61,843
|
def _nofilter(text):
"""Return the supplied text unchanged"""
return text
|
3c843e9ea6e00d3a3eaa4dd759732aebefde38b0
| 61,845
|
from pathlib import Path
import json
def template_json(keyboard):
    """Returns a `keymap.json` template for a keyboard.

    If a template exists in `keyboards/<keyboard>/templates/keymap.json` its
    contents are merged over the default single-key dictionary.

    Args:
        keyboard
            The keyboard to return a template for.
    Returns:
        dict with at least the key 'keyboard'.
    """
    template_file = Path('keyboards/%s/templates/keymap.json' % keyboard)
    template = {'keyboard': keyboard}
    if template_file.exists():
        # Use a context manager so the file handle is closed promptly
        # (the original left it to the garbage collector).
        with template_file.open(encoding='utf-8') as fh:
            template.update(json.load(fh))
    return template
|
7949c78a164abfb04ef1b46caaf0897acac88bfc
| 61,846
|
import hashlib
import uuid
def generate_uuid_from_string(the_string):
    """
    Returns String representation of the UUID of a hex md5 hash of the given string
    """
    # MD5 digests are 128 bits, exactly the size of a UUID, so the hex
    # digest can be fed straight into the UUID constructor.
    digest = hashlib.md5(the_string.encode("utf-8")).hexdigest()
    return str(uuid.UUID(digest))
|
29d29adfacafb27191b6af01da5a9073eb34d4ed
| 61,852
|
def doubleVal(input):
    """Return twice the input value."""
    doubled = input * 2
    return doubled
|
954ac7fc806b35e7f9ffb1c8244e8b4ed4f15fd6
| 61,853
|
def partition(pred, iterable):
    """Split *iterable* into ([items where pred holds], [all the rest])."""
    trues, falses = [], []
    for element in iterable:
        # Select the destination bucket, then append -- one pass, stable order.
        bucket = trues if pred(element) else falses
        bucket.append(element)
    return trues, falses
|
be96fc0a560a0c2e5bd25e12387622167b4f084b
| 61,854
|
def update_template(template, pardict):
    """Perform variable substitution in a template.

    Args:
        template (str): Template with old style Python format strings.
        pardict (dict): Dictionary of parameters to substitute.
    Returns:
        str: String with substituted content.
    """
    substituted = template % pardict
    return substituted
|
e8b5c220e7e66b3c0540938d229f2e75104e02bc
| 61,857
|
def calculate_shape_keeping_aspect_ratio(height: int, width: int, min_size: int, max_size: int):
    """
    Rescale spatial sizes keeping aspect ratio so the short side reaches
    min_size unless that would push the long side past max_size (mirrors the
    TF Object Detection API keep-aspect-ratio resizer).

    :param height: input height.
    :param width: input width.
    :param min_size: lower size limit (short side target).
    :param max_size: upper size limit (long side cap).
    :return: tuple (scaled height, scaled width).
    """
    scale = min(min_size / min(height, width),
                max_size / max(height, width))
    return int(round(height * scale)), int(round(width * scale))
|
f61ae6b9c13250093c6d4685e468f7a5634605c6
| 61,860
|
def get_cn_description(cn):
    """
    Writes a verbal description of the coordination number/polyhedra based on a given coordination number
    :param cn: (integer) rounded coordination number
    :return: (String) verbal description of coordination number/polyhedra
    """
    return "{}-fold coordinated".format(int(round(cn)))
|
832322bb6b3ed26af7fa51bf7036123e93d7efa3
| 61,862
|
def get_attribute(obj: dict, path: list):
    """
    Walk a nested JSON-like object following *path*.

    :param obj: object to iterate on (dicts and lists).
    :param path: list of keys (for dicts) / integer indices (for lists).
    :return: the value at the path, or "" if any step is not accessible.
    """
    node = obj
    for step in path:
        is_dict_hit = isinstance(node, dict) and step in node
        is_list_hit = isinstance(node, list) and step < len(node)
        if not (is_dict_hit or is_list_hit):
            return ""
        node = node[step]
    return node
|
a21c3d7d58eb11673d46a09ce6e830345be62e96
| 61,867
|
import yaml
def parse_yaml_to_dict(contents):
    """
    Parses YAML to a dict.

    Parameters
    ----------
    contents : str
        The contents of a file with user-defined configuration settings.

    Returns
    -------
    dict
        Configuration settings (one key-value pair per setting), or an
        empty dict when *contents* is empty/falsy.
    """
    if not contents:
        return dict()
    return yaml.safe_load(contents)
|
de97a20c5343ab909351261a3dfafad8590b2f57
| 61,870
|
def get_makefile_name(libname):
    """
    Given a library name, return the corresponding Makefile name.
    """
    return f"Makefile.{libname}"
|
43d9f97f9bed9a7052fbaba10d97afda8ca1da7b
| 61,872
|
import re
def validate_string(password_string):
    """
    Perform a series of checks to decide whether a string can be a valid
    password.

    :param password_string: candidate password.
    :return: tuple (message, ok) -- message is None when ok is True,
        otherwise a human-readable reason for rejection.
    """
    if len(password_string) < 8:
        # Fixed typo in the user-facing message ("atleat" -> "at least").
        message = "Password must contain at least 8 characters."
        return (message, False)
    if re.search('[0-9]', password_string) is None:
        # Fixed typo in the user-facing message ("atleast" -> "at least").
        message = "Password must contain at least one number"
        return (message, False)
    return (None, True)
|
1ddd108daba38de9b8c2b6360bca7e04cb7de75f
| 61,875
|
import re
def regex_filter(pattern, list_of_strings):
    """Apply regex pattern to each string and return those that match.

    See also regex_capture
    """
    # Compile once so the loop reuses the pattern object.
    matcher = re.compile(pattern)
    return [s for s in list_of_strings if matcher.match(s)]
|
68437ed5dd9ed4690a22c9da7aa225a596e6b755
| 61,876
|
import ctypes
def c_encode_char(string):
    """Encode a Python string as a ctypes ``c_char_p``.

    Args:
        string (str): string to encode as c_char_p (default codec, UTF-8).
    """
    encoded = string.encode()
    return ctypes.c_char_p(encoded)
|
ff933e7e4e1ec60a28d67cab42542ab198efce26
| 61,877
|
def get_legal_topic_name(topic: str) -> str:
    """Returns a legal Kafka topic name.

    Special characters are not allowed in the name of a Kafka topic, so
    everything except alphabetic characters is dropped and the remainder
    is lowercased.

    Parameters
    ----------
    topic: str
        topic name, essentially an alert parameter which is to be used
        to create a topic

    Returns
    ----------
    legal_topic: str
        A topic name that can be used as a Kafka topic

    Examples
    ----------
    >>> get_legal_topic_name('IaMEvi\\l')
    'iamevil'
    """
    kept_chars = [ch.lower() for ch in topic if ch.isalpha()]
    return ''.join(kept_chars)
|
ca313d9ef0f55f15e440d02d7f2f278bf1e9a2ee
| 61,882
|
def startswith_token(s, prefix, separators=None):
    """Tests if a string is either equal to a given prefix or prefixed by it
    followed by a separator.
    """
    # With no separators given, only exact equality counts.
    if separators is None:
        return s == prefix
    prefix_len = len(prefix)
    if s.startswith(prefix):
        # Exact match counts as a token on its own.
        if len(s) == prefix_len:
            return True
        # NOTE(review): find() accepts the separator *anywhere* after the
        # prefix, not only immediately following it, so e.g.
        # startswith_token("foox.y", "foo", ".") is True. Confirm this is
        # intended rather than checking s[prefix_len] against the separators.
        if isinstance(separators, str):
            sep = separators
            return s.find(sep, prefix_len) >= 0
        # separators may also be an iterable of separator strings.
        for sep in separators:
            if s.find(sep, prefix_len) >= 0:
                return True
    return False
|
07e8bc52ec24bac862281c2efaa4344ea499396a
| 61,885
|
def find(question, cur):
    """
    Finds a corresponding answer for the input question.
    Args:
        question: input question row
        cur: database cursor
    Returns:
        Answer row if found, None otherwise
    """
    # Query for accepted answer.
    # NOTE(review): question[1] is presumably the accepted-answer id column
    # of the question row -- confirm against the questions table schema.
    cur.execute("SELECT Body, OwnerUserId, OwnerDisplayName from answers where Id = ?", [question[1]])
    answer = cur.fetchone()
    if answer and answer[0]:
        # Check if answer has a message body (Body is the first column).
        return answer
    # No row found, or the row has an empty body.
    return None
|
bdb6c21165a914d6084c7ee2a0f2db0a7b6f7873
| 61,886
|
def get_sheet(book, sheetName=None, sheetIndex=0):
    """Get xlrd sheet object, by name when given, else by index."""
    if sheetName is None:
        return book.sheet_by_index(sheetIndex)
    return book.sheet_by_name(sheetName)
|
1db384417769299540e892321c029f71f9f4f10b
| 61,887
|
def _boto_tags_to_dict(tags):
"""Convert the Tags in boto format into a usable dict
[{'Key': 'foo', 'Value': 'bar'}, {'Key': 'ham', 'Value': 'spam'}]
is translated to
{'foo': 'bar', 'ham': 'spam'}
"""
return {item['Key']: item['Value'] for item in tags}
|
d4935269864a0cb09ef8d84cbbc44402383e8edb
| 61,889
|
def location(text, index):
    """
    Describe where *index* falls in *text*: "row:column" (both 1-based)
    for strings, otherwise just the 1-based position.
    """
    if not isinstance(text, str):
        return str(index + 1)
    row = text.count('\n', 0, index)
    last_newline = text.rfind('\n', 0, index)
    # Column is the distance past the last newline (rfind gives -1 when
    # there is none, which conveniently yields a 0-based column).
    column = index - last_newline - 1
    return "{}:{}".format(row + 1, column + 1)
|
5f8026d3d30267833c6014a6d338b9d7fb2e5294
| 61,890
|
def bound_elems(elems):
    """
    Finds the minimal bbox (x0, y0, x1, y1) that contains all given elems.
    """
    x0 = min(e.x0 for e in elems)
    y0 = min(e.y0 for e in elems)
    x1 = max(e.x1 for e in elems)
    y1 = max(e.y1 for e in elems)
    return (x0, y0, x1, y1)
|
300169c886801845cc17de666ceaacff2c73baa2
| 61,894
|
def split_feature_matrices(x_train, x_test, y_train, y_test, idx):
    """Does the opposite of merge_feature_matrices i.e. when given the
    train and test matrices for features and labels, splits them into
    train/active, test/active, train/inactive, test/inactive.

    Parameters:
    - x_train, x_test, y_train, y_test (2D np.arrays): Feature
      matrices and label matrices in the sklearn 'X' and 'Y' style.
    - idx (int): a column of the label matrix corresponding to the
      protein target you wish to test.
    """
    # Boolean masks for the "active" class (label == 1) on the chosen target.
    train_active = y_train[:, idx] == 1
    test_active = y_test[:, idx] == 1
    return (x_train[train_active],
            x_test[test_active],
            x_train[~train_active],
            x_test[~test_active])
|
41ef3ccbee3cad1fbce2485d02f5d67c9870cfef
| 61,898
|
def human_delta(tdelta):
    """
    Takes a timedelta object and formats it for humans.

    Usage:
        # 149 day(s) 8 hr(s) 36 min 19 sec
        print human_delta(datetime(2014, 3, 30) - datetime.now())
    Example Results:
        23 sec
        12 min 45 sec
        1 hr(s) 11 min 2 sec
        3 day(s) 13 hr(s) 56 min 34 sec

    :param tdelta: The timedelta object.
    :return: The human formatted timedelta
    """
    d = dict(days=tdelta.days)
    d['hrs'], rem = divmod(tdelta.seconds, 3600)
    d['min'], d['sec'] = divmod(rem, 60)
    # Pick the shortest format that still shows every non-zero unit.
    # (Fixed: the original compared with ``is 0`` -- an identity check that
    # only works by accident of CPython's small-int caching and raises a
    # SyntaxWarning on modern Python.)
    if d['min'] == 0:
        fmt = '{sec} sec'
    elif d['hrs'] == 0:
        fmt = '{min} min {sec} sec'
    elif d['days'] == 0:
        fmt = '{hrs} hr(s) {min} min {sec} sec'
    else:
        fmt = '{days} day(s) {hrs} hr(s) {min} min {sec} sec'
    return fmt.format(**d)
|
c15cdfcc8cc8594e10b08d6cc5180d08d8460053
| 61,900
|
def get_waters(lines):
    """Extract water (HOH) records from PDB lines; columns 18-20 hold the residue name."""
    return "".join(line for line in lines if line[17:20] == "HOH")
|
468fa05f0a1213669eb1a1ac2d29b0191ba96887
| 61,910
|
def ramachandran_type(residue, next_residue) :
    """Expects Bio.PDB residues, returns ramachandran 'type'.

    If this is the last residue in a polypeptide, pass None for
    next_residue.  Return value is one of: "General", "Glycine",
    "Proline" or "Pre-Pro".
    """
    name = residue.resname.upper()
    if name == "GLY":
        return "Glycine"
    if name == "PRO":
        return "Proline"
    # "Pre-Pro": any residue (other than Gly/Pro, excluded above) that is
    # immediately followed by a proline.
    if next_residue is not None and next_residue.resname.upper() == "PRO":
        return "Pre-Pro"
    return "General"
|
42e06d73a0d00f9f5a623677bc59b7747e150501
| 61,915
|
def clip_curvatures(vals, radius=0.03):
    """Clip principal curvatures to a defined range, in place.

    A principal curvature of k corresponds to the curvature (in one
    direction) of a sphere of radius r = 1/k.  Since the mesh has a fairly
    low resolution, curvatures beyond +/- 1/radius are clamped.

    Args:
        vals: array of principal curvatures to clip (modified in place).
        radius: the radius of the smallest sphere to consider, in meters.

    Returns:
        The same array, with values clamped to [-1/radius, 1/radius].
    """
    # Removed a leftover debug print of the array's min/max values.
    max_val = 1.0 / radius
    vals[vals > max_val] = max_val
    vals[vals < -max_val] = -max_val
    return vals
|
72bc964ce0dd7691304a56ec356a29571b60407f
| 61,916
|
def name_to_label(self, class_name):
    """ Retrieves the class id given a class name """
    class_info = self.name_to_class_info[class_name]
    return class_info['id']
|
5dc630932a5938894a3db6912d0a5469c3e9cbdf
| 61,919
|
import torch
def batch_identity(batch_size, matrix_size, *args, **kwargs):
    """
    Tile the identity matrix along axis 0, `batch_size` times.
    Extra args/kwargs are forwarded to ``torch.eye`` (dtype, device, ...).
    """
    eye = torch.eye(matrix_size, *args, **kwargs)
    return eye.unsqueeze(0).repeat(batch_size, 1, 1)
|
412f59ff9e5e8c802c0cfc6c6614fce6f14138ad
| 61,927
|
def dec2bin(num, width=0):
    """Convert an integer to its binary string, optionally zero-padded.

    Negative numbers are encoded as two's complement of the given width.

    >>> dec2bin(0, 8)
    '00000000'
    >>> dec2bin(57, 8)
    '00111001'
    >>> dec2bin(3, 10)
    '0000000011'
    >>> dec2bin(-23, 8)
    '11101001'
    >>> dec2bin(23, 8)
    '00010111'
    >>> dec2bin(256)
    '100000000'
    """
    if num < 0:
        # Two's complement needs a fixed width to wrap around in.
        if not width:
            raise ValueError('Width must be specified for negative numbers')
        num += 2 ** width
    # Collect bits least-significant first, then reverse at the end.
    bits = []
    while num:
        bits.append('1' if num & 1 else '0')
        num >>= 1
    if width:
        pad = width - len(bits)
        if pad < 0:
            raise OverflowError('A binary of width %d cannot fit %d' %
                                (width, num))
        bits.extend('0' * pad)
    if not bits:
        bits = ['0']
    return ''.join(reversed(bits))
|
a63cfca8506d23ee69eeb112d489cf9af0542f79
| 61,928
|
def taylor_sin(x:float, order:int):
    """
    Approximate sin(x) with its Taylor series.

    :param x: point at which to evaluate
    :param order: number of terms; higher is more accurate but slower
    :return: the series approximation of sin(x)
    """
    term = x
    total = x
    # Each term is derived from the previous one:
    # t_k = -t_{k-1} * x^2 / ((2k-1)(2k-2))
    for k in range(2, order):
        term = -1 * term * x * x / ((2 * k - 1) * (2 * k - 2))
        total += term
    return total
|
2c601bc52db0e6943a8b5e61d5f28d20f6aa545e
| 61,931
|
def count_datasets(models):
    """ Return the number of datasets on the database """
    return models["dataset"].select().count()
|
38916c2116ccb0c9bc96f178e4b78e943eb19328
| 61,937
|
import string
def buildCoder(shift):
    """
    Returns a dict that can apply a Caesar cipher to a letter.
    The cipher is defined by the shift value. Ignores non-letter characters
    like punctuation, numbers and spaces.

    shift: 0 <= int < 26
    returns: dict mapping each upper/lowercase letter to its shifted letter
    """
    # Each letter at position i maps to the letter at (i + shift) mod 26
    # within its own alphabet.  This replaces the original's four parallel
    # lists and its shadowing of the builtin ``dict``.
    coder = {}
    for alphabet in (string.ascii_uppercase, string.ascii_lowercase):
        for i, letter in enumerate(alphabet):
            coder[letter] = alphabet[(i + shift) % 26]
    return coder
|
db60e548f8561237f80de70aefe28fa9991f1782
| 61,939
|
def problem_4_6(node1, node2):
    """ Find the first common ancestor of two nodes in a binary tree
    without storing additional nodes in a data structure (the tree is not
    necessarily a search tree).

    Strategy: walk from node1 up towards the root; for each ancestor,
    walk from node2 up and test for a match.  Returns the matching
    ancestor, or None when the nodes share no ancestor.
    """
    ancestor = node1
    while ancestor is not None:
        probe = node2
        while probe is not None:
            if probe == ancestor:
                return ancestor
            probe = probe.parent
        ancestor = ancestor.parent
    return None
|
155376d952fde5b0292e3404c24f8ecfc03e8a6d
| 61,940
|
def is_unique(s):
    """Return True when the sequence *s* contains no duplicate elements."""
    distinct = set(s)
    return len(distinct) == len(s)
|
75e831313b855cf9c013ca33360a5611606102b1
| 61,941
|
def numWindows(tot, deltaT):
    """ Evaluates the number of windows that will be used
    given the total time (tot) of a particular induction.
    """
    window_span = (tot - deltaT) * 60.
    return int(window_span)
|
deb0e9a55ddbf8a0a65148c503b2af56a942aacd
| 61,942
|
import torch
def dcg(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    k=10,
    gain_function="pow_rank",
) -> torch.Tensor:
    """
    Computes DCG@topk for the specified values of `k`.
    Graded relevance as a measure of usefulness,
    or gain, from examining a set of items.
    Gain may be reduced at lower ranks.
    Reference:
    https://en.wikipedia.org/wiki/Discounted_cumulative_gain
    Args:
        outputs (torch.Tensor): model outputs, logits
            with shape [batch_size; slate_length]
        targets (torch.Tensor): ground truth, labels
            with shape [batch_size; slate_length]
        gain_function:
            String indicates the gain function for the ground truth labels.
            Two options available:
            - `pow_rank`: torch.pow(2, x) - 1
            - `rank`: x
            On the default, `pow_rank` is used
            to emphasize on retrievng the relevant documents.
        k (int):
            Parameter fro evaluation on top-k items
    Returns:
        torch.Tensor for dcg at k
    Raises:
        ValueError: gain function can be either `pow_rank` or `rank`
    """
    # Never take more items than the slate actually holds.
    k = min(outputs.size(1), k)
    # Sort the ground-truth labels by the model's predicted ranking.
    order = torch.argsort(outputs, descending=True, dim=-1)
    true_sorted_by_preds = torch.gather(targets, dim=-1, index=order)
    if gain_function == "pow_rank":
        # Exponential gain: 2^label - 1. Rebinding the parameter name to the
        # lambda is intentional here.
        gain_function = lambda x: torch.pow(2, x) - 1
        gains = gain_function(true_sorted_by_preds)
        # Discount by log2(rank + 2): rank 0 -> 1/log2(2) = 1.
        discounts = torch.tensor(1) / torch.log2(
            torch.arange(true_sorted_by_preds.shape[1], dtype=torch.float)
            + 2.0
        )
        discounted_gains = (gains * discounts)[:, :k]
    elif gain_function == "rank":
        # Linear gain with log2(rank + 1) discount; rank 0 would divide by
        # log2(1) = 0, so it is patched to a discount of 1 below.
        discounts = torch.tensor(1) / torch.log2(
            torch.arange(true_sorted_by_preds.shape[1], dtype=torch.float)
            + 1.0
        )
        discounts[0] = 1
        discounted_gains = (true_sorted_by_preds * discounts)[:, :k]
    else:
        raise ValueError("gain function can be either pow_rank or rank")
    # Sum the discounted gains of the top-k items per slate.
    sum_dcg = torch.sum(discounted_gains, dim=1)
    return sum_dcg
|
3c654af98e42292c86203d09b0b3e9f3d1808107
| 61,946
|
def validates(*names, **kw):
    """Decorate a method as a 'validator' for one or more named properties.
    Designates a method as a validator, a method which receives the
    name of the attribute as well as a value to be assigned, or in the
    case of a collection, the value to be added to the collection.
    The function can then raise validation exceptions to halt the
    process from continuing (where Python's built-in ``ValueError``
    and ``AssertionError`` exceptions are reasonable choices), or can
    modify or replace the value before proceeding. The function should
    otherwise return the given value.
    Note that a validator for a collection **cannot** issue a load of that
    collection within the validation routine - this usage raises
    an assertion to avoid recursion overflows. This is a reentrant
    condition which is not supported.
    :param \*names: list of attribute names to be validated.
    :param include_removes: if True, "remove" events will be
        sent as well - the validation function must accept an additional
        argument "is_remove" which will be a boolean.
        .. versionadded:: 0.7.7
    :param include_backrefs: defaults to ``True``; if ``False``, the
        validation function will not emit if the originator is an attribute
        event related via a backref. This can be used for bi-directional
        :func:`.validates` usage where only one validator should emit per
        attribute operation.
        .. versionadded:: 0.9.0
    .. seealso::
        :ref:`simple_validators` - usage examples for :func:`.validates`
    """
    # Options are popped from **kw; any unknown keywords are silently ignored.
    include_removes = kw.pop('include_removes', False)
    include_backrefs = kw.pop('include_backrefs', True)
    def wrap(fn):
        # Tag the function so mapper configuration can discover it later;
        # the function itself is returned unmodified (no wrapper call layer).
        fn.__sa_validators__ = names
        fn.__sa_validation_opts__ = {
            "include_removes": include_removes,
            "include_backrefs": include_backrefs
        }
        return fn
    return wrap
|
214394568bf66ccb9a8267535eac51c4e03731e8
| 61,949
|
from typing import Union
from typing import List
def generic_import(name: Union[str, List[str]]):
    """
    Import a module given a dotted string or a list of module name parts.

    Returns the imported (sub)module object, or None when *name* is empty.
    """
    if isinstance(name, str):
        components = name.split('.')
    else:
        assert isinstance(name, list), name
        components = name
    mod = None
    for i, comp in enumerate(components):
        try:
            # If imported successfully, __import__ returns the TOP-level
            # package, so each iteration re-imports a longer dotted prefix.
            mod = __import__('.'.join(components[:i + 1]))
        except ModuleNotFoundError:
            # Fall back to attribute traversal from the top module.
            # NOTE(review): this inner loop reuses/overwrites the outer loop
            # variable ``comp`` and always walks components[1:] from the
            # current ``mod`` -- confirm this is intended rather than
            # resuming from position i.
            for comp in components[1:]:
                try:
                    mod = getattr(mod, comp)
                except AttributeError:
                    raise
    return mod
|
2c9ef55ab66a0b52ff78c8130057a9dec255ab7c
| 61,950
|
import random
def get_texts_sampled(texts, number):
    """Return a random sample of at most `number` items from `texts`."""
    sample_size = min(number, len(texts))
    return random.sample(texts, sample_size)
|
d8893fc8835a94a0902df2f5dc04da84786b7d11
| 61,951
|
def create_pids2idxs(dataset):
    """Creates a mapping between pids and indexes of images for that pid.

    Returns:
        dict: pid => list of dataset indices holding that pid.
    """
    mapping = {}
    for idx, record in enumerate(dataset.data):
        mapping.setdefault(record['pid'], []).append(idx)
    return mapping
|
c6ccd42a9a439ad9bed9d200991c4bc12f6a0ae0
| 61,952
|
def kmlCoords(coords):
    """Convert from a sequence of floats to a comma delimited string"""
    return ','.join(map(str, coords))
|
d957539212d55c60989c80d40f710aff6d1ffc7e
| 61,954
|
def get_spaceweather_imageurl(iu_address, iu_date, iu_filename, iu_extension,
                              verbose):
    """Build a complete image URL for the spaceweather site.

    Concatenates the input image url (iu) strings that define the address,
    the date folder, the filename root, and the filename extension.
    If verbose is truthy, the resulting URL is printed.
    """
    sw_imageurl = "{}{}/{}{}".format(iu_address, iu_date, iu_filename,
                                     iu_extension)
    if verbose:
        print("Input image file URL: \n{}".format(sw_imageurl))
    return sw_imageurl
|
8ba57b53f1af9cb516648fd6df13e2f44b0dd1bd
| 61,956
|
def parseDate2solar(date):
    """Parse a date to a solar date (datetime).

    Accepts a datetime-like object or a LunarDate (detected by class name,
    converted via its to_datetime()).

    :return: year, month, day
    """
    if type(date).__name__ == "LunarDate":
        date = date.to_datetime()
    return date.year, date.month, date.day
|
aa21db5c136322752fac6ead8cb948e7acdb0516
| 61,960
|
def fast_addition(a, b):
    """Add the two arguments and return the result."""
    total = a + b
    return total
|
763f0681c708c302e6d8f1d3e141204f97e73fd4
| 61,962
|
def transfer_turn(board):
    """Switch the turn to the opposite player by negating the board."""
    flipped = -board
    return flipped
|
74fb9f483b5408894b08cb7efda843a3e3915e1e
| 61,964
|
def parse_s3_url(url):
    """
    Parses a url with format s3://bucket/prefix into a bucket and prefix.

    The prefix defaults to "/" when the URL carries no path component.
    Raises ValueError for URLs that do not start with "s3://" or have an
    empty bucket name.
    """
    if not url.startswith("s3://"):
        raise ValueError("The provided URL does not follow s3://{bucket_name}/{path}")
    # Split at the first "/" after the scheme: bucket before, prefix after.
    remainder = url[5:]
    bucket, _, prefix = remainder.partition("/")
    if not bucket:
        raise ValueError("The provided URL " + str(url) + " is not valid. Please enter a URL following s3://{bucket_name}/path")
    if not prefix:
        prefix = "/"
    return bucket, prefix
|
1f8724e00f5205c6747a1dc12db8237939af5830
| 61,965
|
def get_url(path, scheme="http"):
    """ Return the full InfoQ URL for *path* under the given scheme. """
    return "{}://www.infoq.com{}".format(scheme, path)
|
8e4010d3943514ea293c5ee9b12a68143694042f
| 61,966
|
def _formulate_smt_constraints_fully_connected_layer(
    z3_optimizer, nn_first_layer, smt_first_layer, top_k, gamma):
  """Formulates smt constraints using first layer activations.
  Generates constraints for the top_k nodes in the first hidden layer by setting
  the masked activation to be greater than that of the unmasked activation.
  Args:
    z3_optimizer: instance of z3.Optimizer, z3 optimizer.
    nn_first_layer: numpy array with shape (num_hidden_nodes_first_layer,)
    smt_first_layer: list of z3.ExprRef with length
        num_hidden_nodes_first_layer.
    top_k: int, constrain the nodes with top k activations in the first hidden
        layer.
    gamma: float, masked activation is greater than gamma times the unmasked
        activation. Its value is always between [0,1).
  Returns:
    z3 optimizer with added smt constraints.
  """
  # argsort()[-top_k:] selects the indices of the k largest activations.
  for index in nn_first_layer.argsort()[-top_k:]:
    if nn_first_layer[index] > 0:
      # we constrain only those nodes whose activations are positive.
      # In future we might handle nodes with negative values as well.
      z3_optimizer.solver.add(
          smt_first_layer[index] > gamma * nn_first_layer[index])
  return z3_optimizer
|
da67ad56d43b0f0574add5333f98b4bf8686241f
| 61,967
|
def bdev_ftl_delete(client, name):
    """Delete FTL bdev via an RPC call.

    Args:
        name: name of the bdev
    """
    return client.call('bdev_ftl_delete', {'name': name})
|
9ea5656354953f8b1589dcb13a39ff008129dbf6
| 61,971
|
def tree_as_host_list(current, path):
    """
    Args:
        current (Node): The root of the subtree.
        path (str): path from the root to the current Node.
    Returns:
        str: The list representation of the hosts in current
    """
    # A leaf is a complete host name; emit the accumulated path as-is.
    if len(current.children) == 0:
        return path
    p = str()
    for i in current.children.values():
        # Prepend this child's label. The path != '\n' test special-cases
        # what appears to be the initial call with path='\n', where no
        # separating dot should be inserted before the trailing newline.
        # NOTE(review): assumes callers seed path with '\n' -- confirm.
        p += tree_as_host_list(i, i.auth + '.' + path if path != '\n' else i.auth + path)
    return p
|
f2e58e7ef87c100f0a33586348f689cc2c17b686
| 61,974
|
import math
def SE_calc(item1, item2):
    """
    Calculate standard error with binomial distribution.

    :param item1: parameter
    :type item1: float
    :param item2: number of observations
    :type item2: int
    :return: standard error as float, or the string "None" on any
        arithmetic failure (e.g. zero observations).
    """
    try:
        variance = item1 * (1 - item1)
        return math.sqrt(variance / item2)
    except Exception:
        return "None"
|
fcbc06b1bff4bf607dfdff929cb1d67c919e2de9
| 61,978
|
def expand_dups(hdr_tuples):
    """
    Given a list of header tuples, unpack multiple null-separated values
    for the same header name into one tuple per value.
    """
    expanded = []
    for name, value in hdr_tuples:
        # Empty segments (e.g. from leading/trailing NULs) are dropped.
        expanded.extend((name, part) for part in value.split('\x00') if part)
    return expanded
|
54823b34421a5d1dec693c5507b7a0f7c65b20e0
| 61,985
|
def parse_filename(filename, full_output=False):
    """
    Return the basic stellar parameters encoded in a model filename.

    Returns [teff, logg, feh, alpha]; with full_output=True, returns
    (parameters, names) instead.
    """
    basename = filename.split("/")[-1]
    # Effective temperature sits between the 't' and 'g' markers.
    teff = float(basename.split("t")[1].split("g")[0])
    # Surface gravity (times 10) sits between 'g' and 'k'.
    logg = float(basename.split("g")[1].split("k")[0]) / 10.
    # Metallicity: chars 1-3, 'p'/'m' encode the sign, value is times 10.
    feh = float(basename[1:4].replace("p", "").replace("m", "-")) / 10.
    # Alpha enhancement is flagged by an 'a' at position 4.
    alpha = 0.4 if basename[4] == "a" else 0
    parameters = [teff, logg, feh, alpha]
    if not full_output:
        return parameters
    names = ("effective_temperature", "surface_gravity", "metallicity",
             "alpha_enhancement")
    return (parameters, names)
|
bc88720155f40ce8049966e5db15bdf25fd6c52e
| 61,997
|
def sw_archie(res, phi, Rw, a=1.0, m=2.0, n=2.0):
    """
    Calculate water saturation using the Archie method:

        Sw = ((a * Rw) / (phi**m * res)) ** (1/n)

    clamped to the physical range [0, 1].

    res = measured formation resistivity (array)
    phi = effective porosity (array)
    Rw  = formation water resistivity
    a, m, n = Archie constants
    """
    Rw = float(Rw)
    a = float(a)
    m = float(m)
    n = float(n)
    saturation = (a * Rw / (phi ** m * res)) ** (1 / n)
    saturation[saturation < 0] = 0.0
    saturation[saturation > 1] = 1.0
    return saturation
|
c97add62cb41be146915c25a9a7832a628cbb416
| 61,998
|
import re
def extract_blog_content(content):
    """Extract blog post content using a regex.

    Args:
        content (request.content): String content returned from requests.get
    Returns:
        str: first regex match, or the string "None" when nothing matches
    """
    pattern = re.compile(r'<div class="cms-richtext">(.*?)</div>')
    matches = pattern.findall(content)
    return matches[0] if matches else "None"
|
286a6a978b700342b3fe9905966c41aaacb1ac74
| 62,000
|
def add_colons(df, id_name='', col_types=None):
    """
    Adds the colons to column names before neo4j import (presumably removed by `remove_colons` to make queryable).
    User can also specify a name for the ':ID' column and data types for property columns.

    :param df: DataFrame, the neo4j import data without colons in it (e.g. to make it queryable).
    :param id_name: String, name for the id property. If importing a CSV into neo4j without this property,
        Neo4j may use its own internal id's, losing this property.
    :param col_types: dict, data types for other columns in the form of column_name:data_type
    :return: DataFrame, with neo4j compatible column headings
    :raises ValueError: if none of Neo4j's reserved columns appear in `df`.
    """
    # Avoid a shared mutable default argument.
    if col_types is None:
        col_types = {}
    reserved_cols = ['id', 'label', 'start_id', 'end_id', 'type']
    # Get the reserved column names that need to be changed
    to_change = [c for c in df.columns if c.lower() in reserved_cols]
    if not to_change:
        # Fixed message: the original was missing a comma and a closing paren.
        raise ValueError("Neo4j Reserved columns (['id', 'label', 'start_id', 'end_id', 'type']) not " +
                         "found in DataFrame")
    # Add any column names that need to be typed
    to_change += [c for c in df.columns if c in col_types.keys()]
    change_dict = {}
    for name in to_change:
        # Reserved column names go after the colon
        if name.lower() in reserved_cols:
            if name.lower() == 'id':
                new_name = id_name + ':' + name.upper()
            else:
                new_name = ':' + name.upper()
        else:
            # Data types go after the colon, while names go before.
            new_name = name + ':' + col_types[name].upper()
        change_dict.update({name: new_name})
    return df.rename(columns=change_dict)
|
d7bca92e939c7ca109cd66841bb2a45d2fdbeac0
| 62,001
|
import math
def bound_longitude(lon, rad=False):
    """Wrap longitude values into (-180, 180], handling the wrap-around.

    Works element-wise on array-like inputs that support boolean-mask
    assignment (e.g. numpy arrays). With ``rad=True``, ``lon`` is taken
    and returned in radians.
    """
    if rad:
        lon = lon * 180 / math.pi
    # First fold into [0, 360), then shift the (180, 360) half down.
    wrapped = lon % 360
    over = wrapped > 180
    wrapped[over] = wrapped[over] % 180 - 180
    if rad:
        wrapped = wrapped * math.pi / 180
    return wrapped
|
11a18cf36b9a593a8fcc606fd51d3009895e9522
| 62,003
|
def get_all_under_item(item):
    """
    Recursively collect every descendant of a QTreeWidgetItem, depth-first.

    :param item: <QTreeWidgetItem>
    :return: <list> of QTreeWidgetItems below ``item``
    """
    descendants = []
    for idx in range(item.childCount()):
        child = item.child(idx)
        descendants.append(child)
        descendants += get_all_under_item(child)
    return descendants
|
61b6abd2e22cb82ed5b8a3f94087b77beb85530c
| 62,005
|
from typing import Union
from typing import NoReturn
from typing import Mapping
def validate_embed_fields(fields: dict) -> Union[bool, NoReturn]:
    """Raises a ValueError if any of the given embed fields is invalid.

    Each field must be a mapping with required keys 'name' (<= 256 chars)
    and 'value' (<= 1024 chars), a boolean 'inline', and no other keys.
    Returns True when every field validates.
    """
    field_validators = ("name", "value", "inline")
    required_fields = ("name", "value")
    for field in fields:
        # Structural checks FIRST: the original accessed field.get(...)
        # before verifying the field was a mapping with the required keys,
        # so malformed input raised AttributeError/TypeError instead of
        # the documented ValueError.
        if not isinstance(field, Mapping):
            raise ValueError("Embed fields must be a mapping.")
        if not all(required_field in field for required_field in required_fields):
            raise ValueError(
                f"Embed fields must contain the following fields: {', '.join(required_fields)}."
            )
        for field_name in field:
            if field_name not in field_validators:
                raise ValueError(f"Unknown embed field field: {field_name!r}.")
        if len(field.get("name")) > 256:
            raise ValueError("Embed field-name length reached max limit.")
        if len(field.get("value")) > 1024:
            raise ValueError("Embed field-value length reached max limit.")
        if not isinstance((value := field.get("inline")), bool):
            raise ValueError(f"This field must be of type bool, not {type(value)}.")
    return True
|
c6966ecde743bea7d975db108600729d93fece58
| 62,011
|
def parse_domain_label(domain_label):
    """Split a comma- and/or whitespace-separated domain list into a list."""
    # Normalise commas to spaces, then let split() handle any run of whitespace.
    normalized = domain_label.replace(',', ' ')
    return normalized.split()
|
5273501ae1bea9517f5b9c0a620fdb78feb79112
| 62,013
|
def rankine_to_fahrenheit(temp):
    """Convert a temperature from Rankine (R) to Fahrenheit (ºF)."""
    offset = 459.67  # 0 ºF expressed in Rankine
    return temp - offset
|
894d74541679979e4432e01a3f263e44725070a6
| 62,016
|
def _ch_disp_name(ch):
"""Convert channel names like 'proc_Plasmid' to 'Plasmid'"""
return '_'.join(ch.split('_')[1:])
|
208e315206fd9046b83d2c5841e581d1fc71ca58
| 62,021
|
def row_to_dict(row):
    """
    Translate a SQLAlchemy row to a dict.

    Args:
        row: a single mapped object (has ``__table__``) or an iterable of
            mapped objects (e.g. a joined-query row); falsy input yields {}.
    Returns:
        data_dict(dict): flat column->value dict for a single object, or a
            nested {tablename: {column: value}} dict for a multi-object row.
    """
    if not row:
        return {}
    if hasattr(row, "__table__"):
        # Single mapped object: one flat column -> value dict.
        return {col: getattr(row, col) for col in row.__table__.columns.keys()}
    # Iterable of mapped objects: nest per table name, skipping None entries.
    result = {}
    for table in row:
        if table:
            result[table.__tablename__] = {
                col: getattr(table, col)
                for col in table.__table__.columns.keys()
            }
    return result
|
d5f13b7f582d97328f46960a02ce8007e8bfcaf6
| 62,024
|
import torch
def unnormalise(tensor: torch.Tensor):
    """Convert a normalised CxHxW tensor to an HxWxC uint8 numpy image.

    The tensor is linearly rescaled so its minimum maps to 0 and its
    maximum to (almost) 255; a single-channel result is squeezed to HxW.
    """
    t = tensor.cpu().detach()
    lo, hi = float(t.min()), float(t.max())
    # Clamp to the tensor's own range (a no-op, retained from the original),
    # then shift/scale into [0, 1); 1e-5 guards against division by zero.
    t = t.clamp_(min=lo, max=hi)
    t = t.add_(-lo).div_(hi - lo + 1e-5)
    image = t.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
    if image.shape[-1] == 1:
        image = image.squeeze()
    return image
|
6a41fa9019027b515e7d0bc96815e460fecf5ae7
| 62,027
|
def _add_input_output(input_files=None, output_file=None, pipe=True):
""" Add input and output files to command in the form
'command input_files > output_file', 'command > output_file',
'command input_files |', 'command input_files', 'command |' or 'command'
depending on the given files and the value of pipe
:param input_files: list of file(s) to read from (None: stdin)
:param output_file: file to write to (None: pipe or stdout,
depending on value of pipe)
:param pipe: only used if output file is None: True: write to pipe,
False: write to stdout
:return: string with input and output modifiers
"""
input_files =\
input_files if isinstance(input_files, (list, tuple)) else [
input_files]
cmd = ''
for input_file in input_files:
if input_file:
cmd += ' {}'.format(input_file)
if output_file:
cmd += ' > {}'.format(output_file)
elif pipe:
cmd += ' | '
return cmd
|
1904ea4f9ab6a99c557f296fdb0fe3a93f579dc3
| 62,028
|
import json
def prepare_update_record(record):
    """
    Remove unnecessary/forbidden attributes from a record so it's possible to
    reuse it on the entity.update API call.

    :param record: JSON string of the record
    :return: JSON string without the forbidden attributes
    """
    json_loaded = json.loads(record)
    not_allowed_keys = ['created']
    for key in not_allowed_keys:
        # pop() with a default tolerates records that lack the attribute;
        # the original `del` raised KeyError on them.
        json_loaded.pop(key, None)
    return json.dumps(json_loaded)
|
47540650703f9b4190f7e07431c6469ed1237c55
| 62,030
|
def _decimal_to_binary(decimal):
"""Convert decimal to binary"""
return int("{0:b}".format(decimal))
|
c92d4d0a8c6487f156f79e735b0fd4b30874f64d
| 62,032
|
from typing import List
from typing import Counter
from typing import Optional
def get_most_frequent(tokens: List[str], token_counts: Counter) -> Optional[str]:
    """
    Find the most frequent token in a list of tokens.

    Args:
        tokens: a list of tokens
        token_counts: a dictionary of token frequencies
    Returns:
        the token with the highest positive count (first occurrence wins
        on ties), or None if no token has a positive count
    """
    best_count = 0
    best_token = None
    for token in tokens:
        count = token_counts.get(token, 0)
        if count > best_count:
            best_count = count
            best_token = token
    return best_token
|
ad35e18a5e430c35a6279d234ce19f851f943ec9
| 62,037
|
from typing import List
import pkg_resources
def _get_all_backends() -> List[str]:
    """
    Return the list of known backend names.

    Backends register themselves as setuptools entry points under the
    'ibis.backends' group; this scans every installed distribution for
    entry points in that group and returns their names.
    """
    return [
        entry_point.name
        for entry_point in pkg_resources.iter_entry_points(
            group='ibis.backends', name=None
        )
    ]
|
9d19c19ff34203b4a3bf1df6c91eaa14ca484d58
| 62,039
|
def _short_tag(tag):
"""Helper method to remove any namespaces from the XML tag"""
return tag[tag.rfind('}')+1:len(tag)]
|
27d9c8ca4a42ccc8ec168004a5b9fade5ed69a9f
| 62,044
|
def get_url(route, base_url="{{base_Url}}"):
    """Prefix a route with the base_url environment-variable placeholder."""
    return base_url + route
|
7627bb75be4319095a922dcce2022121ec559716
| 62,045
|
from datetime import datetime
def is_between_timespan(timespan: tuple) -> bool:
    """Return True if the current time lies within [timespan[0], timespan[1]] (inclusive)."""
    start, end = timespan[0], timespan[1]
    return start <= datetime.now() <= end
|
ccbac35c37476987e0f9900c6a5aa466e9ea04d8
| 62,046
|
def _crop_image_to_square(image):
"""
Given a PIL.Image object, return a copy cropped to a square around the
center point with each side set to the size of the smaller dimension.
"""
width, height = image.size
if width != height:
side = width if width < height else height
left = (width - side) // 2
top = (height - side) // 2
right = (width + side) // 2
bottom = (height + side) // 2
image = image.crop((left, top, right, bottom))
return image
|
26ddaa8a1fe2a3a87b278b58cd131b45433c2c81
| 62,052
|
def not_between(a, b):
    """Evaluate whether ``a`` lies outside the closed interval [b[0], b[1]].

    :param a: value to test
    :param b: two-element list [low, high]
    :return: True iff ``a`` is NOT between b[0] and b[1] (inclusive)
    :raises TypeError: if ``b`` is not a list of length 2 (the original
        error message promised this but only checked the type, so a short
        list raised IndexError instead)
    """
    if not isinstance(b, list) or len(b) != 2:
        raise TypeError('other value must be a list of length 2')
    return not (b[0] <= a <= b[1])
|
effc8f05cace0fc3e0aaba8239a82a6985fe65d3
| 62,053
|
def get_origin(manifest):
    """Parse the coordinate of the origin in the manifest file

    Args:
        manifest: The manifest (XML file path) from which to parse the
            coordinates of the origin

    Returns:
        the four parsed (lat, lon) corner tuples (or raise an exception if
        they could not be parsed)
    """
    # Scan the manifest line by line for the first <gml:coordinates> element.
    with open(manifest, "r") as save_file:
        for line in save_file:
            if "<gml:coordinates>" in line:
                # Strip the surrounding tags, then split the remaining
                # "lat,lon lat,lon ..." payload on spaces and parse each
                # "lat,lon" pair into a (float, float) tuple.
                # NOTE(review): the replace() below assumes the exact
                # leading whitespace before the opening tag -- verify
                # against real manifest files.
                coor = line.replace("            <gml:coordinates>", "")\
                       .replace("</gml:coordinates>", "").split(" ")
                coord = [(float(val.replace("\n", "").split(",")[0]),\
                          float(val.replace("\n", "")\
                          .split(",")[1]))for val in coor]
                # Assumes the element carries exactly four corner points.
                return coord[0], coord[1], coord[2], coord[3]
        raise Exception("Coordinates not found in "+str(manifest))
|
2577ac3d34b739aad2a8aa0deb7cd774b8d85ca0
| 62,057
|
def mjpeg_info_cmp(x, y):
    """
    Comparison function for sorting (camera_name, camera_info) pairs by the
    numeric suffix of the camera name ('camera_<N>').

    Returns 1, -1 or 0 as x's number is greater than, less than or equal
    to y's.
    """
    num_x = int(x[0].replace('camera_', ''))
    num_y = int(y[0].replace('camera_', ''))
    # Standard three-way comparison via the sign trick.
    return (num_x > num_y) - (num_x < num_y)
|
31d98998bd3ece11a591b841505d50e67af68182
| 62,060
|
import torch
def set_loss_weight(pivot_set,exp):
    r"""
    The weight of the k-th auxiliary loss: gamma_k = \max(0.01, (\frac{L_k}{L_K})^{exp})
    More details can be found in Section 3.2 in "The Shallow End: Empowering Shallower Deep-Convolutional Networks
    through Auxiliary Outputs": https://arxiv.org/abs/1611.01773.

    :param pivot_set: sequence of pivot depths L_k (the last entry is the
        full depth L_K used for normalisation)
    :param exp: exponent applied to the depth ratio
    :return: (pivot_weight, lr_weight) tensors; lr_weight[i] is the running
        sum of pivot_weight[i:]
    """
    base_weight = 0
    # NOTE(review): tensors are allocated on the GPU; requires CUDA.
    lr_weight = torch.zeros(len(pivot_set)).cuda()
    pivot_weight = lr_weight.clone()
    # Walk pivots from deepest to shallowest so lr_weight[i] accumulates
    # the weights of all auxiliary losses at depth >= i.
    for i in range(len(pivot_set) - 1, -1, -1):
        # Depth ratio raised to `exp`, floored at 0.01.
        temp_weight = max(pow(float(pivot_set[i]/pivot_set[-1]), exp), 0.01)
        base_weight += temp_weight
        pivot_weight[i] = temp_weight
        lr_weight[i] = base_weight
    return pivot_weight, lr_weight
|
8998cf935ae12f6568598152155c7a6b6df99121
| 62,066
|
import unicodedata
def displayText (text):
    """ Convert text into a string that is always renderable without combining,
    control or invisible characters """
    if text is None:
        return text
    # `text and` guards the empty string: all() is vacuously True on an
    # empty iterable, so the original wrongly returned '\u25cc' for ''.
    if text and all (map (lambda x: unicodedata.combining (x) != 0, text)):
        # add circle if combining
        return '\u25cc' + text
    invMap = {
        '\t': '⭾',
        '\n': '↳',
        ' ': '\u2423',
        '\b': '⌦',
        '\u200e': '[LRM]', # left to right mark
        '\u061c': '[ALM]', # arabic letter mark
        '\u202c': '[PDF]', # pop directional formatting
        "\u2066": '[LRI]', # left-to-right isolate (lri)
        "\u2067": '[RLI]', # right-to-left isolate (rli)
        "\u2069": '[PDI]', # pop directional isolate (pdi)
        }
    # Only whole-string matches are substituted; anything else passes through.
    return invMap.get (text, text)
|
412b646c55a8c498f217a516f207173c2356a3e9
| 62,069
|
def isGenericParamName(name):
    """Check whether ``name`` is a generic parameter name (prefix 'param').

    :raises ValueError: if ``name`` is None
    """
    if name is None:
        raise ValueError("parameter name is None")
    return name[:5] == 'param'
|
85595ed602db3588d6150a0380b47a9fe8f060e6
| 62,072
|
def parse_hal_binned_tags(hal_binned_tags):
    """Parses the tag information from the output of HAL

    Parameters
    ----------
    hal_binned_tags: output of HAL.get_outputs() (list of
        (time, output_id, dim, count) tuples)

    Returns a nested dictionary:
        [output_id][dimension] = list of (time, count) tuples
    """
    parsed_tags = {}
    for time, output_id, dim, count in hal_binned_tags:
        # setdefault creates the nested dicts/lists on first sight.
        per_output = parsed_tags.setdefault(output_id, {})
        per_output.setdefault(dim, []).append((time, count))
    return parsed_tags
|
347952ed53e0c575554293a333f30507101549e7
| 62,073
|
from typing import List
def multiplication_table(n: int) -> List[List[int]]:
    """
    A completely unnecessary generation of a multiplication table, which is not needed to solve the problem.

    :param n: the size of the table, ranging [1,n] x [1,n].
    :return: the multiplication table, where the entry [i][j] = (i + 1) * (j + 1)

    >>> multiplication_table(4)
    [[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12], [4, 8, 12, 16]]
    >>> multiplication_table(1)
    [[1]]
    """
    table = []
    for row in range(1, n + 1):
        table.append([row * col for col in range(1, n + 1)])
    return table
|
141e5a6649955785113091f9d66608498344f063
| 62,075
|
import inspect
from typing import OrderedDict
def get_args_kwargs(fct, n_optional):
    """
    Extracts arguments and optional parameters of a function.

    :param fct: function
    :param n_optional: number of arguments to consider as
        optional arguments and not parameters; this many defaulted
        parameters (in declaration order) are promoted to positional
        arguments instead of keyword parameters
    :return: (args, kwargs) where *args* is the list of positional
        argument names (a leading ``self`` is dropped) and *kwargs* is an
        OrderedDict of the remaining defaulted parameters (excluding
        ``op_version``) plus a trailing ``op_`` entry set to None

    Any optional argument ending with '_' is ignored.
    """
    params = inspect.signature(fct).parameters
    if n_optional == 0:
        items = list(params.items())
        args = [name for name, p in params.items()
                if p.default == inspect.Parameter.empty]
    else:
        items = []
        args = []
        for name, p in params.items():
            if p.default == inspect.Parameter.empty:
                args.append(name)
            else:
                if n_optional > 0:
                    # Promote the first n_optional defaulted parameters
                    # to positional arguments.
                    args.append(name)
                    n_optional -= 1
                else:
                    items.append((name, p))
    kwargs = OrderedDict((name, p.default) for name, p in items
                         if (p.default != inspect.Parameter.empty and
                             name != 'op_version'))
    # Guard against parameter-less callables: the original indexed args[0]
    # unconditionally and raised IndexError for them.
    if args and args[0] == 'self':
        args = args[1:]
    kwargs['op_'] = None
    return args, kwargs
|
45adbee36983f67486474a0f93730df63a19cf77
| 62,077
|
def quizn_to_index(quizn):
    """See: https://github.com/fielddaylab/jo_wilder/blob/master/src/scenes/quiz.js
    For some reason there are 5 quizzes, but there is no quiz numbered 1.

    Returns:
        quizn - 1 for quiz numbers >= 2, otherwise quizn unchanged.
    """
    if quizn >= 2:
        return quizn - 1
    return quizn
|
b57df8c103d3124872be02eb487772787cb8131e
| 62,078
|
def day_in_sec(dy, ml=False):
    """Convert a number of days to seconds or milliseconds.

    :param int dy: number of days (anything int() accepts)
    :param bool ml: milliseconds if True, otherwise seconds (default False)
    :return: number of (milli)seconds
    """
    seconds = int(dy) * 86400  # 24 * 60 * 60
    return seconds * 1000 if ml else seconds
|
08394bc4a04e4a8ca10eb42b6dbb4f425ba44a41
| 62,085
|
def clean_chars(value, cleanchars):
    """ Strip every character/substring in ``cleanchars`` out of ``value``.

    :param value: String to be cleaned
    :param cleanchars: Characters to remove from value
    :return value: Cleaned string
    """
    cleaned = value
    for unwanted in cleanchars:
        cleaned = cleaned.replace(unwanted, '')
    return cleaned
|
480d921152f9bc3e6491b4a015d61f53932dd16c
| 62,090
|
def makeNeighborLists(position, nRow, nCol):
    """
    Build the Moore-neighborhood position list for a cell.

    position is the cell's (row, col); nRow and nCol bound the grid
    (bounds are inclusive, matching the original <= checks).
    """
    r, c = position
    offsets = (-1, 0, 1)
    neighborList = []
    for dr in offsets:
        for dc in offsets:
            if dr == 0 and dc == 0:
                continue  # skip the cell itself
            nr, nc = r + dr, c + dc
            if 0 <= nr <= nRow and 0 <= nc <= nCol:
                neighborList.append((nr, nc))
    return neighborList
|
2e8f54f4727a9477bfee9cc8b6d113b0a79deeed
| 62,092
|
def ceil4(x):
    """ Find the closest greater or equal multiple of 4

    Parameters
    ----------
    x: int
        The size

    Returns
    -------
    x_ceil: int
        The closest integer >= x which is a multiple of 4.
    """
    # Round up by adding 3, then clear the two low bits.
    return (x + 3) & ~3
|
452cfae5fc9ad92cab7c7e27c1ba1a8442fc5bd4
| 62,094
|
def findRxnName(rxnid, reactionsBiGG):
    """
    Retrieve the (descriptive) name of a reaction from the BiGG table.

    Returns
    -------
    String with the name when ``rxnid`` is in the index, otherwise ''.
    """
    if rxnid not in list(reactionsBiGG.index):
        return ''
    return str(reactionsBiGG.loc[rxnid]['name'])
|
e21cd364013adb333332167522efb8a34f33f3cf
| 62,096
|
def calculate_snr(rx_power, noise_power):
    """ Compute the Signal-to-Noise ratio in dB.

    Since both inputs are already in dB, the ratio reduces to a subtraction.

    Args:
        rx_power: (numpy array) received power in dB!
        noise_power: noise power in dB!
    Returns:
        snr: (numpy array) Signal-to-Noise ratio in dB!
    """
    return rx_power - noise_power
|
0011e261ba7a2df9a9374657caa47040a76a209d
| 62,101
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.