content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def validate_output_transform(x, y, y_pred):
    """A transform to format the output of the supervised evaluator before calculating the metric.

    Flattens the batch and view dimensions so every (batch, view) pair is an
    independent sample for the metric.

    Args:
        x (:obj:`torch.Tensor`): the input to the model (unused here)
        y (:obj:`torch.Tensor`): the ground truth labels, shape (B, V, C)
        y_pred (:obj:`torch.Tensor`): the model predictions, shape (B, V, C)
    Returns:
        (:obj:`torch.Tensor`, :obj:`torch.Tensor`): predictions and ground
        truths reshaped to (B * V, C)
    """
    # NOTE: the original docstring had y and y_pred swapped — the code binds
    # y_pred to the model output and y to the labels, documented here correctly.
    B, V, C = y_pred.shape
    B_lab, V_lab, C_lab = y.shape
    output = y_pred.view(B * V, C)
    labels = y.view(B_lab * V_lab, C_lab)
    return output, labels
def partial_clique_graph(i, set_cliques, minimum, num_cliques):
    """
    Compute the clique-graph edges incident to clique ``i`` (second stage
    of CPM). Detached from the main routine so it can be parallelised
    across workers.

    Parameters
    ----------
    i : int
        Index of the clique being processed.
    set_cliques : list(set)
        All found cliques, each as a set for cheap intersection.
    minimum : int
        Required overlap between two cliques (size_of_cliques - 1).
    num_cliques : int
        Total number of cliques found in the graph.

    Returns
    -------
    list
        Edges (i, j) for every later clique j overlapping by ``minimum``.
    """
    current = set_cliques[i]
    return [
        (i, j)
        for j in range(i + 1, num_cliques)
        if len(current & set_cliques[j]) == minimum
    ]
def solution(n: int = 1000) -> int:
    """Returns the number of letters used to write all numbers from 1 to n.
    where n is lower or equals to 1000.
    >>> solution(1000)
    21124
    >>> solution(5)
    19
    """
    # Letter counts for zero..nineteen (0 for zero: never said aloud).
    ones = [0, 3, 3, 5, 4, 4, 3, 5, 5, 4, 3, 6, 6, 8, 8, 7, 7, 9, 8, 8]
    # Letter counts for twenty, thirty, ..., ninety (first two unused).
    tens = [0, 0, 6, 6, 5, 5, 5, 7, 6, 6]

    def letters(i: int) -> int:
        """Letters needed to say a single number i (1 <= i <= 1000)."""
        if i >= 1000:
            return ones[i // 1000] + 8  # "thousand"
        total = 0
        if i >= 100:
            total += ones[i // 100] + 7  # "<digit> hundred"
            if i % 100:
                total += 3  # "and"
        rem = i % 100
        if 0 < rem < 20:
            # teens are irregular, handled by direct lookup
            total += ones[rem]
        else:
            total += ones[rem % 10] + tens[rem // 10]
        return total

    return sum(letters(i) for i in range(1, n + 1))
def PairEnum(x, y):
    """Enumerate all pairs of rows in x and y.

    Returns (x_rep, y_rep) where row k of x_rep pairs with row k of y_rep,
    covering every (row of x, row of y) combination.
    """
    assert x.ndimension() == 2, 'Input dimension must be 2'
    n = x.size(0)
    x_pairs = x.repeat(n, 1)
    y_pairs = y.repeat(1, y.size(0)).view(-1, y.size(1))
    return x_pairs, y_pairs
import re
def filter_with_regex(files, regex):
    """Filter file names by a regular expression.

    Parameters
    ------
    files (list): list of file names
    regex (str): regular expression string

    Returns
    ------
    list: the file names for which the regex matches somewhere in the name
    """
    pattern = re.compile(regex)
    return [name for name in files if pattern.search(name)]
import re
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
    """
    Use this function as sorting key to sort in 'natural' order.
    >>> names = ['file10', 'file1.5', 'file1']
    >>> sorted(names, key=natural_sort_key)
    ['file1', 'file1.5', 'file10']
    Source: http://stackoverflow.com/a/16090640/715090
    """
    # Digit runs compare numerically, everything else case-insensitively.
    pieces = _nsre.split(s)
    return [int(piece) if piece.isdigit() else piece.lower() for piece in pieces]
def cred_UserKeyge(params):
    """Generate keys for a credential user (EC El-Gamal style key pair).

    ``params`` is expected to be ``(G, g, h, o)``; only the generator ``g``
    and the group order ``o`` are used.
    """
    _, g, _, o = params
    private_key = o.random()
    public_key = private_key * g  # This is just an EC El-Gamal key
    return (private_key, public_key)
def read_genes(gene_file):
    """Read in list of gene names from \n separated text file and
    return list.

    :param gene_file: path to a text file with one gene name per line
    :return: list of stripped gene names (blank lines yield empty strings)
    """
    # 'rU' (universal newlines) was deprecated and removed in Python 3.11;
    # plain text mode handles universal newlines on Python 3 anyway.
    with open(gene_file, 'r') as genefile:
        return [line.strip() for line in genefile]
def vowel_articulation_index(F1a, F1i, F1u, F2a, F2i, F2u):
    """
    Return the vowel articulation index:
    VAI = (F2i + F1a) / (F1i + F1u + F2u + F2a)

    Args:
        F1a: (float) the 1st formant frequency of the vowel /a [Hz]
        F1i: (float) the 1st formant frequency of the vowel /i [Hz]
        F1u: (float) the 1st formant frequency of the vowel /u [Hz]
        F2a: (float) the 2nd formant frequency of the vowel /a [Hz]
        F2i: (float) the 2nd formant frequency of the vowel /i [Hz]
        F2u: (float) the 2nd formant frequency of the vowel /u [Hz]
    Returns:
        VAI: (float) vowel articulation index
    """
    # Original docstring mislabelled F2a/F2i/F2u as first-formant values.
    return float((F2i + F1a) / (F1i + F1u + F2u + F2a))
def recordenum_view_url(enum_id, coll_id="testcoll", type_id="testtype"):
    """
    Build the public access URL / URI for the indicated enumerated value.

    enum_id     id of enumerated value
    coll-id     id of collection
    type_id     id of enumeration type
    """
    parts = {
        'coll_id': coll_id,
        'type_id': type_id,
        'enum_id': enum_id,
    }
    return "/testsite/c/%(coll_id)s/d/%(type_id)s/%(enum_id)s/" % parts
def phred33toQ(qual):
    """Turn a Phred+33 quality character into its integer score (ASCII code minus 33)."""
    return ord(qual) - 33
def is_single_bit(num):
    """
    True if exactly one bit is set in num (i.e. num is a positive power of two).

    The clear-lowest-bit trick ``num & (num - 1)`` alone wrongly reported
    True for 0, which has no bits set; the explicit non-zero check fixes that.
    """
    return num != 0 and (num & (num - 1)) == 0
def tags_list_to_string(tags):
    """
    Given a list of ``tags``, turn it into the canonical string representation
    (space-delimited, enclosing tags containing spaces in double brackets).
    """
    rendered = ['[[%s]]' % tag if ' ' in tag else tag for tag in tags]
    return u' '.join(rendered)
def cos_2_args(input_A, input_B, lib):
    """sin(A + B) = sin A cos B + cos A sin B

    ``lib`` supplies the ``sin``/``cos`` implementations (e.g. math, numpy).
    """
    first_term = lib.sin(input_A) * lib.cos(input_B)
    second_term = lib.cos(input_A) * lib.sin(input_B)
    return first_term + second_term
from typing import Any
from typing import List
def _order_by_line_nos(objs: Any, line_nos: List[int]) -> List[str]:
    """Orders the set of `objs` by `line_nos` (ascending, stable)."""
    ranked_indices = sorted(range(len(line_nos)), key=lambda idx: line_nos[idx])
    return [objs[idx] for idx in ranked_indices]
import torch
def compute_bbox_proj(verts, f, img_size=256):
    """
    Compute the 2D bounding box of vertices projected onto the image plane.

    Args:
        verts (B x N x 3): Vertices.
        f (float): Focal length.
        img_size (int): Size of image in pixels.

    Returns:
        Bounding boxes in xywh format (B x 4).
    """
    xy = verts[:, :, :2]
    depth = verts[:, :, 2:]
    # Perspective-project into [0, 1], then scale to pixel coordinates.
    pix = (f * xy / depth + 0.5) * img_size
    u = pix[:, :, 0]
    v = pix[:, :, 1]
    x_min = u.min(1).values
    x_max = u.max(1).values
    y_min = v.min(1).values
    y_max = v.max(1).values
    return torch.stack((x_min, y_min, x_max - x_min, y_max - y_min), 1)
def add_file(args, data):
    """Add one or more files (from args.file) to the current project's file list."""
    data["files"].extend(args.file)
    return data
def is_logged_in(session):
    """Check whether the user is currently logged in.

    A session holding a team id ('tid') counts as logged in; the returned
    dict carries success (1/0) and a human-readable message.
    """
    if 'tid' not in session:
        return {"success": 0, "message": "You do not appear to be logged in."}
    return {'success': 1, 'message': 'You appear to be logged in.'}
def float_or_string(arg):
    """Coerce arg to float when possible, otherwise return it unchanged."""
    try:
        return float(arg)
    except ValueError:
        return arg
def __getitem__(self, index):
    """
    Retrieve a field or slice of fields from the record by index.

    Args:
        index: int or slice object for normal sequence item access.

    Returns:
        For an int index, the value of the corresponding field; for a
        slice, a list of field values for the sliced getters.
    """
    selected = self._attr_getters[index]
    if isinstance(index, int):
        return selected(self)
    # index was a slice: selected is a list of getters
    return [getter(self) for getter in selected]
def fullname(o):
    """Get the fully qualified class name, omitting the builtins module prefix."""
    module = o.__module__
    if module is None or module == str.__module__:
        return o.__name__
    return "{}.{}".format(module, o.__name__)
from typing import List
from typing import Dict
from typing import Set
def scope2dict(scopes: List[str]) -> Dict[str, Set]:
    """Given a list of scopes like ["admin:write", "user:read"], return a
    dictionary keyed by namespace whose values are sets of actions.

    A scope without a colon gets the namespace itself as key and the action
    "any"; an empty namespace becomes "any" as well.
    """
    permissions: Dict[str, Set] = {}
    for scope in scopes:
        ns, sep, action = scope.partition(":")
        if not sep:
            action = "any"
        if not ns:
            ns = "any"
        permissions[ns] = set(action.split(":"))
    return permissions
def shell_quote(var):
    """
    Escape single quotes and add double quotes around a given variable.

    Args:
        var: value to quote (its repr() is used)
    Returns:
        str: string wrapped in double quotes

    .. warning:: This is not safe for untrusted input and only valid
                 in this context (``os.environ``).
    """
    _repr = repr(var)
    if _repr.startswith('\''):
        return "\"%s\"" % _repr[1:-1]
    # repr() chose double quotes (the value contains a single quote);
    # the original fell off the end and returned None here.
    return _repr
def load_file(file_path):
    """
    Load and return the content of a file.

    :param file_path: Path to the file.
    :return: Content of the file.
    :raises Exception: if the path is a directory or an I/O error occurs.
    """
    try:
        with open(file_path, 'r') as file:
            return file.read()
    # IsADirectoryError must come first: it is a subclass of IOError/OSError.
    except IsADirectoryError as err:
        raise Exception("Reading file '{0}' not possible, because it's a directory.".format(file_path)) from err
    except IOError as err:
        raise Exception("IOError while reading the '{0}' file.".format(file_path)) from err
def calculated_stat(base_stat, level, iv, effort, nature=None):
    """Return the calculated stat — the value actually shown in the game
    on a Pokémon's status tab.
    """
    # Ported from C, so integer floor division throughout.
    raw = (base_stat * 2 + iv + effort // 4) * level // 100 + 5
    if nature:
        return int(raw * nature)
    return raw
def _get_received_by(received_header):
    """
    Helper function to grab the 'by' part of a Received email header.

    Returns '' if the 'by ...' portion cannot be extracted.
    """
    received_header = received_header.replace('\r', '').replace('\n', '')
    info = received_header.split('by ')
    try:
        return info[-1].split('for ')[0]
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed; split() always returns >= 1 element,
        # so this branch is purely defensive.
        return ''
def dot(u, v):
    """Dot product between vectors ``u`` and ``v``."""
    total = 0
    for u_i, v_i in zip(u, v):
        total += u_i * v_i
    return total
def func_create_file(path_to_file):
    """
    Create a file containing a single known line and read it back.

    Example/s of use (and excutable test/s via doctest):
    >>> func_create_file("file.txt")
    'line in file'

    :param path_to_file: set a path to file
    :return: string with first line in file
    """
    with open(path_to_file, "w") as out:
        out.write("line in file")
    with open(path_to_file, "r") as src:
        return src.readline()
from typing import Optional
def reformat_content_lines(
    content: str,
    fmt: str,
    header: Optional[str] = None,
    footer: Optional[str] = None,
) -> str:
    """Apply a (new-style) Python format expression to each line of a string
    content block, optionally wrapping it with a header and/or footer.

    Parameters
    ----------
    fmt : `str`
        Format statement applied to each content line; the line is the
        default format argument (e.g. ``# {}`` turns lines into comments).
    header : `str`, optional
        Text prepended before the formatted content.
    footer : `str`, optional
        Text appended after the formatted content.
    """
    out = []
    if header is not None:
        out.append(header)
    # Strip the trailing newline first so we don't emit an empty final line.
    out.extend(fmt.format(line).rstrip() for line in content.rstrip().split("\n"))
    if footer is not None:
        out.append(footer)
    # Always finish with a newline.
    return "\n".join(out) + "\n"
def shader_source_with_tex_offset(offset):
    """Returns a vertex shader using a texture access with the given offset."""
    return (
        "#version 150\n"
        "uniform sampler1D tex;\n"
        f"void main() {{ vec4 x = textureOffset(tex, 1.0, {offset}); }}"
    )
def sanitize_num_processes(num_processes, min_processes, parallel=False):
    """
    Decide whether parallel processing may proceed.

    :param num_processes: How many processes have been requested
    :param min_processes: Minimum number of cores to keep free
    :param parallel: If parallel is requested
    :return bool: True only if parallel was requested and num_processes
        is at least min_processes
    """
    if parallel and num_processes < min_processes:
        return False
    return parallel
from typing import Any
import re
def parse_hyperopt_param(string: str) -> Any:
    """Parse lower and upper bounds from a string representation of a
    hyperopt parameter.

    Parameters
    ----------
    string : str
        String representation of hyperopt parameter

    Returns
    -------
    (param_type, parsed) : tuple
        param_type is the parameter kind; parsed is [lower, upper].

    Raises
    ------
    ValueError
        For categorical parameters (bounds are not supported).
    """
    # Raw strings: the original used "\d" in plain strings, which is an
    # invalid escape sequence (DeprecationWarning, SyntaxError in future).
    param_type: str = re.findall(r'categorical|quniform|uniform|loguniform', string)[0]
    if param_type == 'categorical':
        raise ValueError("categorical bounds not supported")
    # Gather all the Literal{...} entries, then pull the numbers out of them.
    literals = ' '.join(re.findall(r"Literal{-*\d+\.*\d*}", string))
    parsed = [float(tok) for tok in re.findall(r"[Literal{}](-*\d+\.*\d*)", literals)]
    return param_type, parsed[:2]
def daybounds(season):
    """Input is a 3-character season, e.g. JAN, JJA, ANN. Output is bounds for
    that season in units of "days since 0".
    In most cases the bounds will be like [[lo,hi]]; for DJF it is
    [[lo1,hi1],[lo2,hi2]] because the season wraps the year boundary.
    The noleap (365 day) calendar is required."""
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # noleap
    month_names = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
                   'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
    bounds = {}
    start = 0
    for name, length in zip(month_names, month_lengths):
        bounds[name] = [[start, start + length]]
        start += length
    # Multi-month seasons span from the first month's start to the last's end.
    bounds['MAM'] = [[bounds['MAR'][0][0], bounds['MAY'][0][1]]]
    bounds['JJA'] = [[bounds['JUN'][0][0], bounds['AUG'][0][1]]]
    bounds['SON'] = [[bounds['SEP'][0][0], bounds['NOV'][0][1]]]
    bounds['DJF'] = [bounds['DEC'][0],
                     [bounds['JAN'][0][0], bounds['FEB'][0][1]]]
    bounds['ANN'] = [[bounds['JAN'][0][0], bounds['DEC'][0][1]]]
    return bounds[season]
def generate_session(public, private, modulus):
    """Calculate the Diffie-Hellman style session key: public**private mod modulus,
    given the received public key, the receiver's private key, and the modulus."""
    session_key = pow(public, private, modulus)
    return session_key
def get_user_input(message, default):
    """Prompt the user on the keyboard with ``message`` (showing ``default``)
    and return what they typed, or ``default`` when the input was empty."""
    prompt = message + " [{}]: ".format(default)
    reply = input(prompt)
    print("")
    return reply if reply else default
def get_port_from_usb(first_usb_index, second_usb_index):
    """
    Map the last two USB location indices to the acroname port number.
    Raises KeyError for index pairs outside the 8-port map.
    """
    port_map = {
        (4, 4): 0, (4, 3): 1, (4, 2): 2, (4, 1): 3,
        (3, 4): 4, (3, 3): 5, (3, 2): 6, (3, 1): 7,
    }
    return port_map[(first_usb_index, second_usb_index)]
def compute_label(results):
    """
    Majority vote over a sequence of [0]/[1] result entries: return 0 when
    zeros are at least as common as ones, otherwise 1.
    """
    votes = list(results)
    zeros = votes.count([0])
    ones = votes.count([1])
    return 0 if zeros >= ones else 1
def exist(dataset_name, dataset_type):
    """
    Check if the (dataset_name, dataset_type) combination is a known dataset split.
    """
    valid_splits = {
        "Clotho": {"train", "test", "valid"},
        "audiocaps": {"train", "test", "valid"},
        "BBCSoundEffects": {"train", "test"},
        "audioset": {"balanced_train", "unbalanced_train", "eval"},
    }
    return dataset_type in valid_splits.get(dataset_name, set())
def line_count(file_path, include_blank_line=True) -> int:
    """
    Count how many lines a file contains.

    :param file_path: path to the file
    :param include_blank_line: whether blank lines count (default True)
    """
    with open(file_path, 'r', encoding='utf8') as f:
        lines = f.readlines()
    if not include_blank_line:
        lines = [line for line in lines if line.strip()]
    return len(lines)
import json
def json2csv(jsondata, header=True): #---------------------------------------<<<
"""Convert JSON data to CSV.
jsondata = string containing a JSON document
e.g., open('filename.json').read()
header = whether to output a CSV header row of field names
Returns a string of the CSV version of the JSON data.
"""
jsondoc = json.loads(jsondata)
if not jsondoc:
return '' # no JSON data found
fldnames = sorted([field for field in jsondoc[0]])
csvdata = ','.join(fldnames) + '\n' if header else ''
for row in jsondoc:
values = [row[fldname] for fldname in fldnames]
csvdata += ','.join(values) + '\n'
return csvdata | 569a48fc6e2b8392de904c2bb7c76f40e287a457 | 125,782 |
def to3(pnts):
    """
    Convert homogeneous coordinates to plain 3D coordinates by dividing
    the x, y and z components by the w component.

    Aruments:
        pnts: A sequence of 4-tuples (x,y,z,w)

    Returns:
        A list of 3-tuples (x,y,z)
    """
    scaled = []
    for pt in pnts:
        w = pt[3]
        scaled.append((pt[0] / w, pt[1] / w, pt[2] / w))
    return scaled
def partial_difference_quotient(f, v, i, h):
    """
    Compute the ith partial difference quotient of f at v
    (forward-difference approximation of the ith partial derivative).
    """
    perturbed = list(v)
    perturbed[i] = perturbed[i] + h  # nudge only the ith coordinate
    return (f(perturbed) - f(v)) / h
import pathlib
def path_resolved(path_str):
    """Return a resolved (absolute, symlink-free) Path for a string."""
    return pathlib.Path(path_str).resolve()
def slack_attachment(alarm, state):
    """Return the slack attachment payload for a CloudWatch alarm."""
    trig = alarm.trigger
    trigger_text = "{} of {} {} {} for {} period(s) of {} seconds.".format(
        trig.statistic, trig.metric, trig.comparison_operator,
        trig.threshold, trig.evaluation_period, trig.period)

    def field(title, value, short):
        """One Slack attachment field entry."""
        return {"title": title, "value": value, "short": short}

    return [
        {
            "color": state,
            "type": "mrkdwn",
            "fields": [
                field("Account", alarm.account, True),
                field("Region", alarm.region, True),
                field("Alarm Name", alarm.name, False),
                field("Alarm Description", alarm.description, False),
                field("Trigger", trigger_text, False),
            ],
        }
    ]
import re
def is_separator(line):
    """
    Does the given line consist solely of separator characters?
    -=+: and Unicode line-drawing equivalents
    """
    separator_re = re.compile(r"^[-=+:|\.`'\s\u2500-\u257F]*$")
    return separator_re.match(line) is not None
def _check_technology_advancements_for_download(ta):
    """Checks the technology advancements input to :py:func:`download_demand_data` and
    :py:func:`download_flexibility_data`.

    :param set/list ta: The input technology advancements that will be checked. Can be
        any of: *'Slow'*, *'Moderate'*, *'Rapid'*, or *'All'*.
    :return: (*set*) -- The formatted set of technology advancements.
    :raises TypeError: if ta is not input as a set or list, or if the components of ta
        are not input as str.
    :raises ValueError: if the components of ta are not valid.
    """
    # Check that the input is of an appropriate type
    if not isinstance(ta, (set, list)):
        raise TypeError("Technology advancements must be input as a set or list.")
    # Check that the components of ta are str
    if not all(isinstance(x, str) for x in ta):
        raise TypeError("Individual technology advancements must be input as a str.")
    # Reformat components of ta
    ta = {x.capitalize() for x in ta}
    valid = {"Slow", "Moderate", "Rapid"}
    if "All" in ta:
        ta = set(valid)
    # Check that the components of ta are valid
    if not ta.issubset(valid):
        invalid_ta = ta - valid
        # Fixed: the original message mistakenly said "electrification scenarios".
        raise ValueError(f'Invalid technology advancements: {", ".join(invalid_ta)}')
    # Return the reformatted ta
    return ta
def partition_nodes(ilist, bitpos):
    """Partition instruction nodes by the value of pattern bit ``bitpos``.

    Returns a tuple (ones, zeros, others): nodes whose next bit is one,
    zero, or a letter (anything else)."""
    zeros, ones, others = [], [], []
    for inst in ilist:
        value = inst.ipattern.bits[bitpos].value
        if value == '0':
            zeros.append(inst)
        elif value == '1':
            ones.append(inst)
        else:
            others.append(inst)
    return (ones, zeros, others)
def add_lists(*lists):
    """Add lists together element-wise without numpy.

    For example, [1, 2] and [3, 4] give [4, 6]. Inputs are sliced first so
    they are never mutated.
    """
    copies = [seq[:] for seq in lists]
    return [sum(column) for column in zip(*copies)]
def _get_effective_padding_node_input(stride, padding,
                                      effective_padding_output):
    """Computes effective padding at the input of a given layer.

    Args:
        stride: Stride of given layer (integer).
        padding: Padding of given layer (integer).
        effective_padding_output: Effective padding at output of given layer
            (integer).

    Returns:
        Effective padding at input of given layer (integer).
    """
    effective_padding_input = stride * effective_padding_output + padding
    return effective_padding_input
import zlib
def zlib_to_bytes(zlib_data):
    """Decompress a zlib-compressed byte string and return the raw bytes."""
    raw = zlib.decompress(zlib_data)
    return raw
def nb_digit(request):
    """Return the number of digits.

    Presumably a pytest parametrized fixture: ``request.param`` carries the
    current parameter value — TODO confirm the ``@pytest.fixture(params=...)``
    decorator at the definition site (not visible here).
    """
    return request.param
import six
def _is_start_piece_bert(piece):
    """Check if the current word piece is the starting piece (BERT)."""
    # WordPiece prefixes every non-initial sub-token with "##", so a piece
    # without that marker starts a new word; pieces carrying "##" are
    # appended to the previous set of word indexes.
    text = six.ensure_str(piece)
    return not text.startswith("##")
def baseline_value_corr(case):
    """Baseline correction: shift the spectrum so its first independent
    variable becomes zero."""
    baseline = case[0]
    return (case - baseline)
def tl_add(a, b):
    """
    Element-wise addition for tuples or lists; the result has the same
    (exact) type as ``a``.
    """
    summed = [x + y for x, y in zip(a, b)]
    kind = type(a)  # exact type check, as in the original — no subclasses
    if kind == list:
        return summed
    if kind == tuple:
        return tuple(summed)
    raise ValueError(f"Unknown type {kind}")
def buf_to_int(x, n_bytes=2):
    """
    Convert a floating point buffer into integer values — primarily useful
    as an intermediate step in wav output.

    Args:
        x: np.ndarray [dtype=float]
            Floating point data buffer, expected in [-1, 1]
        n_bytes: int [1, 2, 4]
            Number of bytes per output sample

    Returns:
        x_int : np.ndarray [dtype=int]
            The buffer rescaled and cast to little-endian signed ints.
    """
    if n_bytes not in [1, 2, 4]:
        raise ValueError('n_bytes must be one of {1, 2, 4}')
    # Full-scale value for the signed output width.
    scale = float(1 << ((8 * n_bytes) - 1))
    # Little-endian signed integer format of the requested width.
    fmt = '<i{:d}'.format(n_bytes)
    return (x * scale).astype(fmt)
from typing import Dict
def gamma_reparam(mu: float, sd: float) -> Dict[str, float]:
    r"""Transforms mean and standard deviation parameters to `scipy.stats.gamma` parameters.

    The following transformation is computed and returned::

    .. math::
        a = \frac{\mu^2}{\sigma^2}\\
        scale = \frac{\sigma^2}{\mu}

    :param mu: mean of desired gamma distribution (>0)
    :param sd: standard deviation of desired gamma distribution (>0)
    :return {a, scale}: shape and scale parameters for `scipy.stats.gamma`
    :raises ValueError: if sd is not positive
    """
    # Raw docstring (r"""): the original contained \f and \m escapes in a
    # plain string, which are invalid escape sequences.
    if not sd > 0:
        raise ValueError("`sd` must be positive! Found: %f" % sd)
    a = mu**2 / sd**2
    scale = sd**2 / mu
    return {'a': a, 'scale': scale}
def load_dictionary(datafile):
    """Return a dictionary created from the specified CSV file.

    Each line must be "key,value"; both are lowercased.

    :param datafile: path to the two-column CSV file
    :return: dict mapping lowercased keys to lowercased values
    """
    dictionary = dict()
    # Context manager closes the handle deterministically; the original
    # open(...).readlines() leaked the file object.
    with open(datafile, 'r') as handle:
        for line in handle:
            key, value = line.strip().split(',')
            dictionary[key.lower()] = value.lower()
    return dictionary
def token_classification_meta_data(train_data_size,
                                   max_seq_length,
                                   num_labels,
                                   eval_data_size=None,
                                   test_data_size=None,
                                   label_list=None,
                                   processor_type=None):
    """Creates metadata for tagging (token classification) datasets.

    Optional fields are only included when their argument is truthy.
    """
    meta_data = {
        "train_data_size": train_data_size,
        "max_seq_length": max_seq_length,
        "num_labels": num_labels,
        "task_type": "tagging",
        "label_type": "int",
        "label_shape": [max_seq_length],
    }
    optional_fields = (
        ("eval_data_size", eval_data_size),
        ("test_data_size", test_data_size),
        ("label_list", label_list),
        ("processor_type", processor_type),
    )
    for key, value in optional_fields:
        if value:
            meta_data[key] = value
    return meta_data
def isNonNull(requestContext, seriesList):
    """
    Takes a metric or wildcard seriesList and marks, point by point, where
    non-null values are present: each value becomes 1 if it was non-null
    and 0 if it was null. Useful for understanding which metrics have data
    at a given point in time (e.g. counting which servers are alive).

    Example:

    .. code-block:: none

      &target=isNonNull(webapp.pages.*.views)
    """
    for series in seriesList:
        series.name = "isNonNull(%s)" % (series.name)
        series.pathExpression = series.name
        flags = [0 if v is None else 1 for v in series]
        # Replace the series contents in place: append the flags, then
        # drop the original prefix (preserves the original's mechanism).
        series.extend(flags)
        del series[:len(flags)]
    return seriesList
import random
def random_depots(num_objects=20):
    """Return a list of ``num_objects`` random depots, each a dict with
    uniformly sampled "latitude" and "longitude" keys."""
    depots = []
    for _ in range(num_objects):
        depots.append({
            "latitude": random.uniform(-90, 90),
            "longitude": random.uniform(-180, 180),
        })
    return depots
def w_oper(rho_mix, rho_vapor):
    """
    Calculate the operating speed in the column.

    Parameters
    ----------
    rho_mix : float
        The density of the mix, [kg/m**3]
    rho_vapor : float
        The density of the vapor, [kg/m**3]

    Returns
    -------
    w_oper : float
        The operating speed in the column, [m]

    References
    ----------
    Dytnersky, page 205, formula 5.33
    """
    speed = 0.05 * rho_mix**0.5 / rho_vapor**0.5
    return speed
import sqlite3
def connect_to_db(db_name="buddymove_holidayiq.sqlite3"):
    """
    Open (creating if necessary) and return a SQLite connection to db_name.
    """
    connection = sqlite3.connect(db_name)
    return connection
from typing import Dict
def parse_preprocess(preprocess_config: Dict) -> Dict:
    """
    Parse the preprocess configuration, filling in the default data
    augmentation for potentially outdated configs.

    :param preprocess_config: potentially outdated config (mutated in place)
    :return: latest config
    """
    preprocess_config.setdefault("data_augmentation", {"name": "affine"})
    return preprocess_config
def whoosh2dict(hits):
    """Convert a whoosh results list into a list of plain dictionaries,
    one key/value pair per schema column."""
    return [{key: value for key, value in hit.items()} for hit in hits]
def builder_id_string(builder_id_message):  # pragma: no cover
    """Returns the canonical "project/bucket/builder" string for a BuilderID."""
    return '{0.project}/{0.bucket}/{0.builder}'.format(builder_id_message)
import re
def convert_to_prettyprint(xmlstr):
    """
    Convert XML to pretty print for older python versions (< 2.7) by
    collapsing whitespace-padded text nodes onto their enclosing tags.

    :param xmlstr: input XML string
    :return: XML string (pretty printed)
    """
    collapse_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
    return collapse_re.sub(r'>\g<1></', xmlstr)
def convert(value: float, fmt: str) -> float:
    """Converts the value between inches and centimeters.

    :param value: The value to be converted; must be numeric (not bool),
        otherwise a TypeError is raised.
    :param fmt: Target unit, "cm" or "in" (case-insensitive).
    :return: Float rounded to 4 decimal places after conversion.
    :raises ValueError: for any other fmt.
    """
    # Exact type check on purpose: bool is an int subclass and is rejected.
    if not (type(value) == float or type(value) == int):
        raise TypeError("Wrong type, lady!")
    unit = fmt.lower()
    if unit == "cm":
        return round(value * 2.54, 4)
    if unit == "in":
        return round(value / 2.54, 4)
    raise ValueError("Wrong value, sis!")
def dcost(y_hat, y):
    """calc dloss, where loss = (y_hat - y)^2

    NOTE(review): for loss = (y_hat - y)^2 the true derivative w.r.t. y_hat
    is 2*(y_hat - y); this returns the un-scaled difference (i.e. the
    gradient of the halved squared error) — confirm callers absorb the
    factor of 2 into the learning rate.
    """
    return y_hat - y
def versioning_model_classname(manager, model):
    """Get the name of the versioned model class.

    With the 'use_module_name' option (default True) the title-cased,
    dot-stripped module path is prepended to the class name.
    """
    if not manager.options.get('use_module_name', True):
        return '%sVersion' % (model.__name__,)
    module_part = model.__module__.title().replace('.', '')
    return '%s%sVersion' % (module_part, model.__name__)
def getIdHexChars(id):
    """ get the hex chars of the given id """
    if id[0] == 'c':
        # chunk ids carry a trailing "_<chunk index>" — strip it first
        base = id[:id.index('_')]
    else:
        base = id
    parts = base.split('-')
    if len(parts) != 6:
        raise ValueError(f"Unexpected id format for uuid: {id}")
    return "".join(parts[1:])
def set_bits(store, bit_val, r_position, spread):
    """
    Write ``spread`` bits of ``bit_val`` into ``store`` at ``r_position``.

    :param store: integer pre-manipulation
    :param bit_val: value of the bits to be set (truncated to ``spread`` bits)
    :param r_position: zero-based index of the right-most bit affected
    :param spread: how many bits to change (0 leaves store unchanged)
    :return: integer with the bit field set
    """
    # Mask keeping everything outside [r_position, r_position + spread).
    keep_mask = (-1 << (spread + r_position)) | ((1 << r_position) - 1)
    cleared = store & keep_mask
    field = (bit_val & ((1 << spread) - 1)) << r_position
    return cleared | field
import signal
def set_timeout(timeout, func):
    """
    Call the provided function, raising TimeoutError if it runs longer
    than ``timeout`` seconds. Unix-only (relies on SIGALRM).

    :param timeout: timeout in whole seconds
    :param func: zero-argument callable to run
    :return: whatever func() returns
    """
    def timeout_handler(*args):
        raise TimeoutError()
    old_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)
    try:
        return func()
    finally:
        # Cancel any pending alarm — the original left it armed, so a
        # timely return could still be interrupted later — and restore
        # the previous handler instead of installing a no-op lambda.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_handler)
def fahrenheit2kelvin(F):
    """Convert a temperature from degrees Fahrenheit to Kelvin.

    :param F: Temperature in Fahrenheit
    :return: Temperature in Kelvin
    """
    # Shift onto the Rankine scale, then rescale degrees to kelvins.
    offset_to_rankine = 459.67
    return 5.0 / 9.0 * (F + offset_to_rankine)
def load_input(fname):
    """Return the full contents of the file at ``fname`` as one string."""
    with open(fname, "r") as handle:  # pylint: disable=invalid-name
        contents = handle.read()
    return contents
def count_characters(cursor):
    """Return the number of rows in ``charactercreator_character``.

    Args:
        cursor (sqlite3.Cursor): cursor to sqlite database
    Returns:
        (int) Number of characters
    """
    row = cursor.execute("""SELECT COUNT(character_id)
                            FROM charactercreator_character;""").fetchone()
    return row[0]
def email(faker):
    """Return a random email address produced by the given faker instance."""
    address = faker.email()
    return address
def _insert_substr(text: str,
substr: str,
index: int,
cap_before: bool = False,
cap_after: bool = False,
replace: bool = False) -> str:
"""Insert a substring into the text."""
before = text[0:index]
if replace:
index += 1
after = text[index:]
if cap_before:
before = before.title()
if cap_after:
after = after.title()
return f'{before}{substr}{after}' | 3a9564f764299db7d90ec000de86bc14f9b1ce2a | 125,919 |
def get_set_value(set_data, match_key):
    """Extract a value from SET output (lines of ``key=value``).

    Entries without an ``=`` are skipped; returns None when no key matches.
    """
    for entry in set_data:
        if '=' not in entry:
            continue
        key, _, value = entry.partition('=')
        if key == match_key:
            return value
    return None
def mirror_action(act):
    """
    Mirror an action, swapping the left (1) and right (2) direction component.
    """
    direction = (act % 18) // 6
    # Directions 1 and 2 trade places; 0 (and anything else) is symmetric.
    swap = {1: 2, 2: 1}
    mirrored = swap.get(direction, direction)
    return act + (mirrored - direction) * 6
def create_image_info(image_id, image_size, file_name, license_id=1):
    """Build an image-information record in COCO format.

    ``image_size`` is a (width, height) pair.
    """
    width, height = image_size[0], image_size[1]
    info = dict(
        id=image_id,
        width=width,
        height=height,
        file_name=file_name,
        license=license_id,
    )
    return info
import itertools
def getAllListCombinations(a):
    #http://www.saltycrane.com/blog/2011/11/find-all-combinations-set-lists-itertoolsproduct/
    """Return every combination that picks one element from each list in ``a``.

    If every input list has exactly one element, the single combination is
    returned as a list of those elements; otherwise ``itertools.product``
    supplies the combinations as tuples.
    """
    lengths = {len(sub) for sub in a}
    if lengths == {1}:
        # Degenerate case: one choice per list, flatten directly.
        return [[sub[0] for sub in a]]
    return list(itertools.product(*a))
def name_of_month(m):
    """Given an integer m between 1 and 12 inclusive,
    indicating a month of the year, returns the name of that month.
    For example: name_of_month(1) == 'January' and name_of_month(12) == 'December'.
    If the month does not exist (that is, if m is outside the legal range),
    then this function returns None.
    """
    names = ('January', 'February', 'March', 'April', 'May', 'June',
             'July', 'August', 'September', 'October', 'November', 'December')
    if m < 1 or m > 12:  # non-existent month
        return None
    # Tuple lookup replaces the 13-branch if/elif chain; it also avoids the
    # old bug where any non-integer m inside the range (e.g. 2.5) fell
    # through every branch and wrongly returned 'December'.
    return names[m - 1]
import pathlib
def check_star_idx_exists(star_idx_dir):
    """Return True iff every file of a STAR genome index is present.

    :param star_idx_dir: path (str or Path) to the STAR index directory
    :return: True when all expected index files exist, False otherwise
    """
    staridx_files = (
        'chrLength.txt',
        'chrName.txt',
        'Genome',
        'SA',
        'chrNameLength.txt',
        'chrStart.txt',
        'genomeParameters.txt',
        'SAindex',
    )
    idx_dir = pathlib.Path(star_idx_dir)
    # all() with a generator replaces the manual boolean-list accumulation
    # and short-circuits on the first missing file.
    return all(idx_dir.joinpath(name).exists() for name in staridx_files)
def clean_number(number_string):
    """
    Strip every character that is not an ASCII digit from ``number_string``
    and return the remaining digits as an int.
    """
    digits = [ch for ch in number_string if ch in "0123456789"]
    return int("".join(digits))
def quotestrip(word):
    """Remove matching single or double quotes wrapping an identifier.

    Repeats until the word is no longer quoted; returns None for falsy input.
    """
    if not word:
        return None
    def _wrapped(s, q):
        # True when s both starts and ends with the quote character q.
        return s.startswith(q) and s.endswith(q)
    while _wrapped(word, "'") or _wrapped(word, '"'):
        word = word[1:-1]
    return word
def create_parameter_list(path_params):
    """Build swagger-style parameter descriptions for a list of path parameters."""
    return [
        {
            'in': 'path',
            'name': str(param),
            'description': 'ID of ' + str(param),
            'required': True,
            'type': 'string',
        }
        for param in path_params
    ]
def modulename(filename):
    """
    Derive the module name from a python file path.

    filename
        string, name of a python file
    """
    basename = filename.split('/')[-1]
    # Strip '.pyc' before '.py' so compiled files lose the whole extension.
    for ext in ('.pyc', '.py'):
        basename = basename.replace(ext, '')
    return basename
import hashlib
def _create_finding_id(control_id, resource_name, length=20):
"""Hash the control and resource; repeatable (tho not strictly unique).
Needs to be repeatable such that the same control/test maps to the
same SCC Finding over multiple runs.
"""
input = control_id + resource_name
hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()
result = int(hex, 16) % (10 ** length)
return str(result) | a2be7749a2a937bfbcbf381371f950fd84ee11a3 | 125,949 |
def load_lines(file_path):
    """Read file into a list of lines.

    Input
        file_path: file path
    Output
        lines: an array of lines (without trailing newlines)
    """
    with open(file_path, 'r') as stream:
        return stream.read().splitlines()
def verify_device(db_obj, query_obj, device) -> bool:
    """
    Check whether ``device`` matches a record in the db.

    :param db_obj: database accessor exposing ``read_one``
    :param query_obj: query builder exposing ``verify_device``
    :param device: device to look up
    :return: True if a matching record exists, False otherwise
    """
    result = db_obj.read_one(query_obj.verify_device(device))
    # bool() replaces the redundant `if result: return True else: return False`.
    return bool(result)
from typing import Dict
from typing import List
from typing import Tuple
def _is_explanation_equal(dict1: Dict[str, List[Tuple[str, float]]],
dict2: Dict[str, List[Tuple[str, float]]]) -> bool:
"""
Tests if the two dictionaries of a given structure are equal.
The both of the input parameters must be a dictionary with string keys and
list values. The latter is composed of 2-tuples of strings and floats.
The keys in the dictionary and the tuples must match exactly, while the
floats only need to be approximately equal. The ordering of the tuples in
the list does not need to be the same.
Parameters
----------
dict1 : Dictionary[string, List[Tuple[string, float]]]
The first dictionary to be compared.
dict2 : Dictionary[string, List[Tuple[string, float]]]
The second dictionary to be compared.
Returns
-------
equal : boolean
``True`` if the dictionaries are the same, ``False`` otherwise.
"""
if set(dict1.keys()) == set(dict2.keys()):
equal = True
for key in dict1:
val1 = sorted(dict1[key])
val2 = sorted(dict2[key])
if len(val1) != len(val2):
equal = False
break
for i in range(len(val1)):
if val1[i][0] != val2[i][0]:
equal = False
break
if abs(val1[i][1] - val2[i][1]) > 1e-1:
equal = False
break
if not equal:
break
else:
equal = False
return equal | 92fedbf80441db270934017a67c7f5b88702381b | 125,954 |
def did_run_test_module(output, test_module):
    """Check that a test actually ran by scanning the Odoo log output.

    ``test_module`` is the full test name (addon_name.tests.test_module).
    """
    marker = "odoo.addons." + test_module
    return marker in output
def lookup_named_tensor(name, named_tensors):
    """Retrieves a NamedTensor by name.

    Args:
        name: Name of the tensor to retrieve.
        named_tensors: List of NamedTensor objects to search.
    Returns:
        The first NamedTensor in ``named_tensors`` whose name matches.
    Raises:
        KeyError: If ``name`` is not found among ``named_tensors``.
    """
    matches = (nt for nt in named_tensors if nt.name == name)
    found = next(matches, None)
    if found is None:
        raise KeyError('Name "%s" not found in named tensors: %s' % (name,
            named_tensors))
    return found
def fill_unk(lexicon, words_to_fill, pseudo="UNK"):
    """
    Replace out-of-vocabulary words with a pseudo token.
    :param lexicon: vocabulary
    :param words_to_fill: the words to fill in
    :param pseudo: the pseudo token to substitute unknown words
    :return: list with unknown words replaced by ``pseudo``
    """
    return [pseudo if word not in lexicon else word for word in words_to_fill]
def remove_illegal_chars_for_postscript_name_part(name):
    """
    Conforming The Compact Font Format Specification (version 1.0), section 7 "Name INDEX".
    Also removing "-".
    """
    # str.maketrans with an empty mapping and a deletion string builds the
    # same codepoint->None table the original spelled out by hand.
    illegal = "[](){}<>/%\u0000\u0020\u0009\u000D\u000A\u000C-"
    return name.translate(str.maketrans('', '', illegal))
def object_access_allowed(groups, path):
    """
    Decide if a user is allowed to access a path.

    :param groups: comma-separated group prefixes the user belongs to
    :param path: object path being requested
    :return: True if ``path`` starts with any of the user's group prefixes
    """
    # any() with a generator replaces the manual loop-and-return-True.
    return any(path.startswith(group) for group in groups.split(','))
def get_field_names(model):
    """
    Get field names which aren't autogenerated
    Args:
        model (class extending django.db.models.Model): A Django model class
    Returns:
        list of str:
            A list of field names
    """
    names = []
    for field in model._meta.get_fields():  # pylint: disable=protected-access
        if field.auto_created:
            continue
        names.append(field.name)
    return names
def command_line_for_tool(tool_dict, output):
    """
    Calculate the command line for this tool when ran against the output file 'output'.
    """
    extra_args = tool_dict['args'](output)
    return [tool_dict['tool'], *extra_args]
import gzip
import bz2
def fwriter(filename, gz=False, bz=False):
    """Return a file writer producing plain, gzipped or bzip2 output.

    Compression is inferred from a ``.gz``/``.bz2`` filename extension or
    forced via the ``gz``/``bz`` flags; when forced, the matching extension
    is appended if missing. gzip takes precedence over bzip2.
    """
    gz = gz or filename.endswith('.gz')
    bz = (bz or filename.endswith('.bz2')) and not gz
    if gz:
        if not filename.endswith('.gz'):
            filename += '.gz'
        return gzip.open(filename, 'wb')
    if bz:
        if not filename.endswith('.bz2'):
            filename += '.bz2'
        return bz2.BZ2File(filename, 'w')
    return open(filename, 'w')
def is_logs_synced(mst, slv):
    """Function: is_logs_synced

    Description: Return True when the Slave has caught up with the Master.
        For GTID-enabled pairs the executed GTID sets must match; otherwise
        the Master's binary log file name and position must equal the
        Slave's relay log file name and executed position.

    Arguments:
        (input) mst -> Master instance.
        (input) slv -> Slave instance.
        (output) True or False -> True is returned if logs are in sync.
    """
    if mst.gtid_mode and slv.gtid_mode:
        return mst.exe_gtid == slv.exe_gtid
    # Non-GTID server: compare log file name and position directly.
    return mst.file == slv.relay_mst_log and mst.pos == slv.exec_mst_pos
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.