| content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def toExportS16(op):
"""Converts number to exportable signed 16-bit number."""
return max(min(int(round(op)), 32767), -32768)
|
7ddfe94a1d5ad3147dbbf439d9eb49ca91927f99
| 68,696
|
def _format_and_kws(fmt):
"""
>>> _format_and_kws("fmt")
('fmt', {})
>>> _format_and_kws("fmt:+a")
('fmt', {'a': True})
>>> _format_and_kws("fmt:a")
('fmt', {'a': True})
>>> _format_and_kws("fmt:+a,-b") #doctest: +SKIP
('fmt', {'a': True, 'b': False})
>>> _format_and_kws("fmt:c=d")
('fmt', {'c': 'd'})
"""
fmt, kws = fmt, {}
if fmt and ':' in fmt:
fmt, kwrepr = fmt.split(':')
for kw in kwrepr.split(','):
if '=' in kw:
k, v = kw.split('=')
kws[k] = v
elif kw.startswith('-'):
kws[kw[1:]] = False
elif kw.startswith('+'):
kws[kw[1:]] = True
else: # same as "+"
kws[kw] = True
return fmt, kws
|
3d5e9119be4f8fbbe57e40e4c1d624025b2d1baa
| 68,697
|
def is_unique(x):
"""verify that all elemens of list are not repeated
Args:
x (list): list of elements
Returns:
bool: True if all elemens of list are not repeated
"""
return len(x) == len(set(x))
|
5b248278fd2f89af49c63128a9d16ead3392fb3f
| 68,703
|
import torch
def stack_feats(feats):
"""
Args:
feats: [B, (n, c)]
Returns:
feats: (B, N, c)
valid: (B, N)
"""
max_num = max([len(f) for f in feats])
s0 = feats[0].shape
for f in feats:
assert f.shape[1:] == s0[1:], f"{f.shape} vs {s0}"
shape = (max_num,) + feats[0].shape[1:]
new_feats = []
valids = []
for feat in feats:
new_feat = torch.zeros(shape, dtype=feat.dtype).to(feat.device)
valid = torch.zeros(max_num, dtype=torch.bool).to(feat.device)
new_feat[:len(feat)] = feat
valid[: len(feat)] = 1
new_feats.append(new_feat)
valids.append(valid)
return torch.stack(new_feats), torch.stack(valids)
|
98c0dcbc53bc5a35677fd6e67a48ec29bc37b64e
| 68,704
|
def _CompareLocaleLists(list_a, list_expected, list_name):
"""Compare two lists of locale names. Print errors if they differ.
Args:
list_a: First list of locales.
list_expected: Second list of locales, as expected.
list_name: Name of list printed in error messages.
Returns:
On success, return False. On error, print error messages and return True.
"""
errors = []
missing_locales = sorted(set(list_a) - set(list_expected))
if missing_locales:
errors.append('Missing locales: %s' % missing_locales)
extra_locales = sorted(set(list_expected) - set(list_a))
if extra_locales:
errors.append('Unexpected locales: %s' % extra_locales)
if errors:
print('Errors in %s definition:' % list_name)
for error in errors:
print(' %s\n' % error)
return True
return False
|
74a32eb59543262fb684def2abb78ff046689d6f
| 68,708
|
def load_dict_settings(dictionary):
"""
A simple wrapper that just copies the dictionary
:param dict dictionary:
:return: A configuration dictionary
:rtype: dict
"""
return dictionary.copy()
|
4e0fb0f9a718a277fc5a6195db10be26abcfd425
| 68,709
|
import re
def convert_range_to_one_amount(string: str) -> str:
"""Converts a range of amount to one (biggest).
Example
>>> convert_range_to_one_amount('100 - 200 g of chicken breast')
    '200 g of chicken breast'
Args:
string: String which may contain such range.
Returns:
String with converted range (if found)
"""
return re.sub(r"\d+\s{0,1}-\s{0,1}(\d+)", r"\1", string)
|
3eec82bbfe0e1a502a95a5e06c86046f9b2c2fdd
| 68,713
|
def final_pose(objectpose, **kwargs):
"""Return the final pose"""
last_pose = objectpose.iloc[-1][
["posX", "posY", "posZ", "eulerX", "eulerY", "eulerZ"]
]
return last_pose.values.tolist()
|
2d311bb2c3d153f69dd3f97d85a91d4e630ca631
| 68,714
|
def standard_month(date):
"""Return the month of date 'date'."""
return date[1]
|
a8a36b8da682584f9c44457c450e89f61bdf4586
| 68,715
|
def sign_bit(value: int, size: int) -> int:
"""Returns the highest bit with given value and byte width."""
return (value >> ((8 * size) - 1)) & 0x1
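# Illustrative usage (hypothetical values added for clarity; not part of the original snippet):
assert sign_bit(0x80, 1) == 1  # top bit of a 1-byte value is set
assert sign_bit(0x7F, 1) == 0  # top bit clear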
|
7f04607777b972ae29a50f699c7c4f04908ca84c
| 68,721
|
def decode_uri(URI):
"""Decode JavaScript encodeURIComponent"""
    return URI.replace("&amp;", "&")
|
dc53de9b6f96b6ccf34355aca5331d5fca3af7e5
| 68,723
|
def find_example2(sequence):
"""
Returns the INDEX of the first string in the given sequence,
or -1 if the given sequence contains no strings.
"""
# Returns the index (k) or -1
for k in range(len(sequence)):
if type(sequence[k]) == str:
return k
return -1
|
cd22c4780ea9f6c1dd37a6ac3ae31ad326824485
| 68,726
|
from typing import List
from typing import Tuple
def get_lm(l_max: int) -> List[Tuple[int, int]]:
"""Get list of all (l,m) in order up to (and including) l_max"""
return [(l, m) for l in range(l_max + 1) for m in range(-l, l + 1)]
|
b18a8da2657032fcec10a7dd0bfb2fdd26275f80
| 68,728
|
def is_binary_string(string_):
"""
detect if string is binary (https://stackoverflow.com/a/7392391)
:param string_: `str` to be evaluated
:returns: `bool` of whether the string is binary
"""
if isinstance(string_, str):
string_ = bytes(string_, 'utf-8')
textchars = (bytearray({7, 8, 9, 10, 12, 13, 27} |
set(range(0x20, 0x100)) - {0x7f}))
return bool(string_.translate(None, textchars))
|
b6594326632ebe3426f9d7845c808f8952d22bbc
| 68,730
|
def readFile(filePath):
""" reads a file, returning an array of lines in the file """
    lines = []  # or even lines = [l for l in file]
    try:
        with open(filePath) as file:  # context manager closes the file for us
            for l in file:
                lines.append(l)
    except FileNotFoundError:
        print("Invalid File Path Provided")
    return lines
|
4dabd1f303f6a85f13a84d8d8ee5f7177af0b83c
| 68,733
|
def INVALID_NAME(name):
"""Error message for invalid names."""
return "invalid name '{}'".format(name)
|
3a5f6767a31e1a50e9023033461788ead6215df8
| 68,734
|
from typing import Tuple
def split_segment_id(segment_id: str) -> Tuple[float, float, int]:
"""Split a segment ID to segment begin, segment end, and a running number.
Args:
segment_id (str): Segment ids are in the form session-001-2015-START-END[NUMBER] or
session-001-2015-START-END-NUMBER
Returns:
Tuple[float, float, int]: start, end, and number
"""
if "[" in segment_id:
_, begin, end = segment_id.rsplit("-", 2)
end, number = end.split("[")
number = number.replace("]", "")
else:
_, begin, end, number = segment_id.rsplit("-", 3)
return float(begin) / 100.0, float(end) / 100.0, int(number)
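# Illustrative usage, assuming a made-up segment id in each of the two forms described above:
assert split_segment_id("session-001-2015-0123-0456[7]") == (1.23, 4.56, 7)
assert split_segment_id("session-001-2015-0123-0456-7") == (1.23, 4.56, 7)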
|
9c878b25836e993e114c6af9eb6c576580d16b36
| 68,735
|
def vtInEntity2(vertex, entity):
"""
Does the 2D vertex lie in the entity defined by vertices (x1,y1,x2,y2,x3,y3)?
:param vertex: vertex coordinates of mesh
:type vertex: numpy.ndarray[float64 x dim]
:param entity: connectivity of an entity
:type entity: numpy.ndarray[int x (dim+1)]
:return: vtInEntity2: logical flag indicating if it is or isn't.
:rtype: bool
"""
(x, y) = vertex
(x1, y1, x2, y2, x3, y3) = entity
a = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / (
(y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3)
)
b = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / (
(y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3)
)
c = 1 - a - b
# vt lies in entity if and only if 0 <= a <= 1 and 0 <= b <= 1 and 0 <= c <= 1
return 0 <= a and a <= 1 and 0 <= b and b <= 1 and 0 <= c and c <= 1
|
3094a2e27ec7c215ee94ac06b3cbfb8090b88f2b
| 68,738
|
def option_list(opts):
"""Convert key, value pairs into a list.
This converts a dictionary into an options list that can be passed to
ArgumentParser.parse_args(). The value for each dictionary key will be
converted to a string. Values that are True will be assumed to not have
a string argument added to the options list.
Args:
opts (dict): Dictionary of options.
Returns:
(list): The list of options.
"""
optlist = []
for key, val in opts.items():
keystr = "--{}".format(key)
if val is not None:
if isinstance(val, bool):
if val:
optlist.append(keystr)
else:
optlist.append(keystr)
if isinstance(val, float):
optlist.append("{:.14e}".format(val))
elif isinstance(val, (list, tuple)):
optlist.extend(val)
else:
optlist.append("{}".format(val))
return optlist
|
58607fb57dcbd936a052f45f9990ae3d5e89a011
| 68,740
|
def read2dict(filePath):
"""Expects the path to a two column tab-delimited as input. Reads
the first two columns in the file given by filePath into a
dictionary {column1:column2} and returns the dictionary.
"""
outDict = {}
with open(filePath) as fl:
for line in fl:
key, val = line.strip().split()
outDict[key] = val
return outDict
|
70005d976461430bd5c3954dacac56dd67342ecf
| 68,743
|
def _get_bin_centers(bin_edges):
"""Return the arithmetic mean of the bin_edges"""
return 0.5 * (bin_edges[:-1] + bin_edges[1:])
|
b85cdcfaf268c771032070106d1578033b831af4
| 68,746
|
def decode_authorization_header(header):
""" Decodes a HTTP Authorization Header to python dictionary """
authorization = header.get("Authorization", "").lstrip(" ").lstrip("OAuth")
tokens = {}
for param in authorization.split(","):
try:
key, value = param.split("=")
except ValueError:
continue
key = key.lstrip(" ")
value = value.lstrip(" ").lstrip('"')
value = value.rstrip(" ").rstrip('"')
tokens[key] = value
return tokens
|
fe762bc048835d325b15ee2ba3e550f85217c455
| 68,747
|
def sum_(s):
"""
Sum items of a list
:param s:
:return:
"""
sumd = 0
for l in s:
sumd += l
return sumd
|
40d5706f52da236bf3a9c956c281978c88c90edd
| 68,749
|
import inspect
def is_pkg(mod_or_pkg):
"""Returns True if `mod_or_pkg` is a package"""
return inspect.ismodule(mod_or_pkg) and hasattr(mod_or_pkg, "__path__")
|
a28f39bd9f16fce6908a55e74a32257f80c1030b
| 68,751
|
def str_to_raw(str):
"""Convert string received from commandline to raw (unescaping the string)"""
try: # Python 2
return str.decode('string_escape')
except: # Python 3
return str.encode().decode('unicode_escape')
|
286ab4aaf2b517c276cd441fc605cf69ef971400
| 68,757
|
def _get_change_making_matrix(c: int, n: int) -> list:
"""Returns a list of c + 1 lists of size n + 1. Each of the c + 1 inner
lists contains zeros, except for the first one, where each element is
initialized to its index, because we assume that the first coin is 1 and
it is always available, so that there's always a way to total n. Note that
the list of coins given may already contain 1 as one of its denominations,
but we do not know this, in general."""
m = [[0 for _ in range(n + 1)] for _ in range(c + 1)]
for i in range(n + 1):
# m[0], the first list of m, is associated with the usage of the
# denomination 1, which we assume is always available. So, using
# denomination 1, we can total i using i 1s.
m[0][i] = i
return m
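# Illustrative usage (hypothetical sizes chosen for clarity; not part of the original snippet):
assert _get_change_making_matrix(2, 3) == [[0, 1, 2, 3], [0, 0, 0, 0], [0, 0, 0, 0]]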
|
b6a65967c6c4217b088b191944d194b3d3501e60
| 68,759
|
def compPt(p0, p1):
"""
Returns True if 2-dim points p0 and p1
are equal. Otherwise, returns False.
"""
return p0[0] == p1[0] and p0[1] == p1[1]
|
28399404b035fbd9ef5e04d4b44f89879a77e51c
| 68,765
|
import json
def dumps(data, **kwargs):
"""Serialize ``data`` as a JSON formatted string.
We use ``ensure_ascii=False`` to write unicode characters specifically as this improves the readability of the json
and reduces the file size.
"""
return json.dumps(data, ensure_ascii=False, **kwargs)
|
731abfa5afe6f652b93dce33e79ebfa2173ed858
| 68,768
|
def _extract_purchased(transcript_group):
"""Extracts the purchased column
Args:
transcript_group (pandas.DataFrame): The transcript dataset
Returns:
pandas.DataFrame: The modified transcript with the purchased column extracted
"""
transcript_group.loc[~transcript_group.received, "purchased"] = transcript_group.non_offer_amount > 0.0
transcript_group.loc[transcript_group.received, "purchased"] = transcript_group.viewed & transcript_group.completed
return transcript_group
|
181b63ce5671c5f9b2a42ac94a97bd1c539b39e2
| 68,771
|
def get_prjct_anlss_nm(project_name: str) -> str:
"""
Get a smaller name for printing in qstat / squeue.
Parameters
----------
project_name : str
Command-line passed project name.
Returns
-------
prjct_nm : str
        Same name without the vowels ("aeiouy").
"""
alpha = 'aeiouy'
prjct_nm = ''.join(x for x in project_name if x.lower() not in alpha)
if prjct_nm == '':
prjct_nm = project_name
return prjct_nm
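# Illustrative usage (hypothetical project names, added for clarity):
assert get_prjct_anlss_nm("songbird_analysis") == "sngbrd_nlss"
assert get_prjct_anlss_nm("aeiou") == "aeiou"  # falls back to the original name when everything is a vowel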
|
1582444d08eae968a19653c3f02e7093747640eb
| 68,772
|
def is_literal_query(term: str) -> bool:
"""Determine whether the term is intended to be treated as a literal."""
# return re.match('"[^"]+"', term) is not None
return '"' in term
|
ede795987ebc917f79bb2de3e864dffc1cf26e43
| 68,774
|
def expand_bbox(bbox):
"""Create a bigger box surrounding the inner one."""
pad = 75 # Pad the QR-Code bounding box by this many pixels
return (
min(bbox[0], bbox[2]) - pad,
min(bbox[1], bbox[3]) - pad,
max(bbox[0], bbox[2]) + pad,
max(bbox[1], bbox[3]) + pad)
|
1a36121c013819d7f6a5a854995ec9c916808e06
| 68,775
|
def tors_beta(universe, seg, i):
"""beta backbone dihedral
The dihedral is computed based on position atoms for resid `i`.
Parameters
----------
universe : Universe
:class:`~MDAnalysis.core.universe.Universe` containing the trajectory
seg : str
segment id for base
i : int
resid of the first base
Returns
-------
beta : float
torsion angle in degrees
.. versionadded:: 0.7.6
"""
b = universe.select_atoms(" atom {0!s} {1!s} P ".format(seg, i),
" atom {0!s} {1!s} O5\' ".format(seg, i),
" atom {0!s} {1!s} C5\' ".format(seg, i),
" atom {0!s} {1!s} C4\' ".format(seg, i))
beta = b.dihedral.value() % 360
return beta
|
502135839ee133d3a2ed45745347c515a8ad8b26
| 68,778
|
from typing import Optional
from typing import Type
def is_type_unknown(type_: Optional[Type]) -> bool:
"""Is the type of this variable unknown?"""
return type_ is None
|
d951e5362da8ec6239cf574e443a0c054abd1e70
| 68,779
|
def get_value(values, keys: list, default=None):
"""
returns value from json based on given key hierarchy
Ex:
val_map = {'one' : {'two' : 123 }}
get_value(val_map, ['one', 'two']) returns 123
@param values: json object
@param keys: list keys from the hierarchy tree
@param default: default value to return if the key is not found
@return: value if exists
default otherwise
"""
if values is None:
return default
for key in keys:
if key in values:
values = values[key]
else:
return default
return values if keys else default
|
f65d1203d911c3f8f2b378a51131c241e45b2399
| 68,783
|
import requests
def get_services_json(strCSPProdURL, ORG_ID, session_token):
"""Gets services and URI for associated access token and Org ID"""
myHeader = {'csp-auth-token': session_token}
myURL = f'{strCSPProdURL}/csp/gateway/slc/api/v2/ui/definitions/?orgId={ORG_ID}'
response = requests.get(myURL, headers=myHeader)
json_response = response.json()
return json_response
|
ceecead3a368380512e18c23628ab6d2ed8e7402
| 68,785
|
import re
def get_results_dir(output_list):
"""Given output from acme_diags_driver, extract the path to results_dir."""
for line in output_list:
match = re.search('Viewer HTML generated at (.*)viewer.*.html', line)
if match:
results_dir = match.group(1)
return results_dir
message = 'No viewer directory listed in output: {}'.format(output_list)
raise RuntimeError(message)
|
435244187fb876b068a8ddb8bf1288fad391f06a
| 68,791
|
def is_float(word: str):
"""
    Checks whether a string represents a float number
"""
if "." not in word:
return False
split = word.split(".")
if len(split) > 2 or len(split) < 1:
return False
for num in split:
if not num.isdigit():
return False
return True
|
a9069b186fceea3f7d1392fa2b5d5d0fefd11071
| 68,793
|
import importlib
def extension_context(extension_name='cpu', **kw):
"""Get the context of the specified extension.
All extension's module must provide `context(**kw)` function.
Args:
extension_name (str) : Module path relative to `nnabla.extensions`.
    kw (dict) : Additional keyword arguments for the context function in an extension module.
Returns:
:class:`nnabla.Context`: The current extension context.
Example:
.. code-block:: python
ctx = extension_context('cuda.cudnn', device_id=0)
nn.set_default_context(ctx)
"""
try:
mod = importlib.import_module(
'.' + extension_name, 'nnabla.extensions')
except ImportError:
mod = importlib.import_module('.' + extension_name, 'nnabla_ext')
return mod.context(**kw)
|
c32075bb4ef1f15b958eaa3b45a9f5e5e8b47687
| 68,795
|
def argpad(arg, n, default=None):
"""Pad/crop list so that its length is ``n``.
Parameters
----------
arg : scalar or iterable
Input argument(s)
n : int
Target length
default : optional
        Default value to pad with. By default, replicate the last value
Returns
-------
arg : list
Output arguments
"""
try:
arg = list(arg)[:n]
except TypeError:
arg = [arg]
if default is None:
default = arg[-1]
arg += [default] * max(0, n - len(arg))
return arg
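# Illustrative usage (hypothetical values, added for clarity; not part of the original snippet):
assert argpad([1, 2, 3], 5) == [1, 2, 3, 3, 3]  # padded by replicating the last value
assert argpad(7, 3) == [7, 7, 7]                # a scalar is broadcast to length n
assert argpad([1, 2, 3], 2) == [1, 2]           # longer input is cropped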
|
5d0c01e8e33fbf63d5c109e88019e5a984f98a8d
| 68,796
|
def my_merge(list_1, list_2, key=lambda x: x):
""" Quick function to improve the performance of branch_and_bound
Uses the mergesort algorithm to save the headache of resorting the
entire agenda every time we modify it.
Given two sorted lists and a key, we merge them into one sorted list and
return the answer. Preference is given to the elements of the first list
in the cases of ties
"""
list_res = []
while list_1 and list_2:
if key(list_1[0]) <= key(list_2[0]):
list_res.append(list_1.pop(0))
else:
list_res.append(list_2.pop(0))
    if list_1:
        list_res.extend(list_1)
    elif list_2:
        list_res.extend(list_2)
return list_res
|
f0bc226ec95581969116b12bfe1dfdcd30bb58d9
| 68,799
|
def intercept_signal(signal, start, stop=None, sf=128):
"""Intercept the required signal
Parameters
----------
signal : ndarray
Target data, only the last dimension will be operated.
start : int
Start of interval. If stop is not given, start defaults to 0.
stop : int
End of interval.
sf : int, default=128
Sampling frequency of signal.
Return
---------
data : ndarray
"""
if stop is None:
start, stop = 0, start
point_of_start, point_of_end = start*sf, stop*sf
return signal[..., point_of_start:point_of_end]
|
d62eca5e5b7082e77f69b122fb2cc9dfe1fa309d
| 68,802
|
from datetime import datetime
def name_current_file(input_name):
"""
Generate the output Current.xlsx name for permanent archival
:param input_name: input file name (e.g. Current.xlsx)
:type input_name: str|unicode
:return: output formatted name
:rtype: str|unicode
"""
dt = datetime.now()
dt = dt.strftime("_%Y-%m-%d_%I:%M:%S")
input_split = input_name.split('.')
input_split[0] += dt
return '.'.join(input_split)
|
dbe6215d224de6009acfcac1ba1116598bf1095c
| 68,804
|
def get_subjectaltname(certificate):
"""Return subjectAltName associated with certificate. """
return certificate.get_extension(6)._subjectAltNameString()
|
683dd5e3dd31a622ff70ba036701968d551bbf0c
| 68,807
|
def graphTrain(graph, id_list):
"""
construct graph from training set
Args:
graph: networkx type graph
id_list: IDs of training set
Returns:
        graph constructed from the training set
"""
graph_train = graph.subgraph(id_list)
return graph_train
|
3d4affe0445595b970de0b2235650256d872b411
| 68,812
|
import logging
def fix_pe_from_memory(pe, imagebase=None):
"""
Fixes PE file from memory and returns the pe
:param pe: pefile object, assuming valid
:param imagebase: If we want to change the image base, set it here as int
:return:
"""
    # Fix the image base if one was supplied
if imagebase is not None:
pe.OPTIONAL_HEADER.ImageBase = int(imagebase, 16)
for section in pe.sections:
# Change section address back to raw
logging.info('==' + section.Name.decode('utf-8',errors='ignore') + '==')
logging.info('Modifying virtual addresses:')
logging.info('{} => {}'.format(hex(section.VirtualAddress), hex(section.PointerToRawData)))
section.VirtualAddress = section.PointerToRawData
return pe
|
9d84b91bc4d747cb75e73748ccacbc69db69583e
| 68,817
|
def check_type_of_nest_spec_keys_and_values(nest_spec):
"""
Ensures that the keys and values of `nest_spec` are strings and lists.
    Raises a helpful TypeError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
Returns
-------
None.
"""
try:
assert all([isinstance(k, str) for k in nest_spec])
assert all([isinstance(nest_spec[k], list) for k in nest_spec])
except AssertionError:
msg = "All nest_spec keys/values must be strings/lists."
raise TypeError(msg)
return None
|
0363545577d5d3e5652517ee37eea45bc32bdadf
| 68,821
|
import re
def handle_duplicate_name(contig_name, names_seen):
"""Add a tiebreaker to a duplicate contig name."""
    name = re.sub(r'_v\d+$', '', contig_name, flags=re.IGNORECASE)  # pass IGNORECASE as flags, not as the count argument
names_seen[name] += 1
if names_seen[name] > 1:
name += '_v{}'.format(names_seen[name])
return name
|
6e0dae11331f90e5d0259d1104d61e111103e10b
| 68,822
|
def join_commands(cmds):
"""Joins a list of shell commands with ' && '.
Args:
cmds: The list of commands to join.
Returns:
A string with the given commands joined with ' && ', suitable for use in a
shell script action.
"""
return " && ".join(cmds)
|
8f20061d9300ae0586bd589cf8d7bd95a7f6b0fa
| 68,823
|
def unpack_ushort(data: bytes) -> int:
"""Unpacks unsigned short number from bytes.
Keyword arguments:
data -- bytes to unpack number from
"""
return int.from_bytes(data, byteorder="little", signed=False)
|
a7440c7b6389e45d7f205aded2afc1ad0b8a37b9
| 68,826
|
def group_cc_emails(audit_ccs, assessment_ccs):
"""Returns grouped cc emails between audit and assessment.
Args:
audit_ccs: List of audit ccs
assessment_ccs: List of assessment ccs
Returns:
Grouped list of ccs
"""
audit_ccs = frozenset(audit_ccs)
assessment_ccs = frozenset(assessment_ccs)
grouped_ccs = list(audit_ccs.union(assessment_ccs))
return grouped_ccs
|
adc4014a915e2ea191aefbfdc3af48ae4f17ae7f
| 68,828
|
def get_non_classic(templates_non_classic, js):
"""
Getting edges with non classic peptide bonds.
Parameters
----------
templates_non_classic : list
List of atoms of non classic peptide bonds.
js : dict
        Opened rBAN peptideGraph.json.
Returns
-------
non_classic : list
List of amino acids, bonded with non classic peptide bonds.
"""
non_classic = []
for tp in templates_non_classic:
for atom in js['atomicGraph']['atomicGraph']['atoms']:
if atom['cdk_idx'] in tp:
if atom['matchIdx'] in non_classic:
continue
non_classic.append(atom['matchIdx']) #Adding amino
return non_classic
|
134e623636068cd96886cd365c10fd070ce755b4
| 68,831
|
import re
def get_tag(sequence_of_strings):
"""Return first string that matches the 'item{number}' pattern."""
for tag in sequence_of_strings:
if re.match(r'^item\d+$', tag):
return tag
return None
|
302f121fc93f8cf80260557f384e3e9b670190c1
| 68,833
|
def env_var_key(key):
"""return env variable name for given config key.
. and - is replaced by _.
"""
return key.replace(".", "_").replace("-", "_").upper()
|
1e242d53ef39c9f36495ecc7240829b3f7105f45
| 68,835
|
def parse_ranges(range_string):
"""Turn ranges (like you specify in a print dialog) into a list of inclusive bounds."""
ret = []
components = [rang.split('-') for rang in range_string.split(',')]
for rang in components:
if len(rang) == 1:
ret.append((int(rang[0]), int(rang[0])))
elif len(rang) == 2:
ret.append((int(rang[0]), int(rang[1])))
else:
raise ValueError('invalid range specification')
return ret
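# Illustrative usage (hypothetical range string, added for clarity; not part of the original snippet):
assert parse_ranges("1-3,5,8-10") == [(1, 3), (5, 5), (8, 10)]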
|
414e9cea66434a384426135370f335f43675e4b8
| 68,836
|
def flatten_dict_values(d: dict) -> list:
"""Extract all values from a nested dictionary.
Args:
d: Nested dictionary from which to extract values from
Returns:
All values from the dictionary as a list
"""
if isinstance(d, dict):
flattened = []
for k, v in d.items():
if isinstance(v, dict):
flattened.extend(flatten_dict_values(v))
else:
flattened.append(v)
return flattened
else:
return [d]
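# Illustrative usage (hypothetical nested dict, added for clarity):
assert flatten_dict_values({"a": 1, "b": {"c": 2}}) == [1, 2]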
|
61fa7d56ad44d03fa6b4f1b13c5bbca903f3a3ce
| 68,837
|
import torch
def unsorted_segment_sum(data, segment_ids, num_segments):
"""
Computes the sum along segments of a tensor. Similar to
tf.unsorted_segment_sum, but only supports 1-D indices.
:param data: A tensor whose segments are to be summed.
:param segment_ids: The 1-D segment indices tensor.
:param num_segments: The number of segments.
:return: A tensor of same data type as the data argument.
"""
assert (
len(segment_ids.shape) == 1 and
segment_ids.shape[0] == data.shape[0]
)
segment_ids = segment_ids.view(
segment_ids.shape[0], *((1,) * len(data.shape[1:]))
)
segment_ids = segment_ids.expand(data.shape)
shape = [num_segments] + list(data.shape[1:])
tensor = (
torch.zeros(*shape, device=segment_ids.device)
.scatter_add_(0, segment_ids, data.float())
)
tensor = tensor.type(data.dtype)
return tensor
|
4777e5dfe0edd1c5918ab53b600e930a84753c3a
| 68,838
|
def get_nested_field(fieldname, field_dict):
"""Takes a field name in dot notation and a dictionary of fields and finds the field in the dictionary"""
fields = fieldname.split('.')
nested_field = field_dict[fields[0]]
for field in fields[1:]:
nested_field = nested_field['fields'][field]
return nested_field
|
6755b005980846872a6c59fe0f59f9b5c6d9d13c
| 68,843
|
def void(func):
"""Create a wrapper that calls *func* and returns nothing."""
def void_wrapper(*args):
func(*args)
return void_wrapper
|
503275af8e22f1d9e92b9a629a4c0550b4539090
| 68,849
|
def sliding_window(image, stride=320, window_size=(320, 320)):
"""Extract patches according to a sliding window.根据一个滑动窗口提取补丁
Args:
image (numpy array): The image to be processed.要处理的图像
stride (int, optional): The sliding window stride (defaults to 10px). 动窗口的跨度(默认为10px)
window_size(int, int, optional): The patch size (defaults to (20,20)).补丁大小(默认为(20,20))
Returns:
list: list of patches with window_size dimensions
具有window_size尺寸的补丁的列表
"""
patches = []
# slide a window across the image
for x in range(0, image.shape[0], stride):
for y in range(0, image.shape[1], stride):
new_patch = image[x:x + window_size[0], y:y + window_size[1]]
if new_patch.shape[:2] == window_size:
patches.append(new_patch)
return patches
|
69fa6e6d9ace6da2996f41c66288cd0f2e1fdede
| 68,851
|
def clean_keys(bibliography, good_keys=None):
"""Takes the bibtexparser bibliography object
and returns an object of the same type after
removing the unwanted categories."""
if good_keys:
for entry in bibliography.entries:
entry_keys = list(entry.keys())[:]
for key in entry_keys:
if key.lower() not in good_keys:
del entry[key]
return bibliography
|
c25f8f9fe8826ece24097bd7b9fa2f4ccc45117b
| 68,852
|
def _where_location(record: dict, item: str, last_section_title: str, last_subsection_title: str) -> dict:
"""Get information where a parsed item should be stored in the resulting JSON."""
where = record[item]
if last_section_title:
        if '@sections' not in where:  # create the sections container only once
where['@sections'] = {}
where = where['@sections']
key = last_section_title
if key not in where:
where[key] = {}
where = where[key]
else:
if '@top' not in where:
where['@top'] = {}
where = where['@top']
if last_subsection_title:
key = '@subsections'
if key not in where:
where[key] = {}
where = where[key]
if last_subsection_title not in where:
where[last_subsection_title] = {}
where = where[last_subsection_title]
return where
|
41396ff1ed222f9e421088346d32ea9118c9bf9b
| 68,853
|
def remove_if_exists_copy(mylist, item):
""" Return new list with item removed """
new_list = []
for el in mylist:
if el != item:
new_list.append(el)
return new_list
|
5692b668f6b101fcec369bb6c514b1cac1ea4cc2
| 68,854
|
def define_pipeline_parameters() -> dict:
"""
Called by submit_experiment().
Defines pipeline parameters to use when submitting the pipeline as an AML experiment.
Note: AzureML converts booleans to strings so it's recommended to use only strings and numbers for PipelineParameters.
"""
return {"use_test_dataset": "True", "train_test_ratio": 0.75}
|
1989e1fafc2c4c02a5875abad0802b2737458161
| 68,861
|
from typing import Any
from typing import List
def agent_names_from(input:Any)->List[str]:
"""
Attempts to extract a list of agent names from various input formats.
The returned value is a list of strings.
>>> ### From dict:
>>> agent_names_from({"Alice":{"x":1,"y":2}, "George":{"x":3,"y":4}})
['Alice', 'George']
>>> ### From list of dicts:
>>> agent_names_from([{"x":1,"y":2}, {"x":3,"y":4}])
['Agent #0', 'Agent #1']
>>> ### From list of lists:
>>> agent_names_from([[1,2],[3,4]])
['Agent #0', 'Agent #1']
>>> ### From list of valuations:
>>> agent_names_from([AdditiveValuation([1,2]), BinaryValuation("xy")])
['Agent #0', 'Agent #1']
>>> ### From list of agents:
>>> agent_names_from([AdditiveAgent([1,2], name="Alice"), BinaryAgent("xy", name="George")])
['Alice', 'George']
>>> d = {"Alice": 123, "George": 456}
>>> agent_names_from(d.keys())
['Alice', 'George']
"""
if hasattr(input, "keys"):
return sorted(input.keys())
elif hasattr(input, 'num_of_agents'):
num_of_agents = input.num_of_agents
return [f"Agent #{i}" for i in range(num_of_agents)]
if len(input)==0:
return []
input_0 = next(iter(input))
if hasattr(input_0, "name"):
return [agent.name() for agent in input]
elif isinstance(input_0, int):
return [f"Agent #{index}" for index in input]
elif isinstance(input_0, str):
return list(input) # convert to a list; keep the original order
else:
return [f"Agent #{i}" for i in range(len(input))]
|
9437a2ed34766e449530b7f69e515f85a6ec8277
| 68,869
|
import math
def _rescale_path(path, depth):
"""Rescales the input path by depth! ** (1 / depth), so that the last
signature term should be roughly O(1).
Parameters
----------
path : np.ndarray
Input path of shape [N, L, C].
depth : int
Depth the signature will be computed to.
Returns
-------
np.ndarray:
Tensor of the same shape as path, corresponding to the scaled path.
"""
coeff = math.factorial(depth) ** (1 / depth)
return coeff * path
|
0d57eb62609f66b10f3eb1053d48061d15779df5
| 68,870
|
def default_parse_function(data: bytes, intro: str) -> bytes:
    """Default parse function. Print the data and who sent it, then return it unchanged."""
print("[Received from {intro}]: {data}\n\n".format(intro=intro,data=str(data)))
return data
|
4189d4198b44e9854a92fc11256d3031ee5ebb1f
| 68,871
|
def get_number_of_projects(source_wb) -> int:
"""
Simple helper function to get an accurate number of projects in a master.
Also strips out any additional columns that openpyxl thinks exist actively
in the spreadsheet.
Returns an integer.
"""
ws = source_wb.active
top_row = next(ws.rows) # ws.rows produces a "generator"; use next() to get next value
top_row = list(top_row)[1:] # we don't want the first column value
top_row = [i.value for i in top_row if i.value is not None] # list comprehension to remove None values
return len(top_row)
|
3b68302847721324798d93a93442bc55097574cb
| 68,873
|
def override_class(overriden_class, overrider_class):
"""Override class definition with a MixIn class
If overriden_class is not a subclass of overrider_class then it creates
a new class that has as bases overrider_class and overriden_class.
"""
if not issubclass(overriden_class, overrider_class):
name = overriden_class.__name__
bases = (overrider_class, overriden_class)
overriden_class = type(name, bases, {})
return overriden_class
|
20dd9c56ecc1ba9f9c6b378e8ad9b57db5c55507
| 68,874
|
def _find_valid_path(options):
"""Find valid path from *options*, which is a list of 2-tuple of
(name, path). Return first pair where *path* is not None.
If no valid path is found, return ('<unknown>', None)
"""
for by, data in options:
if data is not None:
return by, data
else:
return '<unknown>', None
|
2f156fd1d592fb3a44c5280a53180b4066fe7d18
| 68,875
|
import csv
import re
def se_iban_load_map(filename: str) -> list:
"""
Loads Swedish monetary institution codes in CSV format.
:param filename: CSV file name of the BIC definitions.
Columns: Institution Name, Range Begin-Range End (inclusive), Account digits count
:return: List of (bank name, clearing code begin, clearing code end, account digits)
"""
out = []
name_repl = {
'BNP Paribas Fortis SA/NV, Bankfilial Sverige': 'BNP Paribas Fortis SA/NV',
'Citibank International Plc, Sweden Branch': 'Citibank',
'Santander Consumer Bank AS (deltar endast i Dataclearingen)': 'Santander Consumer Bank AS',
'Nordax Bank AB (deltar endast i Dataclearingen)': 'Nordax Bank AB',
'Swedbank och fristående Sparbanker, t ex Leksands Sparbank och Roslagsbanken.': 'Swedbank',
'Ålandsbanken Abp (Finland),svensk filial': 'Ålandsbanken Abp',
'SBAB (deltar endast i Dataclearingen)': 'SBAB Bank AB',
'SBAB deltar endast i Dataclearingen': 'SBAB Bank AB',
'DNB Bank ASA, filial Sverige': 'Den Norske Bank',
'Länsförsäkringar Bank Aktiebolag': 'Länsförsäkringar Bank AB',
'MedMera Bank AB': 'Med Mera Bank AB',
}
with open(filename) as fp:
for row in csv.reader(fp):
if len(row) == 3:
name, series, acc_digits = row
# pprint([name, series, acc_digits])
# clean up name
name = re.sub(r'\n.*', '', name)
if name in name_repl:
name = name_repl[name]
# clean up series
ml_acc_digits = acc_digits.split('\n')
for i, ser in enumerate(series.split('\n')):
begin, end = None, None
res = re.match(r'^(\d+)-(\d+).*$', ser)
if res:
begin, end = res.group(1), res.group(2)
if begin is None:
res = re.match(r'^(\d{4}).*$', ser)
if res:
begin = res.group(1)
end = begin
if begin and end:
digits = None
try:
digits = int(acc_digits)
except ValueError:
pass
if digits is None:
try:
digits = int(ml_acc_digits[i])
except ValueError:
digits = '?'
except IndexError:
digits = '?'
out.append([name.strip(), begin.strip(), end.strip(), digits])
# print('OK!')
return out
|
76ee6368c993f7ce46c752e8350c5966aa9ca9be
| 68,877
|
import logging
def getLogger(name=None):
"""Prepends 'pybrreg.<project>' to the standard dotted path given by `name`.
Normal usage:
from pybrreg.utils import logging
logging.getLogger(__name__)
"""
logger_name = 'pybrreg.{}'.format(name)
return logging.getLogger(logger_name)
|
17af3ff1e703a9e556182ccc701fdefbff1ef98a
| 68,886
|
def all_fields(item):
"""
Makes no change to the item, passes through. Used primarily as an example
and for testing the filter workflow
"""
return item
|
2463a002719c4018a28f66a578f426fe2fbe416e
| 68,888
|
import math
def upround(x, base):
"""
Round <x> up to nearest <base>.
Parameters
---------
x : str, int, float
The number that will be rounded up.
base : int, float
The base to be rounded up to.
Returns
-------
float
The rounded up result of <x> up to nearest <base>.
"""
return base * math.ceil(float(x)/base)
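# Illustrative usage (hypothetical values, added for clarity; not part of the original snippet):
assert upround(23, 10) == 30
assert upround(7.2, 0.5) == 7.5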
|
a232d9ece007df5f01a9a39ea0d051816f504d4f
| 68,902
|
def filter_image_only(f):
"""Filter only png file through."""
if f.endswith('png'):
return True
return False
|
2177bded7d41b7c6800a44e7678d6dae10c7064d
| 68,903
|
def get_config_xml_head(head_xpath):
"""Get xml head string when config.
Args:
head_xpath: The string of xpath_key.
Returns:
xml_head_str: The xml head str.
"""
xml_head_str = "<config>"
for item in head_xpath.split("/")[1:]:
xml_head_str = xml_head_str + "<" + item + ">"
return xml_head_str
|
06bb8230cd37a2e5e8bc508f8ce2e772b2a76332
| 68,905
|
def get_theme_keys(filename):
"""Return list of theme keys.
Arguments:
filename - path to file where keys are stored
"""
theme_keys = []
with open(filename, 'r') as fp:
theme_keys = [line.strip() for line in fp if line]
return theme_keys
|
ed819228a26bb6fb5191d1c1886066a3a0d57f54
| 68,907
|
import re
def cleanFilename(fname):
"""Turn runs of bad characters to have in a filename into a single underscore,
remove any trailing underscore"""
return re.sub("_$", "", re.sub("[ _\n\t/()*,&:;@.]+", "_", fname))
|
9dce26172d9b4cc6db3c9cdd13e4855c224a1a0c
| 68,914
|
def human_size(nbytes):
""" Convert size in bytes to a human readable representation.
Args:
nbytes (int): Size in bytes.
Returns:
Human friendly string representation of ``nbytes``, unit is power
of 1024.
>>> human_size(65425721)
'62.39 MiB'
>>> human_size(0)
'0.00 B'
"""
for unit in ['', 'ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(nbytes) < 1024.0:
return "{:.2f} {}B".format(nbytes, unit)
nbytes /= 1024.0
return "{:.2f} {}B".format(nbytes, 'Yi')
|
e8363f28d6c0471c3ecec261d943d2d74ff81222
| 68,915
|
def get_fragment(text, startend):
"""Return substring from a text based on start and end substrings delimited by ::."""
startend = startend.split("::")
if startend[0] not in text or startend[1] not in text:
return
start_idx = text.index(startend[0])
end_idx = text.index(startend[1])
if end_idx < start_idx:
return
return text[start_idx: end_idx+len(startend[1])]
|
1990f5e4bd4615230f95348ec1ac48d282e57b21
| 68,917
|
def is_band_auto(settings):
"""Check whether automatic band paths setting or not."""
return type(settings.band_paths) is str and settings.band_paths == "auto"
|
eef4ce0eda8675dfc06969d9293587dc4356d839
| 68,920
|
def list_to_csv(args):
"""
Convert a list to a string csv.
"""
args = map(str, args)
args = ",".join(args)
return args
|
79ef7816716e9e48bc123b84ad15632efdae6d95
| 68,925
|
def list2csv(l):
    """ Converts a list to a string of comma-separated values."""
    s = None
    if isinstance(l, list):
        s = str(l[0])
        for i in range(1, len(l)):
            s += ',' + str(l[i])
    return s
|
f86ee3949f5ef221febbb36a627190771c8f7f10
| 68,931
|
def column_marker(column):
"""
Unique markers modulo 7
"""
if (column)//7 == 0:
marker = 'x'
elif (column)//7 == 1:
marker = '+'
else:
marker = 'd'
return marker
|
f922efcd9cc59d4aa07ce46709d494ae5bc6d8f9
| 68,934
|
def longest_repetition(chars):
"""
>>> assert(longest_repetition(None) == ('', 0))
>>> assert(longest_repetition('') == ('', 0))
>>> assert(longest_repetition('a') == ('a', 1))
>>> assert(longest_repetition('ab') == ('a', 1))
>>> assert(longest_repetition('aaaaaabbbbbcccc') == ('a', 6))
>>> assert(longest_repetition('aaaabbbbbbccccc') == ('b', 6))
>>> assert(longest_repetition('aaaabbbbbcccccc') == ('c', 6))
"""
if chars is None or len(chars) == 0:
return '', 0
chars_length = len(chars)
if chars_length == 1:
return chars, 1
longest_repeating_char, longest_repeating_count = '', 0
previous_char, current_count = '', 0
for i, char in enumerate(chars):
if char == previous_char:
current_count += 1
if i != chars_length - 1:
continue
if current_count > longest_repeating_count:
longest_repeating_count = current_count
longest_repeating_char = previous_char
current_count = 1
previous_char = char
return longest_repeating_char, longest_repeating_count
|
7f9530f963705d097c18497006730d0e00e9f0e4
| 68,936
|
from datetime import datetime
def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0)
delta = date - epoch
return int(delta.total_seconds())
|
476aebee787912dedbf6e6e4afc574fc9bf3a692
| 68,939
|
def read_bytes_from_field_file(field_file):
"""
Returns the bytes read from a FieldFile
:param ~django.db.models.fields.files.FieldFile field_file:
:return bytes: bytes read from the given field_file
"""
try:
field_file.open()
result = field_file.read()
finally:
field_file.close()
return result
|
726317f8085fc1441c5cd1deff86bc202d5fd7ba
| 68,945
|
import time
def unixtime(ts):
"""convert from datetime.datetime to UNIX epoch int timestamp"""
return int(time.mktime(ts.timetuple()))
|
b784757f0905191ea754d060b6c9a6afcf738d7b
| 68,947
|
def encode_problem_index(function_idx, dimension_idx, instance_idx):
"""
Compute the problem index for the bbob suite with 15 instances and 24 functions.
"""
return instance_idx + (function_idx * 15) + (dimension_idx * 15 * 24)
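# Illustrative usage (hypothetical indices, added for clarity): function 1, dimension 0, instance 2
assert encode_problem_index(1, 0, 2) == 2 + 1 * 15 + 0 * 15 * 24  # == 17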
|
a1524ab20c6a9e4db3b2f0c769b15e9640eb6766
| 68,949
|
def increment(num: int) -> int:
"""Return the delta (0, 1, or -1) that is needed to get from zero to the parm number in single unit increments."""
if num < 0:
return -1
elif num > 0:
return 1
else:
return 0
|
2d37694134b7a35c104472095256ab37eca09808
| 68,951
|
def sortByUsers(tokDocs):
"""
Sorts the given tokenized reviews by users.
Arguments:
        tokDocs -- Dictionary where the review ids are keys and each value is a dict holding the review's 'user' id and its 'tokens' list.
Returns:
Dictionary with user ids as keys and lists of lists as values, where each list represents the tokens of a review authored by that user.
"""
userDocs = {}
    for value in tokDocs.values():  # .itervalues() exists only in Python 2
user = value['user']
tokens = value['tokens']
try:
userDocs[user].append(tokens)
except KeyError:
userDocs[user] = []
userDocs[user].append(tokens)
return userDocs
|
07b68e11982b3a80a6cd35404b157a7b96324b14
| 68,952
|
def generate_human_readable_size(byte_size: int) -> str:
"""Generate a human readable size from a byte size.
Returns a human readable string with the size converted in one of the multiple of the byte
according to the standards defined by the International Electrotechnical Commission (IEC) in
    1998. Available multiples are kibibytes (1024 bytes, KiB), mebibytes (1024^2 bytes, MiB),
    gibibytes (1024^3 bytes, GiB) and tebibytes (1024^4 bytes, TiB).
Parameters
----------
byte_size: int
The size in bytes to convert.
Returns
-------
str
The converted byte size, followed by the right suffix.
"""
size_measurement_units = (('KiB', 1024), ('MiB', 1024**2), ('GiB', 1024**3), ('TiB', 1024**4))
suffix = None
divisor = None
for u, m in size_measurement_units:
if byte_size >= m:
suffix = u
divisor = m
if suffix and divisor:
return f'{round(byte_size / divisor, 1)} {suffix}'
return f'{byte_size}B'
# return f'{round(byte_size/divisor, 1)} {suffix}'
|
5790f668e7e91320af7af090eb72a4d7ad1ffe31
| 68,956
|
def add(a, b=0):
"""Simple addition function
arguments:
a: value for which addition is defined
b: value for which addition is defined (optional, defaults to 0)
returns:
a + b
"""
return a + b
|
e925943aff77e39a815015e8f7d94f767e033894
| 68,959
|
def get_filename_from_url(url, accession):
"""
Return the filename extracted from the given URL. If it is not a pdf file, return the original url
:param url: url to parse
:param accession: accession number
:return: file name
"""
if (not url) or (url and len(url) == 0):
# print(f"{accession} url is empty")
return ""
if url.lower().endswith(".pdf"):
return url.split("/")[-1]
else:
return url
|
3dd4daa26990296b753c3a557ef0fc8e2f2fe13d
| 68,960
|
import random
def place_orders_same_price(ob, price, n_orders, order_type):
"""
Helper function, place n_orders with given price
into the order_book instance
:param ob: order_book instance
:param price: float
:param n_orders: number of orders with the same price
:param order_type: 'ask' or 'bid'
:return: total quantity of the placed orders
"""
quantity = 0
for _ in range(n_orders):
order = {
'price': price,
'quantity': random.randint(1, 100)
}
quantity += order['quantity']
ob.place_order(**order, order_type=order_type,
order_id=str(random.random()))
return quantity
|
46473427834f838cc3dc4635dca5c330fba83053
| 68,964
|
def computeRatedCurrent(d):
""" Produces rated current (amps) as a function of wire diameter (mm)."""
resistance = 22.0/d**2.0 # ohms per 1000m
ratedCurrent = 100.0/resistance**(0.72)
return ratedCurrent
|
7ea0a891246dd95fd96ef9843348f8d858caccd6
| 68,965
|
def sanitize(name):
"""Make the name able to be a valid path name.
No spaces or slashes, everything lowercase"""
return name.lower().replace(" ", "_").replace("/", "-slash-")
|
5e7d54410e5a4b1657c8a149230cf2ef1587a4a3
| 68,968
|
def load_metadata(filename):
"""Load and parse the metadata file for a given dataset.
Args:
filename: path to the metadata file
Returns:
A dictionnary of the metadata values
"""
metadata = {}
with open(filename, 'r') as f:
for line in f.read().splitlines():
key, values = line.split('\t', 1)
if key in ['data_classes', 'feature_keys']:
metadata[key] = values.split(',')
elif key.endswith('tfrecords') or key == 'image_folder':
metadata[key] = values
else:
metadata[key] = int(values)
return metadata
|
ae2603ce7a90a826dcf340dd33fa24d0babc4992
| 68,970
|
def filter_and_sort_simproducts(dfsimproducts, min_price=None):
"""
removes similar products with too low price and
sorts them by decreasing similarity
"""
if min_price:
dfsimproducts = dfsimproducts[dfsimproducts['price'] >= min_price]
    dfsimproducts = dfsimproducts.sort_values('sim', ascending=False)  # avoid sorting a filtered slice in place
return dfsimproducts
|
e334aaa92d8334f5b8178045acfbb867a65719aa
| 68,971
|
def get_device(config):
""" Return the device portion of the part
Device portion of an example:
xc6slx9-tqg144-3: xc6slx9
Args:
config (dictionary): configuration dictionary
Return:
(string) device
Raises:
Nothing
"""
part_string = config["device"]
device = part_string.split("-")[0]
return device.strip()
|
597176ac2dc69c3fbd9ee8f0537248151edaddf2
| 68,978
|