content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def gcd_euclidean(a, b):
    """Compute the greatest common divisor of two integers.

    Uses the subtraction-based Euclidean algorithm:
    http://en.wikipedia.org/wiki/Euclidean_algorithm
    """
    # Zero is absorbed: gcd(0, b) == b and gcd(a, 0) == a.
    if not a:
        return b
    if not b:
        return a
    # Repeatedly subtract the smaller value from the larger one.
    while a != b:
        a, b = (a - b, b) if a > b else (a, b - a)
    return a
|
d3036745ee422a69cffda7975f599f095c66e653
| 508,722
|
def get_fps(
        i_datasets_folder: str,
        dat: str) -> tuple:
    """Build the feature-table and metadata file paths for a dataset.

    Parameters
    ----------
    i_datasets_folder : str
        Path to the folder containing the data/metadata sub-folders.
    dat : str
        Dataset name.

    Returns
    -------
    tuple
        (tsv_fp, qza_fp, meta_fp): paths to the .tsv table, .qza table
        and the metadata file respectively.
    """
    data_dir = '%s/data/' % i_datasets_folder
    meta_dir = '%s/metadata/' % i_datasets_folder
    tsv_fp = data_dir + 'tab_%s.tsv' % dat
    qza_fp = tsv_fp.replace('.tsv', '.qza')
    # Metadata lives next to the data dir, with a meta_ prefix instead of tab_.
    meta_fp = tsv_fp.replace(data_dir, meta_dir).replace('tab_', 'meta_')
    return tsv_fp, qza_fp, meta_fp
|
850bf3bbc956a49da432d799345f685182027fbb
| 310,575
|
import importlib
def is_citation_template_name(template_name, lang='en'):
    """Check whether ``template_name`` names a known citation template.

    Returns False for falsy input, the normalized template name when it is
    a citation template for ``lang``, and None otherwise.
    """
    if not template_name:
        return False
    # Normalize: underscores to spaces, trim, capitalize first character.
    normalized = template_name.replace('_', ' ').strip()
    normalized = normalized[0].upper() + normalized[1:]
    # Language-specific template lists live in wikiciteparser.<lang>.
    lang_module = importlib.import_module('.' + lang, package='wikiciteparser')
    if normalized in lang_module.citation_template_names:
        return normalized
    return None
|
6798f28017e4f14177ea08628b927b383d4aba8c
| 383,145
|
def nu_i(n_i, n_n, A):
    """Approximate ion collision frequency (after Kelley 1989).

    Parameters
    ----------
    n_i : float
        Ion density, cm^-3.
    n_n : float
        Neutral density, cm^-3.
    A : int
        Mean neutral molecular mass in atomic mass units.
    """
    coeff = 2.6 * 10 ** (-9)
    # Frequency scales with total density and inversely with sqrt(mass).
    return coeff * (n_i + n_n) * A ** (-1 / 2)
|
db2dd730faf4b0ca7d31bad545c98f384f35e44d
| 487,193
|
import time
def timestamp_to_secs(ts: str, ts_format="%Y-%m-%d %H:%M:%S") -> float:
    """Convert a local-time timestamp string to seconds since the epoch."""
    parsed = time.strptime(ts, ts_format)
    # mktime interprets the struct_time in the local timezone.
    return time.mktime(parsed)
|
ca6d4f5eeada2308311e098a4cb7d1685e3ca8cc
| 525,510
|
def find_double_day(older_birthday, younger_birthday):
"""Takes 2 datetime.date objects representing different days, returns the date when one is
twice as old as the other
Precondition: older_birthday < younger_birthday"""
assert older_birthday < younger_birthday
difference = younger_birthday - older_birthday
double_day = younger_birthday + difference
return double_day
|
a0becdd53b4d63b1a57a79c88bc3e86332a73812
| 644,885
|
def test_mat(default_rng):
"""The test array used for unit tests.
Values are randomly sampled from standard Gaussian distribution. See the
top-level package conftest.py for the default_rng fixture.
Returns
-------
numpy.ndarray
Shape (10, 3, 10), entries sampled from standard normal distribution.
"""
return default_rng.normal(size=(10, 3, 10))
|
c72bbabee690a391bc6653292ded3b1309c762b0
| 80,241
|
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
|
bd0d70794b69e0bd58d4eb6ac73fe03ca5a66171
| 544,635
|
def _allow_all(root, path, pool):
"""Generic authz_read_func that permits access to all paths"""
return 1
|
5fe75af7fbc971153b23f5543ae2fb02df8ce296
| 591,915
|
import re
def c_name(name: str, protect: bool = True) -> str:
    """
    Map ``name`` to a valid C identifier.

    Used for converting 'name' from a 'name':'type' qapi definition
    into a generated struct member, as well as converting type names
    into substrings of a generated C function name.

    '__a.b_c' -> '__a_b_c', 'x-foo' -> 'x_foo'
    protect=True: 'int' -> 'q_int'; protect=False: 'int' -> 'int'

    :param name: The name to map.
    :param protect: If true, avoid returning certain ticklish identifiers
                    (like C keywords) by prepending ``q_``.
    """
    # Every identifier we must not emit verbatim, merged into one set.
    reserved = {
        # ANSI X3J11/88-090, 3.1.1 (C89 keywords)
        'auto', 'break', 'case', 'char', 'const', 'continue', 'default',
        'do', 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto',
        'if', 'int', 'long', 'register', 'return', 'short', 'signed',
        'sizeof', 'static', 'struct', 'switch', 'typedef', 'union',
        'unsigned', 'void', 'volatile', 'while',
        # ISO/IEC 9899:1999, 6.4.1 (C99 keywords)
        'inline', 'restrict', '_Bool', '_Complex', '_Imaginary',
        # ISO/IEC 9899:2011, 6.4.1 (C11 keywords)
        '_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn',
        '_Static_assert', '_Thread_local',
        # GCC extensions, excluding _.*
        'asm', 'typeof',
        # C++ ISO/IEC 14882:2003 2.11
        'bool', 'catch', 'class', 'const_cast', 'delete', 'dynamic_cast',
        'explicit', 'false', 'friend', 'mutable', 'namespace', 'new',
        'operator', 'private', 'protected', 'public', 'reinterpret_cast',
        'static_cast', 'template', 'this', 'throw', 'true', 'try',
        'typeid', 'typename', 'using', 'virtual', 'wchar_t',
        # C++ alternative representations
        'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not', 'not_eq',
        'or', 'or_eq', 'xor', 'xor_eq',
        # namespace pollution
        'unix', 'errno', 'mips', 'sparc', 'i386',
    }
    # Replace any character not valid in a C identifier with '_'.
    mapped = re.sub(r'[^A-Za-z0-9_]', '_', name)
    if protect and (mapped in reserved or mapped[0].isdigit()):
        return 'q_' + mapped
    return mapped
|
828efa5c4b2e586f4b4b3173ad44eac2952fb0a6
| 519,849
|
def tick_percent(decimals=1):
    """Build a tick formatter rendering the y-axis as a float percentage.

    Args:
        decimals = 1: The number of decimals to display.

    Returns:
        A tick formatter function (f(y, position)) displaying y as a
        percentage.
    """
    def formatter(y, position):
        # position is required by the matplotlib formatter signature.
        return '{:.{decimals}f}%'.format(100.0 * y, decimals=decimals)
    return formatter
|
b9c064b767e39b4a98abd389ef1d0656dfea582a
| 690,765
|
from typing import Union
from typing import Callable
import importlib
def import_object(import_path: str) -> Union[object, Callable]:
    """Import the object at the given dotted module path.

    Parameters
    ----------
    import_path : str
        The import path for the object to import, e.g. ``"pkg.mod.attr"``.

    Returns
    -------
    The imported object (this can be a class, a callable, a variable).
    """
    # Split on the last dot: everything before is the module, after is the
    # attribute.  rindex raises ValueError when there is no dot at all.
    dot = import_path.rindex('.')
    module = importlib.import_module(import_path[:dot])
    return getattr(module, import_path[dot + 1:])
|
841ce20283023589e9f76b51a0f7d3bd67819b4f
| 509,800
|
import math
def convert_to_year_month(CENTURY_date):
    """Convert CENTURY's date representation (from output file) to a
    [year, month] pair of integers.

    A whole number marks December of the previous year; otherwise the
    fraction encodes the month as twelfths of the year.
    """
    fractional = CENTURY_date - math.floor(CENTURY_date)
    if fractional == 0:
        return [int(CENTURY_date - 1), 12]
    year = int(math.floor(CENTURY_date))
    return [year, int(round(12. * (CENTURY_date - year)))]
|
8d0c8f8a4b3f46ca929321b612d2f51d7f4a28f7
| 349,907
|
import torch
def encode(localization_match, localizations_default):
    """Encode matched localizations relative to their default locations.

    localization_match holds ground-truth matched localizations in (x, y)
    interval representation; localizations_default has size
    [number_of_localizations, 2].  Returns the encoded target with a
    (center offset, log-width) pair per localization.
    """
    lo = localization_match[:, 0]
    hi = localization_match[:, 1]
    default_center = localizations_default[:, 0]
    default_width = localizations_default[:, 1]
    # Center offset, normalized by the default width.
    center = ((lo + hi) / 2 - default_center) / default_width
    # Log-ratio of the matched width to the default width.
    width = torch.log((hi - lo) / default_width)
    return torch.stack([center, width], 1)
|
e01250a0c188af45615b748e01c932395e7b6381
| 604,244
|
import re
def extract_ticid(fn):
    """
    Read the TIC id from the file name.

    The id is the third run of digits appearing in the name.
    """
    numbers = re.findall(r'\d+', fn)
    return int(numbers[2])
|
abd452f1730dbd33998615819f7ad8f26293d551
| 297,653
|
def isEventClassMatchingName(eventClass, className):
    """
    Check whether `eventClass`'s name, or the name of any of its ancestor
    classes, equals `className`.
    """
    if eventClass.__name__ == className:
        return True
    # Walk up the inheritance tree recursively.
    for base in eventClass.__bases__:
        if isEventClassMatchingName(base, className):
            return True
    return False
|
7aebd413167311c22cffdbecb050f5094aa585e0
| 86,156
|
def get_skip_comments(events, skip_users=None):
    """
    Determine comment ids that should be ignored, either because of
    deletion or because the user should be skipped.

    Args:
      events: a list of (event_type str, event_body dict, timestamp).
      skip_users: optional list of user logins whose comments are skipped.

    Returns:
      comment_ids: a set of comment ids that were deleted or made by
        users that should be skipped.
    """
    skip_users = skip_users or []
    flagged = set()
    for event_type, payload, _ts in events:
        if event_type not in ('issue_comment', 'pull_request_review_comment'):
            continue
        comment_id = payload['comment']['id']
        if payload.get('action') == 'deleted' or \
                payload['sender']['login'] in skip_users:
            flagged.add(comment_id)
    return flagged
|
30663624714104bc7b9aa0fd4da45f537b06420f
| 18,086
|
def find_all(L, x):
    """Find indexes of all occurrences of *x* in list *L*.

    Returns a (possibly empty) list of indexes in increasing order.
    """
    out = []
    start = 0
    while True:
        try:
            # Search only past the previous hit.
            start = L.index(x, start)
        except ValueError:
            # BUG FIX: the original used a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit; list.index raises
            # ValueError when x is absent, so catch exactly that.
            return out
        out.append(start)
        start += 1
|
9b2a061418cdee0767e37764edb6b60a791f52e9
| 642,479
|
def tokens_to_conllu(
    doc_name: str, sent_ids: list, sent_tokens: list, sent_texts: list
) -> list:
    """
    Create a CoNLL-U string representation from sentences and tokens.

    Parameters
    ----------
    doc_name: str
        Name of the given doc.
    sent_ids: list
        List of sentence ids.
    sent_tokens: list
        List of parsed tokens.
    sent_texts: list
        List of sentence texts.

    Returns:
        List of strings in the CoNLL-U format (first element is "").
    """
    lines = [""]
    for sid, tokens, text in zip(sent_ids, sent_tokens, sent_texts):
        lines.append(f"# newdoc id = {doc_name}\n")
        lines.append("# newpar\n")
        lines.append(f"# sent_id = {sid}\n")
        # Newlines inside the sentence text would break the comment line.
        cleaned = text.replace("\n", " ")
        lines.append(f"# text = {cleaned}\n")
        last = len(tokens)
        for idx, token in enumerate(tokens, start=1):
            # The final token records the sentence-terminating newline.
            tail = "\tSpacesAfter=\\n\n" if idx == last else "\t_\n"
            lines.append(f"{idx}\t{token}" + "\t_" * 7 + tail)
        lines.append("\n")
    return lines
|
f5feb265210da30f545dbf5a058d0f8b20211db0
| 151,137
|
import torch
def calc_weighted_average(vals, weight):
    """Calculate the mean of values weighted along their second dim.

    Args:
        vals (torch.Tensor): The values to weight; the first dim is samples.
        weight (torch.Tensor): 1d weights applied to the second dim of vals.

    Returns:
        torch.Tensor: scalar mean of the weighted values.
    """
    # Broadcast the 1d weights across the sample dimension.
    expanded = weight.unsqueeze(0).repeat(vals.shape[0], 1)
    return torch.mean(expanded * vals)
|
ca6bde914f31bda57776076ee9d066d03d928b81
| 367,136
|
from typing import Dict
from typing import Any
from typing import List
def _format_default_options(defaults: Dict[str, Any], indent: str = "") -> List[str]:
"""Format default options to docstring lines."""
docstring_lines = [
".. dropdown:: Default values",
indent + ":animate: fade-in-slide-down",
"",
]
if not defaults:
docstring_lines.append(indent + "No default options are set.")
else:
docstring_lines.append(indent + "Following values are set by default.")
docstring_lines.append("")
docstring_lines.append(indent + ".. parsed-literal::")
docstring_lines.append("")
for par, value in defaults.items():
if callable(value):
if value.__class__.__name__ == "function":
# callback function
value_repr = f"Callable {value.__name__}"
else:
# class instance with call method
value_repr = repr(value)
else:
value_repr = repr(value)
docstring_lines.append(indent * 2 + f"{par:<25} := {value_repr}")
return docstring_lines
|
2c35c1e2603524afeb240a25b287cc757ca2177a
| 523,345
|
import re
def __remove_usernames(post):
"""It deletes usernames from a post.
Args:
post(str): the target post.
Returns:
str: the target post without any username.
"""
search_key = '@([A-Za-z0-9_]+)'
return re.sub(search_key, '', post)
|
7f57268fc98c34cd57bdcd8a436ec61eea097d9b
| 154,707
|
import json
def read_config(file_path):
    """Load a JSON configuration file.

    Args:
        file_path: input json file path.

    Returns:
        The parsed config dict.
    """
    with open(file_path, 'r') as handle:
        return json.load(handle)
|
610e41e75d2919d0c145b5bc578daefe471a57ff
| 488,361
|
import copy
def getadjpoints(point):
    """Return point objects for the four neighbors of a given point.

    Neighbors are returned in the order [left, right, up, down]; each is an
    independent deep copy of `point` with 'x' or 'y' shifted by one.
    """
    def _shifted(key, delta):
        # Deep-copy so the neighbors never alias the input point.
        neighbor = copy.deepcopy(point)
        neighbor[key] = neighbor[key] + delta
        return neighbor

    left = _shifted('x', -1)
    right = _shifted('x', 1)
    up = _shifted('y', -1)
    down = _shifted('y', 1)
    return [left, right, up, down]
|
41df3b9ac3a02bc3025d70e2cd8bb4cd9e419bb9
| 483,069
|
import random
def get_random_coordinate(image):
    """
    Return a random (x, y) coordinate tuple inside a 3-channel image.

    Example:
        coordinates = get_random_coordinate(image)

    Output:
        tuple (x, y).

    NOTE(review): the bounds are inclusive of the image dimensions
    themselves (randint upper bound), so a returned coordinate can sit one
    past the last valid index — confirm intended by callers.
    """
    rows, cols, _channels = image.shape
    return (random.randint(0, cols), random.randint(0, rows))
|
54cbed37aba48363875602b60e2efa6c43dcff99
| 664,868
|
def sample_task(self, user_id, argument, **kwargs):  # pylint: disable=unused-argument
    """
    Example of a specific task inheriting from UserTask.

    Prints a confirmation line and echoes `argument` back to the caller.
    """
    message = f'Ran SampleTask for argument "{argument}"'
    print(message)
    return argument
|
15cc4463971831b6cc51e7c0898afb9799635a7d
| 597,703
|
def resolve_name_obj(name_tree_kids):
    """Resolve 'Names' objects recursively.

    If key 'Kids' exists in 'Names', the name destination is nested in a
    hierarchical structure; this recursion resolves all the 'Kids' until a
    level containing 'Names' is found.

    :param name_tree_kids: Name tree hierarchy containing kids to resolve
    :return: Resolved name tree list (empty list for empty input)
    """
    # BUG FIX: the original recursed unconditionally, so an empty kid list
    # (or kids carrying neither 'Kids' nor 'Names') recursed forever and
    # died with RecursionError.  An empty level resolves to [].
    if not name_tree_kids:
        return []
    temp_list = []
    for kid in name_tree_kids:
        if 'Kids' in kid and kid['Kids']:
            # Descend one level: resolve each indirect kid reference.
            temp_list.extend(kid_kid.resolve() for kid_kid in kid['Kids'])
        elif 'Names' in kid:
            # This level already holds the names; return it as-is.
            return name_tree_kids
    return resolve_name_obj(temp_list)
|
863d299516d97d0abdf9b0fdc97833da3b41eef9
| 68,291
|
import random
def get_dice(input_dice):
    """
    Return a list of rolled Boggle dice faces (one letter per die).

    A rolled 'Q' is expanded to 'QU', per Boggle convention.
    """
    rolled = []
    for die in input_dice:
        face = random.choice(die)
        rolled.append('QU' if face == 'Q' else face)
    return rolled
|
3c934c80c07338131687bf3869d7ad322b27c672
| 324,223
|
def darken_color(c):
    """
    Darken a color.

    Small utility to compute the border color of problem name badges:
    each RGB channel is halved.  When no color is passed, a dark border
    ('#000') is returned.
    """
    if not c:
        return '#000'  # black
    # Parse '#rrggbb' into the three channel values.
    r = int(c[1:3], 16)
    g = int(c[3:5], 16)
    b = int(c[5:], 16)
    packed = int(r * 0.5) << 16 | int(g * 0.5) << 8 | int(b * 0.5)
    return f'#{packed:06x}'
|
bbf0873f716d29781b10e3c93dafe39a7704fbc5
| 122,370
|
def _expt__CMORvar(self):
"""Return set of CMORvar item identifiers for CMORvars requested for this experiment"""
cmv = set()
for u in self._get__requestItem():
ri = self._inx.uid[u]
rl = self._inx.uid[ri.rlid]
for i in rl._get__CMORvar():
cmv.add(i)
return cmv
|
db40213a4aee047321a6e19647e04228c2c9f084
| 663,697
|
def lat_to_yindex(lat, res=1):
    """
    For a given latitude return the y index in a global grid.

    :param lat: Latitude of the point
    :param res: resolution of the grid in degrees
    :type lat: float
    :type res: float
    :return: grid box index
    :rtype: integer

    Assumes a 360 x 180 x 73 structure: 1-degree boxes with the
    northernmost box at index zero.  Inputs on the border between grid
    cells are pushed south; results are clamped to the valid index range.
    """
    if res == 1:
        # Fast path for the standard 1-degree grid: clamp to [0, 179].
        return min(max(int(90 - lat), 0), 179)
    yindex = int((90 - lat) / res)
    if yindex >= 180 / res:
        yindex = int(180 / res - 1)
    if yindex < 0:
        yindex = 0
    return yindex
|
3a77fc2c4a04d707a5b6a7dd30629ee2686b08ec
| 291,024
|
def samerule (rulea, ruleb) :
    """Check whether rules a and b are the same.

    Parameters
        rulea, ruleb : rules to compare (sequences of ops with .type/.val)

    Returns
        True if the rules have the same length and matching op
        type/val pairs at every position, else False.
    """
    if len(rulea) != len(ruleb):
        return False
    return all(
        op_a.type == op_b.type and op_a.val == op_b.val
        for op_a, op_b in zip(rulea, ruleb)
    )
|
6394c643bfc5c0bd493f71e1df1af195d5fa604d
| 347,971
|
def stream_interactions(G):
    """Generate a temporally ordered stream of interactions.

    Parameters
    ----------
    G : graph
        A DyNetx graph.

    Returns
    -------
    nd_iter : an iterator
        Yields 4-tuples of (node, node, op, timestamp).

    Examples
    --------
    >>> G = dn.DynGraph()
    >>> G.add_path([0,1,2,3], t=0)
    >>> G.add_path([3,4,5,6], t=1)
    >>> list(dn.stream_interactions(G))
    [(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)]
    """
    # Thin functional wrapper over the graph's own method.
    return G.stream_interactions()
|
8c65513fe7a4971fdac20f73b0c7a9fbede85e15
| 297,646
|
def makeStamp(target, recipe):
    """Build the stamp filename for a target/recipe pair."""
    return f'{target}-{recipe}.stamp'
|
0a4361c98637f63eca493cacc9a4602c5f190f2b
| 456,564
|
import posixpath
def get_relative_url(url, other):
    """
    Return given url relative to other.

    Both are treated as slash-separated paths, similarly to the 'path' part
    of a URL.  The last component of `other` is skipped if it contains a
    dot (considered a file).  Actual URLs (with schemas etc.) aren't
    supported.  The leading slash is ignored.  Paths are normalized
    ('..' works as parent directory), but going higher than the root has
    no effect ('foo/../../bar' ends up just as 'bar').
    """
    if other != '.':
        head, tail = posixpath.split(other)
        # A dotted last component is considered a file name; drop it.
        if '.' in tail:
            other = head
    relurl = posixpath.relpath('/' + url, '/' + other)
    # Preserve a trailing slash from the input url.
    return relurl + '/' if url.endswith('/') else relurl
|
3dbfd35c41e133b02af656b7548b201a4f39e0b2
| 564,439
|
def middle(shape):
    """Given the 2D vertices of a shape, return the (x, y) coordinates of
    its centroid (arithmetic mean of the vertices).
    """
    count = len(shape)
    x_total = sum(vertex[0] for vertex in shape)
    y_total = sum(vertex[1] for vertex in shape)
    return (x_total / count, y_total / count)
|
765dfcf8219ae63a2489abcd164328bea6c48db9
| 459,473
|
def get_at_index(l, index):
    """Template filter: return the element of `l` at `index`."""
    return l[index]
|
fbaa95ebe47c09b5348d8f46001cccb099658a12
| 298,119
|
import ipaddress
def is_in_network(ip, net):
    """Check whether an IP address is part of a network.

    Arguments:
        ip: IP address
        net: Network with mask bits (like '1.3.4.0/24')

    Returns True if the IP address is in the network, else False.
    """
    address = ipaddress.ip_address(ip)
    network = ipaddress.ip_network(net)
    return address in network
|
2a35eef9ab6a5d25b9d35668dc0946661f4609cd
| 227,130
|
def config_to_options(config):
    """
    Convert a ConfigParser instance to a simple options object.

    Parameters
    ----------
    config : object
        A ConfigParser instance with 'smtp', 'mail' and 'auth' sections.

    Returns
    -------
    object
        An options instance with host, port, to_addr, from_addr, subject,
        encoding and username attributes; the address fields have their
        %(host)s / %(prog)s placeholders interpolated.
    """
    class Options:
        host = config.get('smtp', 'host', raw=True)
        port = config.getint('smtp', 'port')
        to_addr = config.get('mail', 'to_addr', raw=True)
        from_addr = config.get('mail', 'from_addr', raw=True)
        subject = config.get('mail', 'subject', raw=True)
        encoding = config.get('mail', 'encoding', raw=True)
        username = config.get('auth', 'username')
    opts = Options()
    # BUG FIX: the original computed these interpolations but discarded the
    # results; assign them so callers see the formatted addresses.
    fields = {'host': opts.host, 'prog': 'notify'}
    opts.from_addr = opts.from_addr % fields
    opts.to_addr = opts.to_addr % fields
    return opts
|
5bed4f5f64bd9b4fe1a955057cc13441ff8f4f4d
| 158,997
|
def find_production_volume_by_id(dataset, uuid):
    """Find the production volume of the exchange in ``dataset`` whose id
    is ``uuid``.

    Raises ``ValueError`` if ``uuid`` is not found in the dataset, or if
    the found exchange does not have a production volume.
    """
    for exchange in dataset['exchanges']:
        if exchange['id'] != uuid:
            continue
        if 'production volume' not in exchange:
            raise ValueError("Referenced exchange does not have a prod. volume")
        return exchange['production volume']
    raise ValueError("Exchange id {} not found in dataset".format(uuid))
|
96bf9a84d360df7e6173a02ef09b9fbcf223af5c
| 691,546
|
def parseNum(num):
"""0x or $ is hex, 0b is binary, 0 is octal. Otherwise assume decimal."""
num = str(num).strip()
base = 10
if (num[0] == '0') & (len(num) > 1):
if num[1] == 'x':
base = 16
elif num[1] == 'b':
base = 2
else:
base = 8
elif num[0]=='$':
base = 16
return int(num, base)
|
9cf86b2fe67eee89aabba77465afffb5c20cbb5b
| 181,211
|
import pathlib
def process_path(path, parent, default):
    """Convert a path to an absolute path based on its current value.

    Arguments
    ---------
    path : PathLike
        The path to be processed (may be None).
    parent : PathLike
        The parent path that `path` is appended to when relative.
    default : PathLike
        Substitute used when `path` is None.

    Returns
    -------
    pathlib.Path:
        `path` (or `default` when path is None) itself when absolute,
        otherwise joined under `parent`.
    """
    chosen = pathlib.Path(default if path is None else path)
    if chosen.is_absolute():
        return chosen
    return pathlib.Path(parent) / chosen
|
8e69ef8881f854e9d6016aaffb96a0210f432d20
| 652,553
|
import re
def detect_patient_age(info_par):
    """Return the age of the patient described in a WHO assessment
    paragraph, or 'unknown' when no age phrase is found.

    Searches for "year-old" / "year old" (and month variants).
    """
    # Raw string avoids the invalid-escape-sequence warning for \d, and the
    # match list is computed once instead of running the identical findall
    # twice (original defect).
    matches = re.findall(r'(\d{1,2}) ?(:?-?(year|month)(-| )old)', info_par)
    if matches:
        return matches[0][0]
    return 'unknown'
|
60f4027884753872c75814026e351eeaf7eedf3c
| 647,204
|
from pathlib import Path
def _get_files_from_dir(directory, file_format):
"""
Returns the list of file paths with the given format contained in the given directory and its subdirectories.
:param directory: Path to the root directory.
:type directory: str
:param file_format: Format of the files to retrieve, specified as '*.format'. It can specify any pattern that
matches the desired files as long as it is compatible with Path().rglob().
:type file_format: str
:return: A list with the paths to all the files found in 'dir' with format 'file_format'.
:rtype: list[Path]
"""
return list(Path(directory).rglob(file_format))
|
222b050d3fcb02705257629fbf69a29e30f12c72
| 543,579
|
def post(tokens):
    """
    Post-process output from the NLTK tokenizer.

    Strips one trailing period from each token.

    Args:
        tokens: a list containing a tokenized text.

    Returns:
        The processed token list.
    """
    # BUG FIX: the original indexed t[-1], which raised IndexError for an
    # empty-string token; endswith handles the empty case safely.
    return [t[:-1] if t.endswith(".") else t for t in tokens]
|
dca1e3c925a09d0791410b28e62093efb3c459c3
| 94,181
|
def _regularize_spaces(text: str) -> str:
""" Replaces spaces in a string with underscores. """
return text.replace(' ', '_')
|
a144bae23d81b99e6f49d6421d6b1952ac5fc28b
| 293,020
|
import typing
import multiprocessing
def enumerate_cpu_counts() -> typing.List[int]:
    """Return a descending list of CPU worker counts to benchmark on this
    machine.

    A fixed fraction of the reported CPU count is removed for system /
    background processing (and to approximate physical cores); the
    remainder is split into up to four evenly spaced worker counts.  This
    explores how many CPUs to dedicate to MZ_WORKERS, not a prescription
    for correct values.

    On a Macbook with 8 cores, this returns [6, 4, 3, 2].
    On a 56 core machine, this returns [24, 18, 12, 6].
    On a 96 core machine, this returns [41, 30, 20, 10].
    """
    usable = round(multiprocessing.cpu_count() * 0.425)
    trials = 4
    # Fractional points (4/4, 3/4, ...) of the usable count, deduplicated.
    counts = {round(step * usable / trials) for step in range(trials, 0, -1)}
    return sorted(counts, reverse=True)
|
592fec8e11f381fb05d8b6dea12d3fe0cf34c5d6
| 108,729
|
def stations_highest_rel_level(stations, N):
    """Return (name, relative level) pairs for the N stations with the
    highest water level relative to their typical range.

    Stations whose relative level is unavailable (None) are skipped; the
    result is sorted by level, highest first.
    """
    levelled = []
    for station in stations:
        # Call the accessor once per station (the original called it twice
        # per kept station) and compare with `is not None`, not `!= None`.
        level = station.relative_water_level()
        if level is not None:
            levelled.append((station.name, level))
    # Removed the unused `count` variable from the original.
    levelled.sort(key=lambda pair: pair[1], reverse=True)
    return levelled[:N]
|
5e3dd920bdd82d85b18d4dd6f63302c1ab1d8b84
| 209,118
|
def get_unique_labels(data_frame, label_column):
    """
    Return the unique items of a `pd.DataFrame` column as a plain list.
    """
    uniques = data_frame[label_column].unique()
    # tolist() converts numpy scalars to native Python values.
    return uniques.tolist()
|
3878eaf435fe175c8f721907895e2197550af156
| 535,168
|
def px(cin, dpi=600):
    """Convert a dimension in centiinch into pixels.

    :param cin: dimension in centiinch
    :type cin: str, float, int
    :param dpi: dots per inch
    :type dpi: int
    """
    # 100 centiinch per inch; truncate to whole pixels.
    dots = float(cin) * dpi / 100
    return int(dots)
|
776bedc26211e52f5f98ca48710e2774a1c06730
| 268,506
|
def split_extension(file_name, special=('tar.bz2', 'tar.gz')):
    """
    Find the file extension of a file name, including support for
    special-case multipart file extensions (like .tar.gz).

    Parameters
    ------------
    file_name : str
        File name.
    special : iterable of str
        Multipart extensions, e.g. ('tar.bz2', 'tar.gz').

    Returns
    ----------
    extension : str
        Last characters after a period, or a value from 'special'.
    """
    # Default changed from a mutable list to a tuple (mutable default
    # argument anti-pattern); behavior for callers is unchanged.
    file_name = str(file_name)
    # Single scan: the original first tested endswith(tuple(special)) and
    # then looped over the same candidates again.
    for end in special:
        if file_name.endswith(end):
            return end
    return file_name.split('.')[-1]
|
7d8ee13b27f0ec5fce10f30817e4ed960a447b64
| 87,860
|
def speaker_emails(talk):
    """Return the list of email addresses of the talk's speakers."""
    emails = []
    for speaker in talk.get_all_speakers():
        emails.append(u'{}'.format(speaker.user.email))
    return emails
|
036e2f82d9cd2c78d3180941a8c1c8e729d83c8a
| 512,394
|
def build_slitw_format_dict(slitw_vals):
    """Build a dict mapping slit width values to matplotlib format
    strings, cycling through a fixed palette in first-seen order."""
    palette = ['r.', 'g.', 'b.', 'k.']
    mapping = {}
    for slitw in slitw_vals:
        if slitw in mapping:
            continue
        # len(mapping) counts distinct widths assigned so far.
        mapping[slitw] = palette[len(mapping) % len(palette)]
    return mapping
|
87fd6c91df450548367396dfaa0d225d86916866
| 600,090
|
import socket
def get_hostname() -> str:
    """Return the fully-qualified domain name of the machine this code
    runs on."""
    return socket.getfqdn()
|
8f2f65498a993c1034f26991369613d05c0bd083
| 624,587
|
def _ParseProjectNameMatch(project_name):
"""Process the passed project name and determine the best representation.
Args:
project_name: a string with the project name matched in a regex
Returns:
A minimal representation of the project name, None if no valid content.
"""
if not project_name:
return None
return project_name.lstrip().rstrip('#: \t\n')
|
cb9f92a26c7157a5125fbdb5dd8badd7ffd23055
| 707,497
|
def calculate_coordinates(pth):
    """
    Create the set of (x, y) tuples the path traverses, starting at (0,0).

    Each instruction is a direction character ('u', 'd', 'l', 'r', any
    case) followed by an integer distance; the start point itself is not
    included in the result.
    """
    deltas = {'d': (0, -1), 'u': (0, 1), 'l': (-1, 0), 'r': (1, 0)}
    x = y = 0
    coords = set()
    for instruction in pth:
        direction = instruction[:1]
        distance = int(instruction[1:].strip())
        key = direction.lower()
        if key not in deltas:
            raise Exception(f"Unknown direction {direction}")
        dx, dy = deltas[key]
        # Record every intermediate cell, one step at a time.
        for _ in range(distance):
            x += dx
            y += dy
            coords.add((x, y))
    return coords
|
0bba7e13ec8480104f6a96f7caae7f2bd73d3e2d
| 694,174
|
def abs2(x):
    """
    Compute the squared complex modulus |x|^2.
    """
    real_sq = x.real ** 2
    imag_sq = x.imag ** 2
    return real_sq + imag_sq
|
fd0e021619b004fd7b3fcdee213129f4bd947275
| 161,392
|
def read_until(stream, delimiter, max_bytes=16):
    """Read from a stream until the given delimiter is found.

    :param file stream: readable file-like object.
    :param bytes delimiter: delimiter string.
    :param int max_bytes: maximum bytes to read.
    :rtype: bytes|None -- the data before the delimiter, or None when the
        delimiter is not seen within max_bytes (or the stream ends).
    """
    collected = bytearray()
    delim_len = len(delimiter)
    while len(collected) < max_bytes:
        byte = stream.read(1)
        if not byte:
            # Stream exhausted before the delimiter appeared.
            return None
        collected += byte
        if collected[-delim_len:] == delimiter:
            return bytes(collected[:-delim_len])
    return None
|
2e582e536ac1b71a33a9a6910426acfe78284842
| 174,398
|
def dict_merge(base, override):
    """Recursively merge two dictionaries.

    Parameters
    ----------
    base : dict
        Base dictionary for the merge.
    override : dict
        Dictionary whose values override those of base.

    Returns
    -------
    dict
        Merged dictionary of base and overrides: nested dicts are merged
        recursively, sets are unioned, anything else is replaced.
    """
    merged = base.copy()
    for key, incoming in override.items():
        if key not in merged:
            merged[key] = incoming
        elif isinstance(merged[key], dict) and isinstance(incoming, dict):
            merged[key] = dict_merge(merged[key], incoming)
        elif isinstance(merged[key], set) and isinstance(incoming, set):
            merged[key] = merged[key].union(incoming)
        else:
            merged[key] = incoming
    return merged
|
ab35c0e4f9f72bd31855212354d6d91402dcd651
| 668,284
|
import torch
def load_ckp(model, ckp_path, device, parallel=False, strict=True):
    """Load a model checkpoint.

    Args:
        model: model whose state dict is populated.
        ckp_path (str): path to the checkpoint file.
        device: map_location passed to torch.load.
        parallel (bool): if True, load into model.module (DataParallel).
        strict (bool): forwarded to load_state_dict.

    Returns:
        int, int: stored epoch and iteration counters.
    """
    checkpoint = torch.load(ckp_path, map_location=device)
    # DataParallel wrappers keep the real model under .module.
    target = model.module if parallel else model
    target.load_state_dict(checkpoint['state_dict'], strict=strict)
    return checkpoint['epoch'], checkpoint['iter']
|
4fe4e368d624583216add3eca62293d5d1539182
| 6,402
|
import requests
def crypto_current(ticker="BTC", fiat="USD"):
    """
    Fetch the current price of a cryptocurrency from cryptocompare.com.

    Call: crypto_current()

    Parameters:
        ticker: which cryptocurrency (BTC for Bitcoin, ETH for Ethereum, ...)
        fiat: base currency (USD for US dollars, EUR for Euro, ...)

    Returns the price quoted in `fiat` (performs a network request).
    """
    endpoint = "https://min-api.cryptocompare.com/data/price?fsym={}&tsyms={}".format(ticker, fiat)
    payload = requests.get(endpoint).json()
    return payload[fiat]
|
ab0c1c452dc71ee1199e039dd69fc678a9334a40
| 536,504
|
def __is_global(lon, lat):
"""
check if coordinates belong to a global dataset
Parameters
----------
lon : np.ndarray or xarray.DataArray
lat : np.ndarray or xarray.DataArray
Returns
-------
bool
"""
if lon.max() - lon.min() > 350 and lat.max() - lat.min() > 170:
return True
return False
|
82340b5fef5d1826fac0eabd4c0adba0a6525a41
| 690,058
|
import glob
def _sglob(x):
"""Sorted glob()."""
return sorted(glob.glob(x))
|
bf1334c5c8cfded9612cfd93525e1ab512c032c4
| 439,900
|
def test_should_run(testname, test_filter):
"""Check if test should run.
Args:
testname: Name of test to check.
test_filter: Regex list that limits the tests to run.
Returns:
True if test_filter list is empty or None, True if testname matches any
regex in test_filter, False otherwise.
"""
if not test_filter:
return True
for r in test_filter:
if r.search(testname):
return True
return False
|
bead9ab9d6d580e0944841bb0e18ff907d9885df
| 306,338
|
def unescape(s):
    """The inverse of cgi.escape()."""
    # &amp; must be decoded last so freshly produced '&' characters are
    # not re-interpreted as entity starts.
    for entity, char in (('&quot;', '"'), ('&gt;', '>'),
                         ('&lt;', '<'), ('&amp;', '&')):
        s = s.replace(entity, char)
    return s
|
3bad6bc3679405dd0d223ea8ab6362a996067ea5
| 20,827
|
import random
def get_random(floor, ceiling):
    """Return a random integer in the inclusive range [floor, ceiling]."""
    # randint(a, b) is exactly randrange(a, b + 1).
    return random.randint(floor, ceiling)
|
3446535ae1fa3f4c2b20dbfa38fbd5da5f02f08a
| 210,631
|
def uf_affiliation(affiliation):
    """
    Given an affiliation string, return True if the affiliation is for
    UF (University of Florida), False if not.
    """
    # Substring markers that identify a UF author list.
    markers = ("Gainesville", "Univ Fl", "UNIV FL", "UF Col Med",
               "UF Coll Med")
    return any(marker in affiliation for marker in markers)
|
be452ad9e52e752ab7a57399a4acd23dc006b6ae
| 520,954
|
def is_same_shape(d1, d2):
    """
    Return True if the two dictionaries have the same shape: the same
    nested structure and keys.  Leaf values may differ.
    """
    if not isinstance(d1, dict):
        # Two non-dict leaves match; a leaf never matches a dict.
        return not isinstance(d2, dict)
    if not isinstance(d2, dict):
        return False
    if d1.keys() != d2.keys():
        return False
    # Same keys: every nested value must match shape too.
    return all(is_same_shape(d1[k], d2[k]) for k in d1)
|
3c76da4068f20989a7f5e7a097ed90afc1c3709a
| 392,672
|
def save_siteconfig_without_historical_record(siteconfig, *args, **kwargs):
    """
    Save model without saving a historical record

    Make sure you know what you're doing before you use this method.
    Note: this method is copied verbatim from django-simple-history.
    """
    # The flag tells django-simple-history's signal handler to skip this save;
    # it is removed again even if save() raises.
    siteconfig.skip_history_when_saving = True
    try:
        return siteconfig.save(*args, **kwargs)
    finally:
        del siteconfig.skip_history_when_saving
|
c113922f0c8ac50edd06f3e58c6687f0a3ef41ee
| 209,351
|
import glob
def get_globfiles(fileglob, minfiles=1, maxfiles=1):
    """
    Get file(s) matching ``fileglob``.  If the number of matching
    files is less than minfiles or more than maxfiles then an
    exception is raised.

    :param fileglob: Input file glob
    :param minfiles: Minimum matching files (None => no minimum)
    :param maxfiles: Maximum matching files (None => no maximum)
    :raises ValueError: if the match count is outside [minfiles, maxfiles]
    """
    matches = glob.glob(fileglob)
    count = len(matches)
    if minfiles is not None and count < minfiles:
        raise ValueError('At least %d file(s) required for %s but %d found' % (minfiles, fileglob, count))
    if maxfiles is not None and count > maxfiles:
        raise ValueError('No more than %d file(s) required for %s but %d found' % (maxfiles, fileglob, count))
    return matches
|
15318ed3c7c7c80f70295ca9a74d7fb6f5b299fd
| 624,572
|
def update_stats_collection(
    self,
    stat_type: str,
    enable: bool,
) -> bool:
    """Update statistics collection Enable/Disable info for named
    statistic

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - statsRetention
          - PUT
          - /gms/stats/collection/{statType}

    :param stat_type: Statistic type e.g. ``interfacebyoverlay``
    :type stat_type: str
    :param enable: ``True`` for Enabled or ``False`` for Disabled
    :type enable: bool
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    return self._put(
        f"/gms/stats/collection/{stat_type}?isEnabled={enable}",
        data={stat_type: enable},
        return_type="bool",
    )
|
efd1c8f415ec297f4401f3fd4a86c042160ffadb
| 427,761
|
from pathlib import Path
def get_file_name(file_path: str) -> str:
    """
    Returns the name of a file from a file path.

    :param file_path: file path
    :return: name of file
    """
    # Path.name is already a str, so no explicit conversion is needed.
    return Path(file_path).name
|
64d8a3237d382869ec4c0ed30f27bd99d67ff210
| 362,527
|
def get_permission_test(model, field, permission, instance=None):
    """
    Fetch a permission test for a field and permission.

    :param model: The model or instance
    :param field: Name of the field or None for instance/model-wide
    :param permission: Permission to check for
    """
    all_permissions = getattr(model, '__jsonapi_permissions__', {})
    field_permissions = all_permissions.get(field, {})
    # Default test permits everything when no explicit test is registered.
    return field_permissions.get(permission, lambda x: True)
|
e67135b5853d74ab6c35e94493c6528b6d4fdbff
| 129,895
|
def add_lat_long_coordinates(art_no_lat_lon_df, col_name):
    """
    Parameters
    ----------
    art_no_lat_lon_df (dataframe object) contains dataframe with column col_name containing coordinates in a list
    col_name (string) name of column

    Returns
    -------
    art_df (dataframe object) dataframe with new columns latitude and longitude
    """
    # Work on a copy so the caller's dataframe is untouched.
    art_df = art_no_lat_lon_df.copy()
    coordinate_pairs = art_df[col_name].to_list()
    # Each entry is a [longitude, latitude] pair; 'latitude' is inserted
    # first to keep the original column ordering.
    art_df['latitude'] = [pair[1] for pair in coordinate_pairs]
    art_df['longitude'] = [pair[0] for pair in coordinate_pairs]
    return art_df
|
70cab28cf761d2f5bb9d155a4424240ba9f08083
| 289,574
|
def factorial(n):
    """
    Returns n! = n * (n-1) * (n-2) ... * 1

    0! is 1.  Factorial is undefined for integers < 0.

    Examples:
        factorial(0) returns 1
        factorial(2) returns 2
        factorial(3) returns 6
        factorial(5) returns 120

    Parameter n: The integer for the factorial
    Precondition: n is an int >= 0
    """
    assert type(n) == int
    assert n >= 0
    # Multiply down from n; an empty product (n in {0, 1}) is 1.
    product = 1
    k = n
    while k > 1:
        product *= k
        k -= 1
    return product
|
9e2c9ca7e5c92381651b69a9d49e1def61fbde78
| 551,646
|
def request_is_live_tween_factory(handler, registry):
    """
    Tween that adds ``LIVE_REQUEST`` to the request's environment, used to
    determine ``request.is_live``.

    .. note::
        This should be added as close as possible to
        :data:`pyramid.tweens.INGRESS` so that ``request.is_live`` can be used
        in other tweens.
    """
    def request_is_live_tween(request):
        # Mark the request before delegating to the rest of the chain.
        request.environ["LIVE_REQUEST"] = True
        return handler(request)

    return request_is_live_tween
|
8d2a11b04b94009511b0e22f1233e419557a0347
| 346,879
|
def sfc_lw_cld(lwup_sfc, lwup_sfc_clr, lwdn_sfc, lwdn_sfc_clr):
    """Cloudy-sky surface net longwave radiative flux into atmosphere."""
    # Cloud effect = (all-sky up) - (clear-sky up) - (all-sky down) + (clear-sky down).
    # Evaluation order kept left-to-right for bit-identical float results.
    cloud_flux = lwup_sfc - lwup_sfc_clr - lwdn_sfc + lwdn_sfc_clr
    return cloud_flux
|
4ae62cb0d8fddacc4f8412ff3ef913a6ef4b5fcc
| 434,019
|
from typing import Any
from typing import Union
from typing import Tuple
import operator
def _ensure_index(x: Any) -> Union[int, Tuple[int, ...]]:
"""Ensure x is either an index or a tuple of indices."""
try:
return operator.index(x)
except TypeError:
return tuple(map(operator.index, x))
|
d47b62855d1cae40959412b6f509e6f76fefefc7
| 669,122
|
import pickle
def load_pickle(parent_dir):
    """
    Accepts a parent directory Path object
    which can contain only one pickled file with a .pkl extension
    Finds the file, loads it, and returns it
    """
    # Indexing the materialized list keeps the original IndexError
    # behavior when no .pkl file exists.
    pickle_path = list(parent_dir.glob('*.pkl'))[0]
    with pickle_path.open('rb') as handle:
        return pickle.load(handle)
|
e9aba6608d945ad01894a1af24601af3d9e664e0
| 639,613
|
from typing import List
def build_on_conflict_do_nothing_query(
    columns: List[str],
    dest_table: str,
    temp_table: str,
    unique_constraint: str,
    dest_schema: str = "data",
) -> str:
    """
    Construct sql query to insert data originally from `df` into `dest_schema.dest_table` via the
    temporary database table `temp_table`.

    If there are any conflicts on the unique index `pk`, do nothing

    Parameters
    ----------
    columns: List[str]
        A list of column names found in the temporary table that should be inserted into the final table
    dest_table : str
        The destination table
    temp_table : str
        The temporary table name (in public schema)
    unique_constraint : str
        A string referencing the unique key for the destination table
    dest_schema : str
        The name of the postgres schema for the destination

    Returns
    -------
    query: str
        The SQL query for copying data from the temporary table to the destination one
    """
    column_list = ", ".join(columns)
    cols = f"({column_list})"
    # Normalize the constraint so it is always parenthesized.
    if not unique_constraint.startswith("("):
        unique_constraint = f"({unique_constraint})"
    colnames = column_list
    return f"""
    INSERT INTO {dest_schema}.{dest_table} {cols}
    SELECT {colnames} from {temp_table}
    ON CONFLICT {unique_constraint} DO NOTHING;
    """
|
dbeeba72cec7be0a4ee24ad30044b12fec60e27d
| 668,809
|
import time
def f_seen(self, origin, match, args):
    """.seen <nick> - Reports when <nick> was last seen.

    :param origin: message origin; ``origin.sender`` is the reply target
        and ``origin.nick`` the asking user
    :param match: regex match whose group(2) carries the queried nick
    :param args: unused (kept for the bot's command signature)
    """
    if not match.group(2):
        return self.msg(origin.sender, 'Please provide a nick.')
    nick = match.group(2).lower()
    if not hasattr(self, 'seen'):
        return self.msg(origin.sender, '?')
    # BUG FIX: dict.has_key() was removed in Python 3; use "in" instead.
    if nick in self.seen:
        channel, t = self.seen[nick]
        t = time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(t))
        #msg = "I last saw %s at %s on %s" % (nick, t, channel)
        msg = 'I last saw %s at %s in some channel.' % (nick, t)
        self.msg(origin.sender, str(origin.nick) + ': ' + msg)
    else:
        self.msg(origin.sender, "Sorry, I haven't seen %s around." % nick)
|
a780a13b81e01128f3903bb2e3db2c8ab9f867c5
| 601,327
|
def get_compile_status(mod_obj):
    """
    Gets compilation status of given module object

    :param mod_obj: module object (a mapping that may carry a
        'compilation-status' key)
    :return: compilation status string, or '' when the status is missing
        or mod_obj cannot be queried
    """
    try:
        cstatus = mod_obj.get('compilation-status')
    except Exception:
        # Best-effort lookup: anything that can't answer .get() has no status.
        return ''
    return '' if cstatus is None else cstatus
|
c7ef9e54b9cdf6d7ba22ed75245791a060c8de62
| 204,519
|
def slice_sparse_matrix_to_components(mat, ri, rf):
    """Slice the matrix `mat` between indices `ri` and `rf` -- for csr or bsr.
    """
    # Data/index range covered by rows [ri, rf).
    start = mat.indptr[ri]
    stop = mat.indptr[rf]
    # Row pointers are rebased so the slice starts at 0.
    rebased_indptr = mat.indptr[ri:rf + 1] - start
    return (rebased_indptr, mat.indices[start:stop], mat.data[start:stop])
|
9360050af8d0f75659c27ab8647f8388ec23a7bd
| 400,246
|
def season_id_to_int(season_id):
    """
    Util to convert a season_id to an int.
    """
    # Only the first four characters carry the (numeric) season year.
    leading_digits = season_id[:4]
    return int(leading_digits)
|
5b77bc655db6ce32e27990a48dc6145fd57e2298
| 114,210
|
import csv
def get_file_entry_names(path):
    """Get the names of all entries in the food dictionary or a log file.

    :param path: A string of the food dictionary or log file pathname.
    :returns: A list of the entry names associated with each entry in the food dictionary or a log file.
    """
    with open(path) as f:
        # The entry name is the first field of each CSV row.
        return [row[0] for row in csv.reader(f)]
|
de75a82755c7a93b5a94a266ee149d544bc40973
| 631,950
|
def switch_bbox_coordinates_size(line, old_size, new_size):
    """
    This function takes a label in COCO bbox format and fits it to a different image size

    :param line: an annotation line ("x1,x2,x3,x4,id")
    :param old_size: the size of image the bbox is currently set for
    :param new_size: the new size of image the bbox should be converted to
    :return: the updated annotation line
    """
    fields = line.split(",")
    # Scale each of the four coordinates; the arithmetic matches
    # round(new_size * coord / old_size) per coordinate.
    scaled = [str(round(new_size * float(value) / old_size))
              for value in fields[:4]]
    return ",".join(scaled + [fields[4]])
|
021d6093d4e742e8251803e57b1ffb1889271e4f
| 483,906
|
def is_numeric(value):
    """This function checks whether or not a value is numeric either as an integer or a numeric string.

    .. versionadded:: 2.3.0

    :param value: The value to be examined
    :type value: str, int
    :returns: Boolean value indicating if the examined value is numeric
    """
    # type() comparisons (not isinstance) are deliberate: bool is a subclass
    # of int and must NOT be treated as numeric here.
    if type(value) == int:
        return True
    return type(value) == str and value.isnumeric()
|
8218652ff4029775feda6143ba019320f066c1da
| 104,657
|
import re
def truncate_html_words(s, num, end_text='...'):
    """Truncates HTML to a certain number of words.

    (not counting tags and comments). Closes opened tags if they were correctly
    closed in the given html. Takes an optional argument of what should be used
    to notify that the string has been truncated, defaulting to ellipsis (...).
    Newlines in the HTML are preserved. (From the django framework).

    :param s: the HTML string to truncate
    :param num: maximum number of words to keep (converted with int())
    :param end_text: marker appended after truncation; '' disables the marker
    :return: the (possibly truncated) HTML string
    """
    length = int(num)
    if length <= 0:
        return ''
    # HTML4 void elements: they never take a closing tag, so they are never
    # pushed onto the open-tags stack.
    html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area',
        'hr', 'input')
    # Set up regular expressions.
    # re_words matches an entity, a tag, or a word; only a word fills group(1).
    re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
    # re_tag splits a tag into (closing-slash, name, self-closing-slash).
    re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
    # Count non-HTML words and keep note of open tags
    pos = 0
    end_text_pos = 0  # position in s just after the length-th word (0 = not reached)
    words = 0
    open_tags = []  # stack of tag names still awaiting a closing tag (newest first)
    while words <= length:
        m = re_words.search(s, pos)
        if not m:
            # Checked through whole string
            break
        pos = m.end(0)
        if m.group(1):
            # It's an actual non-HTML word
            words += 1
            if words == length:
                end_text_pos = pos
            continue
        # Check for tag
        tag = re_tag.match(m.group(0))
        if not tag or end_text_pos:
            # Don't worry about non tags or tags after our truncate point
            continue
        closing_tag, tagname, self_closing = tag.groups()
        tagname = tagname.lower()  # Element names are always case-insensitive
        if self_closing or tagname in html4_singlets:
            pass
        elif closing_tag:
            # Check for match in open tags list
            try:
                i = open_tags.index(tagname)
            except ValueError:
                pass
            else:
                # SGML: An end tag closes, back to the matching start tag,
                # all unclosed intervening start tags with omitted end tags
                open_tags = open_tags[i + 1:]
        else:
            # Add it to the start of the open tags list
            open_tags.insert(0, tagname)
    if words <= length:
        # Don't try to close tags if we don't need to truncate
        return s
    out = s[:end_text_pos]
    if end_text:
        out += ' ' + end_text
    # Close any tags still open
    for tag in open_tags:
        out += '</%s>' % tag
    # Return string
    return out
|
6995b4bce78670884508e08521da78d2d88fd5bc
| 658,957
|
import itertools
def take(n, iterable):
    """Return first n items of the iterable as a list."""
    prefix = itertools.islice(iterable, n)
    return list(prefix)
|
4af17c753de382b8d2890393af31273590a1b05d
| 553,215
|
def gcd(x, y):
    """
    Find the gcd (greatest common divisor) of two numbers
    using the Euclidean algorithm.

    :param x: first number
    :param y: second number
    :return: gcd of x and y
    """
    # Repeatedly replace (x, y) with (y, x mod y) until the remainder is 0.
    while y:
        x, y = y, x % y
    return x
|
4d6c156c0d50c766afc48b1b8373dbf0775d3916
| 163,536
|
def get_or_create_const_composite(module, type_id, operands):
    """Get an OpConstantComposite instruction with given type/operands.

    An existing instruction is returned, or a new one is created if there
    is no such instruction already."""
    for existing in module.global_instructions.type_insts:
        is_match = (existing.op_name == 'OpConstantComposite'
                    and existing.type_id == type_id
                    and existing.operands == operands)
        if is_match:
            return existing
    # No match found; create a fresh instruction (operands copied so the
    # caller's list cannot alias the instruction's).
    return module.get_global_inst('OpConstantComposite', type_id, operands[:])
|
93e5a3c7d64b625d86fed4680c5e22ca401a8e9f
| 261,945
|
def dehidratate(traj, other=("Na+", "Cl-")):
    """
    Remove the waters and ions from the trajectory.

    PARAMS:
        traj: mdtraj trajectory with topology
        other: tuple with additional residue names to remove
    RETURNS:
        mdtraj trajectory without waters/ions.
    """
    # Keep every atom whose residue is neither water ("HOH"/"WAT") nor in
    # `other`.  (The original `if not system_indexes:` guard right after
    # `system_indexes = []` was always true, i.e. dead code — removed.)
    system_indexes = [
        atom.index
        for residue in traj.topology.residues
        if residue.name not in ("HOH", "WAT") and residue.name not in other
        for atom in residue.atoms
    ]
    return traj.atom_slice(system_indexes)
|
c2eb7866741be6103a6b933a50f1dd023f3c55ed
| 191,026
|
def maior(a, b, c):
    """Return the largest value among a, b and c."""
    # max() returns the first maximal argument, matching the original's
    # strict-greater-than comparisons on ties.
    return max(a, b, c)
|
0d6161298b8181fa3d1936c3d85f09cf0dbb8dd6
| 449,373
|
def do_trim(value):
    """
    Strip leading and trailing whitespace.
    """
    trimmed = value.strip()
    return trimmed
|
5294bad3d175d07c78b45e7151f18616c4412883
| 169,109
|
def build_description(summary, description):
    """
    Return a description string from a summary and description
    """
    summary = (summary or '').strip()
    description = (description or '').strip()
    # No description at all: fall back to the summary (possibly empty).
    if not description:
        return summary
    # Prepend the summary only when it adds information.
    if summary and summary not in description:
        return '\n'.join([summary, description])
    return description
|
8c61193c1b00628d432052a63328bd128da24634
| 154,871
|
from typing import Sequence
from typing import Union
from typing import Optional
def mean(values: Sequence[Union[int, float, None]]) -> Optional[float]:
    """
    Returns the mean of a list of numbers.

    Args:
        values: values to mean, ignoring any values that are ``None``

    Returns:
        the mean, or ``None`` if :math:`n = 0`
    """
    present = [v for v in values if v is not None]
    if not present:
        return None
    # float start value forces float division/accumulation, as the
    # original running total did.
    return sum(present, 0.0) / len(present)
|
a6175022257e39b1b28f438715af6a1124df8aea
| 249,236
|
def dh2hms(dh, format="{:02d}:{:02d}:{:06.3f}"):
    """Decimal hours as HH:MM:SS.SSS, or similar.

    Will work for degrees, too.

    Parameters
    ----------
    dh : float
    format : string, optional
      Use this format, e.g., for [+/-]HH:MM, use "{:+02d}:{:02d}".

    Returns
    -------
    hms : string

    """
    # Work with the magnitude; the sign is re-applied to the hours at the end.
    sign = -1 if dh < 0 else 1
    dh = abs(dh)
    hh = int(dh)
    mm = int((dh - hh) * 60.0)
    ss = ((dh - hh) * 60.0 - mm) * 60.0
    # Carry overflow caused by floating-point rounding (e.g. ss == 60.0).
    if ss >= 60:
        ss -= 60
        mm += 1
    if mm >= 60:
        mm -= 60
        hh += 1
    # NOTE(review): the sign is applied as sign * hh, so for inputs in
    # (-1, 0) (where hh == 0) the sign is lost even with a signed format
    # spec — confirm whether negative fractional hours need handling.
    return format.format(sign * hh, mm, ss)
|
190c04e2c073026666b6a7fc337b79d4c4b31c5e
| 476,817
|
def boxproj_(z, p, l, u):
    """Project the vector z onto the box [l, u].

    z, l, u are vectors of length p.
    """
    projected = z.flatten()
    for idx in range(p):
        # Clamp from below first, then from above (min(max(l, z), u)).
        lower_clamped = max(l[idx], projected[idx])
        projected[idx] = min(lower_clamped, u[idx])
    return projected
|
b62542d05ee63e36562ff3df715e7811abcdb2be
| 140,894
|
def seqify(flowgram, floworder):
    """
    Turns the flowgram into a string of bases.

    @param flowgram: an iterable container of integer flow values
    @param floworder: the flow order
    @return: a string representing the call
    """
    nflows = len(floworder)
    # Each flow value repeats its cycle-position base that many times.
    return "".join(floworder[ndx % nflows] * ext
                   for ndx, ext in enumerate(flowgram))
|
e52943d0658c89cb3c30150cbdb6f70466fe63e0
| 175,661
|
def _cvtfield(f):
"""
If str passed in via 'f' parameter is '-' then
return empty string otherwise return value of 'f'
:param f:
:return: empty string if 'f' is '-' otherwise return 'f'
:rtype: str
"""
if f is None or f != '-':
return f
return ''
|
f7e953cb856824cb9f8cc720bac90a60874bb3d8
| 668,314
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.