| content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
|---|---|---|
def export_python_string(earth_model, function_name="model"):
"""
Exports model as a string that evaluates as python code, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
:param function_name: string, optional, will be the name of the function in the returned string
    :return: string; when executed (either by writing it to a file, or using `exec`), it will define a python
        function that accepts an iterator over examples, and returns an iterator over transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_string_factory(earth_model.coef_[0, i]))
i += 1
return """def {:s}(example_iterator):
accessors = [{:s}]
for x in example_iterator:
yield sum(accessor(x) for accessor in accessors)
""".format(function_name, ",\n\t\t".join(accessors))
|
46a80ab1bc16dbf049dd4289509b7fca2235e7cb
| 282,209
|
import re
def clean(string):
"""Clean string
Cleans a string to be a valid Python variable name.
It replaces all invalid characters with an underscore.
Arguments:
string {str} -- string to be formatted
Returns:
str -- formatted string
"""
    string = re.sub(r'\W|^(?=\d)', '_', string).lower()
return string
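# Illustrative usage sketch, assuming `clean` above is in scope:
# every non-word character, and a leading-digit position, becomes "_".
assert clean("2 bad-name!") == "_2_bad_name_"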
|
3be5fabaa774e332f618d81a9a652904061b8115
| 592,598
|
import re
def parse_braces(sequence):
"""
Replace all occurrences of ``x{n}`` (where x is any character) with n
occurrences of x. Raise ValueError if the expression cannot be parsed.
>>> parse_braces('TGA{5}CT')
    'TGAAAAACT'
"""
# Simple DFA with four states, encoded in prev
result = ''
prev = None
    for s in re.split(r'(\{|\})', sequence):
if s == '':
continue
if prev is None:
if s == '{':
raise ValueError('"{" must be used after a character')
if s == '}':
raise ValueError('"}" cannot be used here')
prev = s
result += s
elif prev == '{':
prev = int(s)
if not 0 <= prev <= 10000:
raise ValueError('Value {} invalid'.format(prev))
elif isinstance(prev, int):
if s != '}':
raise ValueError('"}" expected')
result = result[:-1] + result[-1] * prev
prev = None
else:
if s != '{':
raise ValueError('Expected "{"')
prev = '{'
# Check if we are in a non-terminating state
if isinstance(prev, int) or prev == '{':
raise ValueError("Unterminated expression")
return result
|
0d1c5de475f311f02109f92cdbb5522c29be26bd
| 447,254
|
import time
def timestamp_dir(when=None):
"""
Returns a directory named after the specified time or
the current time, if no specific time is given
"""
if when is None:
when = time.time()
return time.strftime("%Y%m%d_%H%M%S", time.localtime(when))
|
668d08b701ce82c1bc27d5bfe45f3224709d91de
| 457,991
|
def points_from_columns(xs):
"""
Takes a list of rows and converts it into
a list of columns. For example,
xs = [[1, 2, 3], [4, 5, 6], [1], [2, 4]]
will return
x = [[1, 4, 1, 2], [2, 5, 4], [3, 6]]
xs: list of rows (lists).
returns: list of columns (lists).
"""
x = []
    for row in xs:
        while len(x) < len(row):
            x.append([])
        for i in range(len(row)):
            x[i].append(row[i])
return x
|
190a9e7d2d7f439a4a8c918bf5e7fa43d3960b95
| 502,530
|
def l2_norm(x):
"""
Calculates the l2 norm of the input tensor.
Args:
x: tensor, any shape
tensor to take the l2 norm of.
Returns: float,
l2 norm of the input
"""
    return x.view(-1).dot(x.view(-1)).sqrt()
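# Illustrative usage sketch, assuming torch is installed and `l2_norm` above is in scope:
# the norm of the vector (3, 4) is 5.
import torch
assert abs(float(l2_norm(torch.tensor([3.0, 4.0]))) - 5.0) < 1e-6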
|
beba8abb86b49a95259de6bcef6286617276de6f
| 615,103
|
def test_nan(dataframe):
    """
    Test if dataframe contains nan values
    Parameters
    ----------
    dataframe: pandas dataframe
    Raises
    ------
    ValueError
        If the dataframe contains nan values
    Returns
    -------
    is_valid: boolean
        True if the dataframe did not contain nan values
    """
    if dataframe.isnull().values.any():
        raise ValueError("values contain nan")
    return True
|
1dcc96521fbbff2f95d7e4395367f50882c3b96b
| 648,477
|
import time
def most_recent_planet(tree):
"""Returns planet node that has most recent update date.
:param tree: lxml etree
:returns: lxml etree
"""
updates = tree.findall('.//lastupdate')
update_time = [time.strptime(date.text,'%y/%m/%d') for date in updates]
planet = updates[update_time.index(max(update_time))].getparent()
return planet
|
5fa5c5ae45c18b1b44231ce2b3b56b14579ae8f4
| 398,608
|
def is_zero_length(quat):
"""Checks if a quaternion is zero length.
:param numpy.array quat: The quaternion to check.
:rtype: boolean.
:return: True if the quaternion is zero length, otherwise False.
"""
return quat[0] == quat[1] == quat[2] == quat[3] == 0.0
|
86f09ce7fbf5700d85fbf4db3b754026fd250ccc
| 236,260
|
def join_regexes(regexes):
""" Utility function, takes in a list of regexes and outputs their OR """
result = r''
for reg in regexes:
result = result + '('
result = result + reg
result = result + ')|'
return result[:-1]
|
40c6c1b1fcdbaba8fd13043244165c84aa8b0f3c
| 247,893
|
def strip_query(url):
"""Remove query string from a url"""
return url.split('?', 1)[0]
|
34fe61cde242aef09495d4bbc384d68d3961e103
| 587,345
|
def point_avg(points):
"""
Accepts a list of points, each with the same number of dimensions.
NB. points can have more dimensions than 2
Returns a new point which is the center of all the points.
"""
dimensions = len(points[0])
new_center = []
for dimension in range(dimensions):
dim_sum = 0 # dimension sum
for p in points:
dim_sum += p[dimension]
# average of each dimension
new_center.append(dim_sum / float(len(points)))
return new_center
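# Illustrative usage sketch, assuming `point_avg` above is in scope:
# the centroid of three 2-D points.
assert point_avg([[0, 0], [2, 0], [1, 3]]) == [1.0, 1.0]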
|
ac82a036ce5b6fdea0e8c842fba065acd16160a4
| 686,480
|
from functools import reduce
def fact2(n):
"""Compute the double factorial
as in https://stackoverflow.com/a/4740229/3249688
"""
return reduce(int.__mul__, range(n, 0, -2), 1)
|
b6362fcdf86748a2d666c41eaf091656b3d061d7
| 256,178
|
def slice_length(orange):
"""
Returns the length of the index corresponding to a slice.
For example, slice(0,4,2) has a length of two.
This is used by some of the methods in message.py.
"""
    if isinstance(orange, slice):
        # len(range(...)) gives the exact count, including negative steps and
        # steps that do not divide the span evenly.
        return len(range(orange.start, orange.stop, orange.step or 1))
    else:
        return len(orange)
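# Illustrative usage sketch, assuming `slice_length` above is in scope.
assert slice_length(slice(0, 4, 2)) == 2
assert slice_length(slice(0, 5, 2)) == 3
assert slice_length([7, 8, 9]) == 3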
|
c66fea06c712bc3b417f0dcf55883bd6f4590c2f
| 453,786
|
def __get_midi_csv(midi_strings):
"""split comma seperated strings into csv file
Arguments:
midi_strings {list} -- list of comma separated strings
Returns:
csv -- midi data in csv format
"""
midi_csv = []
for row in midi_strings:
midi_data = row.split(",")
midi_csv.append(midi_data)
return midi_csv
|
c1a23dc13c566dee43d254389061159852da2be8
| 285,949
|
def _k_mer_to_index(k_mer):
"""Converts a k-mer to its numerical index.
Args:
k_mer (str): The :math:`k`-mer to convert.
Returns:
int: The index of the :math:`k`-mer.
Examples:
>>> _k_mer_to_index("A")
0
>>> _k_mer_to_index("T")
3
>>> _k_mer_to_index("TT")
15
>>> _k_mer_to_index("TTT")
63
"""
result = 0
for base in k_mer:
result = result * 4 + ["A", "C", "G", "T"].index(base)
return result
|
ad5d51284c8e5382fbb856ec760ac3fc9a48380c
| 306,897
|
def update_dict(dict_, **kwargs):
"""Add keys to a new dictionary.
Args:
dict_: the dictionary to add to
kwargs: the key, value pairs to add
Returns:
a new dictionary
"""
return dict(list(dict_.items()) + list(kwargs.items()))
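# Illustrative usage sketch, assuming `update_dict` above is in scope:
# the original dictionary is left untouched.
d = {"a": 1}
assert update_dict(d, b=2) == {"a": 1, "b": 2}
assert d == {"a": 1}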
|
ce0c2b5a11552e4008491398a6b862d36a295e83
| 342,043
|
def convert_byte_dict_to_str_dict(inp: dict) -> dict:
"""
Convert dictionaries with keys and values as bytes to strings
Args:
inp: Dictionary with key and values in bytes
Returns:
Dictionary with key and value as string
"""
new_dict = dict()
for k, v in inp.items():
new_dict[k.decode()] = str(v.decode())
return new_dict
|
8f91945d108bb9cabd4ac1ba1bc109fd7b43f097
| 123,590
|
from typing import Optional
def injection_file_name(
science_case: str, num_injs_per_redshift_bin: int, task_id: Optional[int] = None
) -> str:
"""Returns the file name for the raw injection data without path.
Args:
science_case: Science case.
num_injs_per_redshift_bin: Number of injections per redshift major bin.
task_id: Task ID.
"""
file_name = f"injections_SCI-CASE_{science_case}_INJS-PER-ZBIN_{num_injs_per_redshift_bin}.npy"
if task_id is not None:
file_name = file_name.replace(".npy", f"_TASK_{task_id}.npy")
return file_name
|
57b034b6a60c317f0c071c1313d0d99f2802db30
| 704,717
|
def collection_image_location(instance, filename):
"""Return the location of a stored media file for a Workflow Collection."""
collection_id = instance.id
image_type = instance.type
file_extension = filename.rpartition(".")[2]
return f"workflow_system/collections/{collection_id}/{image_type}.{file_extension}"
|
32e377c7f04423e8bdf7b4e82cb40b4abc94e067
| 265,707
|
def find_nearest_power_of_two(a):
"""return closest power of two number smaller than a
i.e. if a = 1025, return 1024
"""
i = 0
while a > 1:
a = a / 2
i += 1
return 2 ** (i - 1)
|
83f9b52cd9056c674d95f88b439ad31f376e768b
| 476,799
|
def is_simple_bool(typehint) -> bool:
"""Check if typehint is bool or Union[None, bool]."""
if typehint is bool:
return True
if hasattr(typehint, "__args__") and bool in typehint.__args__:
return True
return False
|
5c65d7f08f0d34e99451ca51fda8319b7d177164
| 282,408
|
import hashlib
def get_hash_values(file):
"""
Get MD5/SHA256/SHA512 hash
:param file: Path to file
    :return: Tuple of (MD5, SHA256, SHA512) hex digests
"""
blocksize = 65536
hashval_sha256 = hashlib.sha256()
hashval_md5 = hashlib.md5()
hashval_sha512 = hashlib.sha512()
with open(file, 'rb') as file:
while True:
buf = file.read(blocksize)
if not buf:
break
hashval_md5.update(buf)
hashval_sha256.update(buf)
hashval_sha512.update(buf)
return hashval_md5.hexdigest(), hashval_sha256.hexdigest(), hashval_sha512.hexdigest()
|
12b91706a6c5fefbea5ef26d0140eaf1c049ea3d
| 213,416
|
def keys_subset(base_dict, keys):
"""
Generate a dictionary that contains a subset of the entries of the base dictionary
with the given keys
"""
return dict((k, base_dict[k]) for k in keys if k in base_dict)
|
2dd0ab1172ab16c3da0d8cbc4dd1ec9181152107
| 514,950
|
import torch
def kl_forward(logu: torch.Tensor) -> torch.Tensor:
"""
Log-space Csiszar function for forward KL-divergence D_f(p,q) = KL(p||q).
Also known as the inclusive KL-divergence, minimizing results in
zero-avoiding / mass-covering behavior.
Args:
logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q.
"""
return torch.exp(logu) * logu
|
6f6bcc6a0e58f543954320154503e71ccda6f36f
| 264,945
|
import six
def rev_comp( seq, molecule='dna' ):
""" DNA|RNA seq -> reverse complement
"""
if molecule == 'dna':
nuc_dict = { "A":"T", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "T":"A", "V":"B", "W":"W", "Y":"R" }
elif molecule == 'rna':
nuc_dict = { "A":"U", "B":"V", "C":"G", "D":"H", "G":"C", "H":"D", "K":"M", "M":"K", "N":"N", "R":"Y", "S":"S", "U":"A", "V":"B", "W":"W", "Y":"R" }
else:
raise ValueError( "rev_comp requires molecule to be dna or rna" )
if not isinstance( seq, six.string_types ):
raise TypeError( "seq must be a string!" )
return ''.join( [ nuc_dict[c] for c in seq.upper()[::-1] ] )
|
2e42ccf5f37992d0fbe3a25afd70a04e6fc0c225
| 12,572
|
def capitalised_string(s):
"""
Check whether the first letter of a title is uppercase
:param s: String containing a title.
:return: True if first letter is uppercase else False.
"""
try:
return s[0].isupper()
except IndexError:
return False
|
66a109426b6dbc7c5f24a42d3b56c01014f32f57
| 538,310
|
def get_left_or_right(node_idx, struct):
""" Return True if the node is a left child of its parent.
o/w return false.
"""
parent_node = struct[node_idx]['parent']
return struct[parent_node]['left_child'] == node_idx
|
a258699d05a79ed25dd31e570d5b301cc9d0884f
| 538,339
|
def untuple(x):
"""Untuple a single value that was wrapped in a tuple."""
assert type(x) == type(()), 'Expected tuple.' # safety check
assert len(x) == 1, 'Expected tuple singleton.' # safety check
return x[0]
|
9c53199329c17f1d5b09ff29513e1870fb829c95
| 305,877
|
def get_max_rows(worksheet):
"""
Gets the number of non-empty rows of the current worksheet.
:param worksheet: Current worksheet to manipulate.
:return: Number of non-empty rows
"""
return worksheet.max_row
|
053ef312c84a99e343c52ec0ed0c667bf5cb5bc9
| 146,029
|
def get_rebalancing_flow(tnet):
"""
get rebalancing flow in a supergraph
Parameters
----------
tnet: transportation network object
Returns
-------
float
"""
    return sum(
        (tnet.G_supergraph[i][j]['flow'] - tnet.G_supergraph[i][j]['flowNoRebalancing'])
        * tnet.G_supergraph[i][j]['length']
        for i, j in tnet.G.edges()
    )
|
c66c2fd4a68d8ff948f5891b97c599c19f00099b
| 174,585
|
def first(iterable, default=None):
"""Return the first element of an iterable or the next element of a generator; or default."""
try:
return iterable[0]
except IndexError:
return default
except TypeError:
return next(iterable, default)
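# Illustrative usage sketch, assuming `first` above is in scope:
# works for sequences, iterators, and empty inputs.
assert first([10, 20]) == 10
assert first(iter([10, 20])) == 10
assert first([], default=-1) == -1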
|
10023a39bb98662c3c17fece28e28402ad56bc0a
| 527,212
|
def recursive_dichotomy(list, value, beginning, end):
"""
list: array of integers in ascending order
value: value to return its position in the list
beginning: first index of list (in this case 0)
end: last index of list (in this case len(list))
    return: False if value not in list, else the position of value in the list
"""
middle = (beginning+end)//2
if value not in list:
return False
if list[middle] == value:
return middle
elif value > list[middle]:
return recursive_dichotomy(list, value, middle, end)
elif value < list[middle]:
return recursive_dichotomy(list, value, beginning, middle)
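# Illustrative usage sketch, assuming `recursive_dichotomy` above is in scope:
# returns the index of the value in a sorted list, or False when absent.
assert recursive_dichotomy([1, 3, 5, 7, 9], 7, 0, 5) == 3
assert recursive_dichotomy([1, 3, 5, 7, 9], 4, 0, 5) is False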
|
a677ef0f7f0e4cc843da4fa96303655776d4515d
| 598,875
|
def isrange(a, b):
"""
Return True if the passed tuple `(a,b)` is a valid range
(that is, `a` may not be greater than `b`).
"""
#~ if r[0] is None or r[1] is None: return True
#~ if r[0] <= r[1]: return True
if a is None or b is None:
return True
if a <= b:
return True
return False
|
b7c7b09d586b066d8d24e7877c62612e49731ded
| 592,301
|
from pathlib import Path
from typing import Dict
def _get_srcset_st(sources_dir: Path, hinames: Dict[float, Path]):
"""
Create the srcset string for including on the md line.
ie. sources_dir might be /home/sample-proj/source,
hinames posix paths to
0: /home/sample-proj/source/plot_types/images/img1.png,
2.0: /home/sample-proj/source/plot_types/images/img1_2_0x.png,
The result will be:
'/plot_types/basic/images/mkd_glr_pie_001.png,
/plot_types/basic/images/mkd_glr_pie_001_2_0x.png 2.0x'
"""
srcst = ''
for k in hinames.keys():
path = hinames[k].relative_to(sources_dir).as_posix().lstrip('/')
srcst += '../' + path
if k == 0:
srcst += ', '
else:
srcst += f' {k:1.1f}x, '
if srcst[-2:] == ', ':
srcst = srcst[:-2]
return srcst
|
c7e28d24d40edd49d5b5715c3f9d4957a63af02f
| 210,156
|
import torch
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=-1, keepdim=True).sqrt()
X = torch.div(X, norm+1e-10)
return X
|
7517e81e97ec63be085fd2f7f6502e6b3c7541d8
| 661,657
|
def Median(data):
"""Returns the median of a time series"""
return data.median()
|
a0c7474ca3ed2240320346159c891da342d6402c
| 413,265
|
import re
def chop_after_title(source):
"""Return the source of a document after DocInfo metadata.
:param source: Source of RST document
:type source: string
:returns: All source content after docinfo
:rtype: string
"""
# find the last docinfo element
index = re.findall(r'[=-]{3,}\n.*\n[-=]{3,}', source, re.MULTILINE)[-1]
# find the character position of last docinfo element + len of it
rest = source[source.rindex(index) + len(index) :]
return rest.strip()
|
44602f53a6433073389946986bb809046616abe7
| 331,587
|
def make_matrix(rows, cols, fill=0.0):
"""Returns a matrix (list of list of floats) using a default
value.
:param rows: Number of rows
:type rows: int
:param cols: Number of columns
:type cols: int
:param fill: Default value for each element in the matrix
:type fill: float
"""
m = []
for i in range(rows):
m.append([fill] * cols)
return m
|
c361c5d6ea1f0f3af9b29f18e127bce2af394d8c
| 251,899
|
def solution(A): # O(N)
"""
Identify whether four sides (given by four integers) can form a square,
a rectangle, or neither.
Input: You will receive an array of strings, each containing four
space-separated integers, which represent the length of the sides
of a polygon. The input lines will follow the 'A B C D' order as in the
following representation:
|-----A-----|
| |
| |
D B
| |
| |
|-----C-----|
    Output: Whether the sides will form a "square", a "rectangle" or some "other polygon"
    Constraints: The four integers representing the sides will be
    such that: -2000 <= X <= 2000 (where X represents the integer)
>>> solution('36 30 36 30')
'rectangle'
>>> solution('15 15 15 15')
'square'
>>> solution('46 96 90 100')
'other polygon'
>>> solution('86 86 86 86')
'square'
>>> solution('100 200 100 200')
'rectangle'
>>> solution('100 100 200 200')
'other polygon'
>>> solution('-100 200 -100 200')
'other polygon'
"""
array = [int(value) for value in A.split(' ')] # O(N)
side_A = array[0] # O(1)
side_B = array[1] # O(1)
unique_values = set(array) # O(N)
num_unique_sides = len(unique_values) # O(1)
if side_A < 0 or side_B < 0: # O(1)
return 'other polygon' # O(1)
if num_unique_sides == 1: # O(1)
return 'square' # O(1)
if num_unique_sides == 2: # O(1)
if side_A == side_B: # O(1)
return 'other polygon' # O(1)
return 'rectangle' # O(1)
return 'other polygon' # O(1)
|
be5f7c763d6eedb8e9740cdecbc89caa4d0773a2
| 348,689
|
import re
def filter_collection(collection, include=None, exclude=None):
"""Filter collection based on include and exclude regexps.
The regexp patterns are not implicitly anchored at the beginning of
strings (i.e. `search` is used, rather than `match`). E.g.
'abc' will match '_abc_' as well as 'abcdef'.
:return: set of matching collection elements
"""
matches = set()
if include:
include_pat = re.compile(include)
else:
include_pat = None
if exclude:
exclude_pat = re.compile(exclude)
else:
exclude_pat = None
for element in collection:
if include_pat and not re.search(include_pat, element):
continue # Do not include this database; failed to match include
if exclude_pat and re.search(exclude_pat, element):
continue # Do not include this database; matched exclude
matches.add(element)
return matches
|
a79102dcec463dd58e33b724e2bae347b2cff9c6
| 215,610
|
def count_digit(value):
"""Count the number of digits in the number passed into this function"""
    digit_counter = 0
    value = abs(value)
    if value == 0:
        return 1
    while value > 0:
        digit_counter = digit_counter + 1
        value = value // 10
    return digit_counter
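# Illustrative usage sketch, assuming `count_digit` above is in scope.
assert count_digit(12345) == 5
assert count_digit(0) == 1
assert count_digit(-42) == 2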
|
f9b1738804b0a40aa72283df96d2707bcfd7e74c
| 13,790
|
import re
def sanitize(phrase):
"""Sanitizes words by removing punctuation"""
    # Hyphen is placed last in the class so it is treated literally, not as a range
    return re.sub(r'[!():?.-]', '', phrase)
|
79c20b88120144cc88fcd8934b9cc1277cb95e84
| 86,718
|
import math
def entropy(ps):
"""Calculates the entropy (log 2) of the distribution given by p
"""
entropy = 0.0
for p in ps:
if not(p == 0):
entropy -= p*math.log(p, 2)
return entropy
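# Illustrative usage sketch, assuming `entropy` above is in scope:
# a fair coin has exactly one bit of entropy.
assert abs(entropy([0.5, 0.5]) - 1.0) < 1e-12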
|
609e0e2f03579c8ce39f274116f69ca177eabccc
| 31,425
|
def H_acceptor_count(mol):
"""Hydrogen bond acceptor count """
mol.require("Valence")
return sum(1 for _, a in mol.atoms_iter() if a.H_acceptor)
|
bcbd446a1c3b729599f3d44c617f10a8ee6965b8
| 490,358
|
def check_x_lim(x_lim, max_x):
"""
Checks the specified x_limits are valid and sets default if None.
"""
if x_lim is None:
x_lim = (None, None)
if len(x_lim) != 2:
raise ValueError("The x_lim parameter must be a list of length 2, or None")
try:
if x_lim[0] is not None and x_lim[0] < 0:
raise ValueError("x_lim[0] cannot be negative")
if x_lim[1] is not None and x_lim[1] > max_x:
raise ValueError("x_lim[1] cannot be greater than the sequence length")
if x_lim[0] is not None and x_lim[1] is not None and x_lim[0] >= x_lim[1]:
raise ValueError("x_lim[0] must be less than x_lim[1]")
except TypeError:
raise TypeError("x_lim parameters must be numeric")
return x_lim
|
6dda696f4a001d021c8074675e0905f049e88e70
| 678,090
|
def _GetReleaseTracks(release_tracks):
"""Returns a string representation of release tracks.
Args:
release_tracks: API versions to generate release tracks for.
"""
release_tracks_normalized = '[{}]'.format(', '.join(
[track.upper() for track in sorted(release_tracks)]))
return release_tracks_normalized
|
2d4a66996844fe27144eb22d8eb7832d4ed9dd7f
| 672,719
|
def _map_function_arg_pairs(function_list, arg):
"""Method that maps a list of functions to a list of arguments
Args:
function_list: List of functions
arg: List of single arguments
Returns:
        Map: map of functions with injected arguments.
"""
return map(lambda func, arg: func(arg), function_list, arg)
|
681b91d8c721ed76e70feb1bae2e3a819f444f1a
| 212,004
|
def _find_in_list(key, a, start=None, force=False):
"""key - object you want to find
a - list to search
return - index in a
"""
if start is None:
sequence = range(0, len(a))
else:
sequence = range(start, len(a))
for i in sequence:
if a[i] == key:
return i
assert not force, '%s not found' % key
|
945996874c528e699f091dd47154b36ce5e68302
| 161,455
|
def token_response(oauth_test_client):
"""
Return a successful token response.
"""
oauth_test_client.authorize(data={"confirm": "yes"})
return oauth_test_client.token().response
|
150d2ae984c5b6074a2ad55df517ab075f50d844
| 292,776
|
def url_add_trailing_slash(uri: str) -> str:
"""Add trailing slash to url if this is valid
Parameters
----------
uri : str
Web address to update
Returns
-------
str
uri or updated uri
"""
if len(uri) == 0 or uri[-1] == "/":
return uri
split_uri = str.split(uri, "/")
if len(split_uri) == 0:
return f"{uri}/"
if len(split_uri[-1]) > 1 and "?" not in split_uri[-1]:
return f"{uri}/"
return uri
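# Illustrative usage sketch, assuming `url_add_trailing_slash` above is in scope:
# a slash is appended only when the URL does not already end in one and has no query string.
assert url_add_trailing_slash("https://example.com/docs") == "https://example.com/docs/"
assert url_add_trailing_slash("https://example.com/docs/") == "https://example.com/docs/"
assert url_add_trailing_slash("https://example.com/search?q=x") == "https://example.com/search?q=x"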
|
cf77459af356e6d49dc073cb0865f496dcf4d386
| 608,235
|
def add_prefix ( table , prefix = '' ) :
"""Add certain prefix to each line of the table
>>> table = ...
>>> table = add_prefix ( table , '# ')
"""
return prefix + table.replace ( '\n' , '\n' + prefix ) if prefix else table
|
a0f45c9a4daab0bee758fefd9aaf143ff5ec8f9d
| 226,239
|
def next_card(prize_card, hand, max_card):
""" Next-card strategy just picks the next card in the hand (effectively random). """
return hand.cards[0]
|
509f61b70c38b978453b8646b863f18871523e60
| 86,025
|
def cross_spectrum(spectrumA, spectrumB):
"""
Returns a cross-spectrum, ie. spectrum of cross-correlation of two signals.
This result does not depend on the order of the arguments.
Since we already have the spectra of signals A and B and and want the
spectrum of their cross-correlation, we can replace convolution in time
domain with multiplication in frequency domain.
"""
return spectrumA * spectrumB.conj()
|
5d604270c3a7b03994cd8c24b06c00c779224ca3
| 245,346
|
import random
def train_test_split_ind(n_sample, test_size, method='random'):
"""
Return indices splitting n_samples into train and test index lists.
:param n_sample: int: number of samples
:param test_size: int: number of samples in test set
:param method: str: 'first' ('last') to take first (last) t samples as test, or 'random'
:return: (list, list): list of train indices, list of test indices
"""
if method == 'first':
test_set = list(range(0, test_size))
train_set = list(range(test_size, n_sample))
elif method == 'last':
test_set = list(range(n_sample - test_size, n_sample))
train_set = list(range(0, n_sample - test_size))
elif method == 'random':
train_set = list(range(n_sample))
test_set = []
for j in range(test_size):
i = random.choice(train_set)
test_set.append(i)
train_set.remove(i)
test_set.sort()
else:
raise ValueError("'method' must be 'first', 'last', or 'random'")
return train_set, test_set
|
e639ea73be840a3a8754666ee4762779c142d493
| 527,933
|
def filter_by_states(providers, states):
"""Filter providers by list of states to reduce the quantity of data we're processing."""
return [provider for provider in providers if provider['state'] in states]
|
cbbac9800de598b29cd8c74cb5ed2d0cb8d4cbee
| 377,930
|
def humanize_join(sequence, separator=',', spacing=' ',
conjunction='or', oxford_comma=True):
"""
Join a list together using the English syntax of inserting a
conjunction before the last item.
Keyword Arguments:
sequence -- The sequence to join together. Convert iterables to sequences
using list() if necessary.
separator -- The string to separate each item with.
spacing -- A string appended to the separator to space out sequence
members (such as a space in sentences).
conjunction -- A string to insert between the last separator and the
final item in the sequence.
oxford_comma -- Boolean value that indicates whether or not to include
the separator before the conjunction inserted before the last item
in the sequence. Since there is no consensus on whether or not to use
the Oxford comma, it is provided as an option.
"""
lst = list(sequence)
full_sep = separator + spacing
string = full_sep.join(lst)
if len(lst) > 1:
last = lst[-1]
if not oxford_comma or len(lst) == 2:
start = string[:-len(last)-len(full_sep)] # Remove the last comma
else:
start = string[:-len(last) - len(spacing)]
return ''.join([start, spacing, conjunction, spacing, string[-len(last):]])
else:
return string
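# Illustrative usage sketch, assuming `humanize_join` above is in scope.
assert humanize_join(["red", "green", "blue"]) == "red, green, or blue"
assert humanize_join(["red", "green", "blue"], oxford_comma=False) == "red, green or blue"
assert humanize_join(["red"]) == "red"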
|
29d6812d6bb4e914f6e5fc0797235ae0a06d62e9
| 601,825
|
def cat_lump(x, n=5, prop=None, other_level="Other"):
"""
Lump together least common categories into an "Other" category
Parameters
----------
x : pd.Series
series to be modified
n : int
number of levels to preserve
prop : float
optional instead of n. Choose the minimum proportion for a level.
Must be between 0 and 1. Overrides n.
other_level : str
"other" category label
Returns
-------
y : pd.Series
modified series (with categorical type)
"""
counts = x.value_counts()
if prop:
assert 0 <= prop <= 1
min_count = int(prop * x.size)
if min_count > counts.min():
repl = counts.loc[counts < min_count].index
x = x.replace(repl, other_level)
elif len(counts) > n:
repl = counts.iloc[n:].index
x = x.replace(repl, other_level)
return x
|
04ca7d0ff64e98c737bac8e95620f27696ecc0c0
| 482,820
|
def roi_is_full(roi, shape):
"""
Check if ROI covers the entire region.
:returns: ``True`` if ``roi`` covers region from ``(0,..) -> shape``
:returns: ``False`` if ``roi`` actually crops an image
"""
def slice_full(s, n):
return s.start in (0, None) and s.stop in (n, None)
if isinstance(roi, slice):
roi = (roi,)
shape = (shape,)
return all(slice_full(s, n) for s, n in zip(roi, shape))
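# Illustrative usage sketch, assuming `roi_is_full` above is in scope.
assert roi_is_full((slice(0, 10), slice(None, 20)), (10, 20)) is True
assert roi_is_full(slice(2, 10), 10) is False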
|
1823f166262fc8978492a943db3583c901aae4fe
| 540,930
|
def replace_input_if_output_is_temp(df, input_column, output_column, output_is_temp):
"""Replaces the input column in the dataframe if the output was not set
This is used with get_temp_col_if_not_set to enable the behavior where a
transformer will replace its input column if an output is not specified.
"""
if output_is_temp:
df = df.withColumn(input_column, df[output_column])
df = df.drop(output_column)
return df
else:
return df
|
1216659377a10f3163838c31ee9c55167a6e2363
| 553,552
|
def get_elk_command(line):
"""Return the 2 character command in the message."""
if len(line) < 4:
return ""
return line[2:4]
|
550eda4e04f57ae740bfd294f9ec3b243e17d279
| 674
|
def _get_latest_raw_pdf(cls):
"""Given an ETL class, return the latest PDF in the data directory."""
# Get PDF paths for the raw data files
dirname = cls.get_data_directory("raw")
pdf_files = dirname.glob("*.pdf")
# Get the latest
latest = sorted(pdf_files)[-1]
year, month = map(int, latest.stem.split("_"))
return year, month
|
182ddbf9a9da3101a21ebc20fbfc71e8428d5502
| 181,027
|
import functools
import random
import time
def random_delay(min_delay: float, max_delay: float):
"""
Decorator to add a random delay before executing the function.
:param min_delay: Min delay in seconds.
:param max_delay: Max delay in seconds.
:return: Decorator.
"""
assert min_delay >= 0
assert min_delay <= max_delay
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
wait = random.uniform(min_delay, max_delay)
time.sleep(wait)
return f(*args, **kwargs)
return wrapper
return decorator
|
59753137917612fbffe59f30c08fc2e087eb5d34
| 356,620
|
def check_member(self, user):
"""
Check whether a user is in the circle
:param (Circle) self: The checked circle
:param (User) user: checked user
:return (bool): whether the user is in the circle
"""
return len(list(filter(lambda member: member.id == user.id, self.members))) != 0
|
68eed99c87793f41d87911eb0c4172ea01a5be07
| 77,992
|
def format_email_address(email: str) -> str:
"""
Returns a formatted email address.
"""
return email.lower()
|
2d60b689ed47d52001bc0afa74004dd686385004
| 359,316
|
def is_prime(n):
    """Returns True if n is prime, False if not"""
    if n < 2:
        return False
    for i in range(2, n - 1):
        if n % i == 0:
            return False
    return True
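# Illustrative usage sketch, assuming `is_prime` above is in scope.
assert [n for n in range(12) if is_prime(n)] == [2, 3, 5, 7, 11]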
|
81bd12b33d52283bc054b588f2133448047330a6
| 256,329
|
from typing import Counter
def count_ngram(
n: int,
target: str) -> Counter:
"""Count n-gram.
Args:
n (int): n-gram's n.
target: (str): target sentence.
Return:
Counter: python counter object.
"""
tokens = target.split(' ')
N = len(tokens)
counter = Counter(map(lambda x: ' '.join(x), zip(*[tokens[i:N-n+i+1] for i in range(n)])))
return counter
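# Illustrative usage sketch, assuming `count_ngram` above is in scope:
# bigram counts over a whitespace-tokenised sentence.
assert count_ngram(2, "to be or not to be") == {"to be": 2, "be or": 1, "or not": 1, "not to": 1}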
|
e969dbbceb1c025eb26dd398d3a64ee5aa401a91
| 539,531
|
def get_most_probable_words(wordlist, characters, pattern):
"""Get a list of most probable words
>>> word_list = ["trail", "banana", "frail", "drain"]
>>> get_most_probable_words(word_list, "filyar", "+rail")
['frail']
:param wordlist: wordlist to use
:param characters: scrambled characters
:param pattern: pattern to match against
:returns: list of probable words
"""
characters = characters.lower()
pattern = pattern.lower()
results = []
len_of_pattern = len(pattern)
count_of_characters = {i: characters.count(i) for i in characters}
non_empty_indices_in_pattern = {i: char for i, char in enumerate(pattern) if char != "+"}
for word in wordlist:
if len(word) != len_of_pattern:
continue
for char in word:
if char not in characters or word.count(char) > count_of_characters[char]:
break
else:
for idx, char in non_empty_indices_in_pattern.items():
if word[idx] != char:
break
else:
results.append(word)
return results
|
837894f112b13046238b79e92a6d73b32815a18e
| 335,792
|
import torch
def rand_uniform(a, b, shape=1):
""" sample numbers uniformly between a and b.
args:
a - lower bound
b - upper bound
shape - shape of the output tensor
returns:
torch.Tensor - tensor of shape=shape
"""
return (b - a) * torch.rand(shape) + a
|
1d20523689b1be985b6918630193710694f76a3b
| 172,671
|
from typing import Iterable
from typing import Any
from typing import List
import itertools
def flatten(val: Iterable[Iterable[Any]]) -> List[Any]:
"""
    Flattens a list of lists into a single list
>>> flatten( [['abc','def'],[12,34,46],[3.14, 2.22]])
['abc', 'def', 12, 34, 46, 3.14, 2.22]
"""
return list(itertools.chain(*val))
|
cd9ae9e393569ba7800735d09c8621f0d64beed3
| 700,535
|
def ChiSqStat(ydata, yerr, ymodel):
"""
Returns the chi-square given arrays of ydata, yerr, and ymodel values.
"""
chisquared = ((ydata - ymodel)/yerr)**2
stat = chisquared.sum()
return stat
|
b601baa57cb524188f8a8110d7853b14455158dd
| 226,407
|
def flattenDict(d):
"""Reduces the depth of a dictionary to 1, parent keys are ignored.
>>> d = {'a': 1, 'c': {'e': 5, 'd': 4}, 'b': 2, 'f': {'g': {'h': 8}}}
>>> flattenDict(d)
    {'a': 1, 'e': 5, 'd': 4, 'b': 2, 'h': 8}
"""
result = {}
    for k, v in d.items():
if isinstance(v, dict):
result.update(flattenDict(v))
else:
result.update({k:v})
return result
|
49369e32cb0b76f6783384905fc3d80e3460cac2
| 74,636
|
import requests
def get_drive_time(apiKey, origin, destination):
"""
Returns the driving time between using the Google Maps Distance Matrix API.
API: https://developers.google.com/maps/documentation/distance-matrix/start
# INPUT -------------------------------------------------------------------
apiKey [str]
origin [str]
destination [str]
# RETURN ------------------------------------------------------------------
    drive_time [float] (minutes)
"""
url = ('https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={}&destinations={}&key={}'
.format(origin.replace(' ','+'),
destination.replace(' ','+'),
apiKey
)
)
try:
response = requests.get(url)
resp_json_payload = response.json()
drive_time = resp_json_payload['rows'][0]['elements'][0]['duration']['value']/60
    except (requests.RequestException, ValueError, KeyError, IndexError):
print('ERROR: {}, {}'.format(origin, destination))
drive_time = 0
return drive_time
|
ed612022cdade4d98efc15fd0c42711bdc95a2e2
| 621,205
|
def matrix_to_str(mat):
"""Convert a 4x4 column-major order matrix into byte string."""
barr = [mat[i%4][i//4] for i in range(16)]
return bytes(barr)
|
516eb35a999a513962d836469dab220be9bedb04
| 137,912
|
def gcd(*numbers):
"""Euclid's algorithm for GCD. Arguments are cast to integer."""
def gcd2(a,b):
a=abs(int(a))
b=abs(int(b))
a,b = max(a,b), min(a,b)
while b>0:
a,b=b,a%b
return a
if len(numbers)==0:
return 1
res=numbers[0]
for n in numbers[1:]:
res=gcd2(n,res)
return res
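# Illustrative usage sketch, assuming `gcd` above is in scope.
assert gcd(12, 18, 24) == 6
assert gcd() == 1
assert gcd(7) == 7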
|
7750920c023165ae32f300c7ecbd7cbeb5f28fb1
| 554,629
|
def id_from_name(detector, name):
""" Jiffy function to get the id of a panel using its name
@param detector detector object
@param name panel name
@return index of panel in detector
"""
return [p.get_name() for p in detector].index(name)
|
941191fb65ba2f5c423bf37192c6f800562f6a1f
| 143,414
|
def ReversedDomainComponents(s):
"""Returns a list of domain components in reverse order.
Args:
s: (str) A string of the form "a.b.c"
Returns:
list(s) E.g. ['c', 'b', 'a']
"""
if not s:
return []
parts = s.split('.')
parts.reverse()
return parts
|
75e2fa484e789ec13bff6ac7994e1fe63744707f
| 354,958
|
def option6(current_person):
"""The number of children and grandchildren of a person can be printed
using this function.
Parameters:
current_person:
The current Person object.
Returns:
The original Person object for whom the function was called.
"""
print(f"Number of children: {len(current_person.get_children())}")
    grandchild = 0
    for child in current_person.get_children():
        grandchild += len(child.get_children())
    print(f"Number of grandchildren: {grandchild}")
    print()
    return current_person
|
07f64a1be137551e6bc7c18a06ca04848b451b99
| 150,121
|
def get_monitors(mon_name):
"""
    Holds the specs of all known monitors and returns the one with the specified name.
Parameters
----------
mon_name : str
name of the requested monitor
Returns
-------
dict
"""
test_monitors = {
"lab": {
"size_cm": (54, 0),
"size_px": (1024, 768),
"dist": 100,
"refresh_rate": 60
},
"RaZerBlade": {
"size_cm": (38, 20),
"size_px": (2560, 1440),
"dist": 60,
"refresh_rate": 165
},
"Ryan": {
"size_cm": (0, 0),
"size_px": (0, 0),
"dist": 60,
"refresh_rate": 60
},
"Asus": {
"size_cm": (54, 30),
"size_px": (1920, 1080),
"dist": 57,
"refresh_rate": 240
}
}
return test_monitors[mon_name]
|
cec927ae0d76c5352e461a27acc0b67d61b2b6fa
| 329,210
|
def get_html_header() -> str:
"""Helper to get a HTML header with some CSS styles for tables.""
Returns:
A HTML header as string.
"""
return """<head>
<style>
table {
font-family: arial, sans-serif;
border-collapse: collapse;
width: 768px
}
td, th {
border: 1px solid #999999;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #dddddd;
}
</style>
</head>"""
|
fc2302ff199ef944169a4096a53f6bd48fdb06fd
| 675,606
|
import json
def load_json(file_path):
"""
Loads json from a file
:param file_path: the path to the file
:return: dictionary
"""
    try:
        with open(file_path, "r") as file:
            return json.loads(file.read())
    except (OSError, IOError):
        # File missing or unreadable: (re)create it with an empty JSON object
        with open(file_path, "w") as file:
            json.dump({}, file, indent=4)
        return {}
|
7203397948b2c227d697169f6bcd663f5f818674
| 107,001
|
def evaluate(h,x):
"""evaluate an instance x with an hypothesis h, that is
a function X->{Yes,No}"""
for i,feature in enumerate(h):
if feature=="0":
return "No"
if feature!="?" and feature!=x[i]:
return "No"
return "Yes"
|
e5fb5235653b2dc096318be11ff5cbf9c096daeb
| 586,705
|
def has_resected_pair(unresected_idx, resected_imgs, img_adjacency):
"""Return true if unresected_idx image has matches to >= 1 currently resected image(s) """
for idx in resected_imgs:
if img_adjacency[unresected_idx][idx] == 1 or img_adjacency[idx][unresected_idx] == 1:
return True
return False
|
e4e205b2da5842e6a8c787cccc270d80ef5c91c4
| 264,990
|
def inhg_to_mb(val):
"""
Converts inches of mercury to millibars; accepts numeric or string
"""
try:
return float(val) * 33.8639
except (TypeError, ValueError):
return val * 33.8639
|
d1f4b7187697ab84e0b2e4ff0ccaec24693cc4c4
| 606,709
|
def _populate_wf_data(wf_def, wf_data):
"""
Populate the workflow data in the workflow definition
Args:
wf_def (dict): workflow definition
wf_data (dict): workflow data to be populated into workflow definition
Returns:
dict: workflow definition with the workflow data populated
"""
if not wf_data["arguments"]:
return wf_def
if not wf_def["spec"]["arguments"]:
wf_def["spec"]["arguments"] = {}
if "parameters" in wf_data["arguments"]:
wf_def["spec"]["arguments"]["parameters"] = wf_data["arguments"][
"parameters"]
if "artifacts" in wf_data["arguments"]:
wf_def["spec"]["arguments"]["artifacts"] = wf_data["arguments"][
"artifacts"]
return wf_def
|
a9b08117114005fa6a946e1307adbb4fd2fa8787
| 368,536
|
def search(value, node):
"""
Search for element in the linked list
:param value: value to look for
:param node: value of head node, start of list
    :return: bool: whether or not the element is in the list
"""
if node is not None: # iterate through while valid nodes
if node.value == value: # hey its in there
return True
return search(value, node.next_node) # keep looking
return False
|
71e842d31310364cf40c1ece73b46b252cc7514b
| 597,448
|
def connect(endpoint=None):
"""Generate connect packet.
`endpoint`
Optional endpoint name
"""
return u'1::%s' % (
endpoint or ''
)
|
bce21b5f7796ec26e5e238e68427decf2e34d46d
| 688,018
|
def _mk_key(srev_info):
"""Returns the key for a SignedRevInfo object."""
return (srev_info.rev_info().isd_as(), srev_info.rev_info().p.ifID)
|
780fb59859b514e0f6d4699cb1e6fef1d6e14042
| 40,511
|
def is_free(board: list, pos: int) -> bool:
"""checks if pos is free or filled"""
return board[pos] == " "
|
64b75aa5d5b22887495e631e235632e080646422
| 2,933
|
def process_spatial(geo):
"""Process time range so it can be added to the dates metadata
Parameters
----------
geo : list
[minLon, maxLon, minLat, maxLat]
Returns
-------
polygon : dict(list(list))
Dictionary following GeoJSON polygon format
"""
polygon = { "type": "Polygon",
"coordinates": [
[ [geo[0], geo[2]], [geo[0], geo[3]], [geo[1], geo[3]], [geo[1], geo[2]] ]
]}
return polygon
|
4ff7dea7d7dcefd5b6bfb1605dd50345fc379527
| 697,938
|
from typing import OrderedDict
def flatten_json(dictionary):
"""Recursively flattens a nested json.
:param dictionary: dict object to flatten
:return: dict object containing flat json.
"""
out = {}
def flatten(element, name=''):
if type(element) is dict:
for a in element:
flatten(element[a], name + a + '.')
elif type(element) is list:
i = 0
for a in element:
flatten(a, name + str(i) + '.')
i += 1
else:
out[name[:-1]] = element
flatten(dictionary)
out_ordered = OrderedDict()
for key, value in sorted(out.items()):
out_ordered[key] = value
return out_ordered
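# Illustrative usage sketch, assuming `flatten_json` above is in scope:
# nested dicts and lists are flattened into dotted keys, sorted by key.
flat = flatten_json({"a": {"b": 1}, "c": [2, 3]})
assert dict(flat) == {"a.b": 1, "c.0": 2, "c.1": 3}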
|
5552e6b10dd7a547d2082336f2045dfd9b3e58e4
| 88,309
|
def ioc_restart_pending(ioc_pv, channel_access):
"""Check if a particular IOC is restarting. Assumes it has suitable restart PV
Args:
ioc_pv: The base PV for the IOC with instrument PV prefix
channel_access (ChannelAccess): The channel access object to be used for accessing PVs
Return
bool: True if restarting, else False
"""
return channel_access.caget(ioc_pv + ":RESTART", as_string=True) == "Busy"
|
da7e7c08e2b84c89e502fd060974b4ab9ae31ad9
| 390,029
|
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
|
51e7944e04240dc260daa6ccd4ad240b92fd3405
| 70,577
|
def mc_eta_to_m12(mc, eta):
"""
Convert chirp mass and symmetric mass ratio to component masses.
Input: mc - chirp mass
eta - symmetric mass ratio
Return: m1, m2 - primary and secondary masses, m1>m2
"""
m1 = mc / eta ** 0.6 * (1 + (1 - 4 * eta) ** 0.5) / 2
m2 = mc / eta ** 0.6 * (1 - (1 - 4 * eta) ** 0.5) / 2
return m1, m2
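# Illustrative usage sketch, assuming `mc_eta_to_m12` above is in scope:
# for an equal-mass binary, eta = 0.25 and mc = M * 0.25**0.6, so two
# unit-mass components should be recovered.
mc = 2.0 * 0.25 ** 0.6
m1, m2 = mc_eta_to_m12(mc, 0.25)
assert abs(m1 - 1.0) < 1e-12 and abs(m2 - 1.0) < 1e-12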
|
8f76491ddbaae7c3d2f9118805ccdf127287870a
| 328,783
|
def read_first_line(path: str) -> str:
"""Read the first line from the given file."""
with open(path, encoding='utf8') as f:
return f.readline()
|
9e809953e620517cddf20c5510498f47ac7d2329
| 533,017
|
import logging
import json
def extract_initial_data(html):
"""Extract and parse the JSON string from the HTML of a playlist webpage.
Parameters
----------
html : str
HTML to extract the string from.
Returns
-------
Dict
Parsed JSON data.
"""
logging.info("Extracting JSON string from playlist HTML data")
for line in html.split("\n"):
if line.strip().startswith('window["ytInitialData"]'):
return json.loads(line.strip()[26:-1])
return None
|
52f66db3e341cd5f400b6e649d4ed37fed2674ff
| 55,577
|
def sign (x):
"""Return `-1` if `x < 0`, `0` if `x == 0` and `1` if `x > 0`."""
return 0 if x == 0 else (1 if x > 0 else -1)
|
abe82d47569d5d425b8f00aae9d13cc4837fda80
| 290,923
|
def list_strip_eos(list_, eos_token):
"""Strips EOS token from a list of lists of tokens.
"""
list_strip = []
for elem in list_:
if eos_token in elem:
elem = elem[:elem.index(eos_token)]
list_strip.append(elem)
return list_strip
|
ea6b6b2f5814150e863913f77d5b65f0fdce5966
| 515,617
|
def diff_sec(time1, time2):
"""Calculate the difference between two times in seconds."""
# return (as_utc(time1) - as_utc(time2)).total_seconds()
return (time1 - time2).total_seconds()
|
f964eb8a6cbdccca32668ccc67b8d4d42471dc41
| 70,098
|
def order_by_length(*items):
"""Orders items by length, breaking ties alphabetically."""
sorted_items = sorted(items, key=lambda item: (len(str(item)), str(item)))
return ' '.join(sorted_items)
|
dc0b8406c531d96e8192de5702c702fd4e533f5a
| 633,935
|