content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
import re
def parse_rst_params(doc):
    """
    Parse a reStructuredText docstring and return a dictionary
    mapping parameter names to their (whitespace-normalized) descriptions.

    Continuation lines indented deeper than the ``:param`` line are folded
    into the description, joined by single spaces.

    >>> doc = '''
    ... :param foo: foo parameter
    ...     foo parameter
    ...
    ... :param bar: bar parameter
    ... :param baz: baz parameter
    ...     baz parameter
    ...     baz parameter
    ... Some text.
    ... '''
    >>> params = parse_rst_params(doc)
    >>> params['foo']
    'foo parameter foo parameter'
    >>> params['bar']
    'bar parameter'
    >>> params['baz']
    'baz parameter baz parameter baz parameter'
    """
    # NOTE: in VERBOSE mode unescaped whitespace is ignored, so the literal
    # spaces after ':param' and ':' are written as escaped spaces (backslash
    # + space).  Group 1 captures the :param line's indent; continuation
    # lines must be indented strictly deeper (\1[ \t]+).
    param_re = re.compile(
        r"""^([ \t]*):param\ (?P<param>\w+):\ (?P<body>.*\n(\1[ \t]+\w.*\n)*)""",
        re.MULTILINE | re.VERBOSE)
    params = {}
    for match in param_re.finditer(doc):
        parts = match.groupdict()
        body_lines = parts['body'].strip().split('\n')
        params[parts['param']] = ' '.join(s.strip() for s in body_lines)
    return params
|
be35bdff9ea9aca182b44d3423c4987696e8c2c4
| 111,523
|
def get_batch(data, current_batch_index, batch_size, fixed_batch_size=True, continuous_batches=False):
    """
    Return one batch of data.

    Input:
        data: list of items to draw a batch from.
        current_batch_index: int, which batch to take.
        batch_size: int, nominal batch size.
        fixed_batch_size: bool; if False, the final batch may grow to absorb
            the remainder (size in [batch_size, 2 * batch_size)).
        continuous_batches: bool; if True, elements are taken with a stride of
            number_of_batches, e.g. data=[1..9], batch_size=3 yields
            [1,4,7], [2,5,8], [3,6,9].  If False, contiguous slices are taken.
    Output:
        batch: list, the selected batch.
    """
    number_of_batches = len(data) // batch_size
    if continuous_batches:
        # Strided selection — extended slicing replaces the original index loop.
        if fixed_batch_size:
            batch = data[current_batch_index:number_of_batches * batch_size:number_of_batches]
        else:
            # May return more than batch_size items.
            batch = data[current_batch_index::number_of_batches]
    else:
        start = current_batch_index * batch_size
        if fixed_batch_size or current_batch_index != number_of_batches - 1:
            batch = data[start:start + batch_size]
        else:
            # Last batch absorbs any leftover items.
            batch = data[start:]
    return batch
|
c48e42c92497738beed8a4737d6e638c831ced5d
| 579,620
|
def readAsInteger(fs, n):
    """Read exactly n bytes from stream fs and return them parsed as an integer.

    Raises Exception when fewer than n bytes are available, or when the bytes
    are not a valid integer literal (e.g. an RP66 version field).
    """
    raw = fs.read(n)
    if len(raw) != n:
        raise Exception('Can not read {} bytes only {}'.format(n, len(raw)))
    try:
        value = int(raw)
    except ValueError:
        raise Exception('Can not determine integer RP66 version number from {}'.format(raw))
    return value
|
60923c55bc5da7695a81384c27504d1d648485e6
| 315,947
|
def _fwd6(y, dt):
    """Sixth-order forward-difference approximation of dy[0]/dt.

    Parameters
    ----------
    y : (7,...) ndarray
        Data to differentiate along the first axis (needs 7 samples).
    dt : float
        Uniform time step.

    Returns
    -------
    float or (...) ndarray
        Approximate derivative of y at the first entry.
    """
    # Standard 6th-order one-sided stencil weights (divided by 60*dt).
    weights = (-147, 360, -450, 400, -225, 72, -10)
    numerator = sum(w * y[k] for k, w in enumerate(weights))
    return numerator / (60 * dt)
|
25f7c0902966de16a1eb09cecc2fc2870982ad97
| 591,839
|
import re
def function_parse(s):
    """Parse a call-like string "name(k1=v1, k2=v2)" into (name, kwargs).

    Arguments without '=' map to the empty string.  A non-matching input
    yields ("", {}).

    example:
        f_name, f_args = function_parse("clean(input_file=xxxx, output_file=yyyy)")
        # f_name, f_args -- clean', {'input_file': 'xxxx', 'output_file': 'yyyy'}
    """
    name, kwargs = "", {}
    match = re.match(r"(\w[\w\d_]*)\((.*)\)$", s)
    if match:
        name = match.group(1)
        for chunk in match.group(2).split(","):
            pieces = chunk.strip().split("=")
            if len(pieces) == 2:
                kwargs[pieces[0].strip()] = pieces[1].strip()
            else:
                kwargs[pieces[0].strip()] = ""
    return name, kwargs
|
2145e179970cd49ca58669e7ee5260b66d41c44d
| 379,795
|
def calcMetrics(tp, n_std, n_test):
    """Compute precision, recall and F1 from true positives and set sizes.

    Precision/recall default to 1 when their denominator is 0, since an empty
    test set matching an empty standard is considered correct.
    Returns (precision, recall, f1, tp, n_std, n_test).
    """
    precision = tp / float(n_test) if n_test > 0 else 1
    recall = tp / float(n_std) if n_std > 0 else 1
    denominator = precision + recall
    f1 = 2 * precision * recall / denominator if denominator > 0 else 0.0
    return (precision, recall, f1, tp, n_std, n_test)
|
548aebd314a12d49f1617372fb4f70b2412f61f8
| 60,228
|
def _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names, featurizer):
    """
    Resolve treatment/output/feature names, falling back to the names parsed
    at fit time when the caller did not supply them.

    Parameters
    ----------
    d_t: tuple of int
        Tuple of number of treatments (control excluded for discrete treatment).
    d_y: tuple of int
        Tuple of number of outcomes.
    treatment_names / output_names / feature_names: None or list
        User-supplied names; None falls back to input_names.
    input_names: dict
        Names parsed from the fit input ('treatment_names', 'output_names',
        'feature_names').
    featurizer: None or fitted featurizer of feature X.

    Returns
    -------
    (d_t, d_y, treatment_names, output_names, feature_names,
     transformed_feature_names)
    """
    # Empty tuples mean a single (scalar) treatment/outcome.
    d_t = d_t[0] if d_t else 1
    d_y = d_y[0] if d_y else 1
    if treatment_names is None:
        treatment_names = input_names['treatment_names']
    if output_names is None:
        output_names = input_names['output_names']
    if feature_names is None:
        feature_names = input_names['feature_names']
    if featurizer is None:
        transformed_feature_names = feature_names
    elif hasattr(featurizer, 'get_feature_names'):
        transformed_feature_names = featurizer.get_feature_names(feature_names)
    else:
        # Featurizer cannot report names for its transformed output.
        transformed_feature_names = None
    return (d_t, d_y, treatment_names, output_names, feature_names, transformed_feature_names)
|
0a534755a659bab3eda0bf5ecb909e0dec52ca3f
| 309,273
|
def _bw_str_(bw):
    """Self-printout for a Breit-Wigner function: mass and decay channel."""
    mass = bw.m0()
    channel = bw.channel()
    return f"BreitWigner ({mass},{channel})"
|
fd28f9f28e416d13b36591587811e6fd82cb96d9
| 308,236
|
def transform(word, mapping):
    """Map characters of word through the given dict and return them sorted
    as a single string."""
    translated = word.translate(word.maketrans(mapping))
    return "".join(sorted(translated))
|
0f2d31f96a2172c7727ac6f08b12b8c7de415cc0
| 193,580
|
from pathlib import Path
def firstexisting(filelist, path=Path('.')):
    """Return the first file from filelist that exists under path.

    Raises FileNotFoundError when none of the candidates exist.
    """
    candidates = [Path(path) / name for name in filelist]
    for candidate in candidates:
        if candidate.exists():
            return candidate
    listing = ", ".join(str(x) for x in candidates)
    raise FileNotFoundError(f'None of these files exist: {listing}')
|
617c4a95019a52ea3d4e5ca169982234dc5318ed
| 608,946
|
def k(candles, window):
    """Return the `window` most recent candles (trailing slice of the list).

    Args:
        candles (list of dict): Candle data
        window (int): number of candles to extract
    Returns:
        list of dict: Recent candle data

    NOTE(review): window == 0 yields the whole list (candles[-0:]) — confirm
    callers never pass 0.
    """
    recent = candles[-window:]
    return recent
|
63dbfb1e6df12afe536a8d0c364158988addae6c
| 180,461
|
def make_filter_kwargs(request):
    """
    Build queryset filter kwargs hiding objects inappropriate for the current
    user (private entries are hidden from users lacking the
    'blog.can_see_private' permission).
    """
    user = request.user
    can_see_private = user.is_authenticated() and user.has_perm('blog.can_see_private')
    return {} if can_see_private else {'private': 0}
|
d5cf2ec9102542168ebced4b0cf97b5ebd9e2b07
| 61,670
|
def sanitize_parameter_overrides(parameter_overrides):
    """
    Flatten guided-deploy parameter overrides to plain values.

    A guided deploy stores each override as a dict with extra metadata (e.g.
    whether the value is hidden); plain values are passed through untouched.

    :param parameter_overrides: dictionary of parameter key values.
    :return: dict mapping each key to its bare value.
    """
    sanitized = {}
    for key, value in parameter_overrides.items():
        sanitized[key] = value.get("Value") if isinstance(value, dict) else value
    return sanitized
|
c7b0e4e9acb31f4680b8c6946e4c6fd0c1083a25
| 246,463
|
def _get_ips(ips_as_string):
    """Split a space-separated string (first token is a header) into IPv4 and
    IPv6 lists.  A token containing '.' and no ':' counts as v4; anything
    else as v6."""
    tokens = ips_as_string.split(" ")[1:]  # skip the header
    ips_v4, ips_v6 = [], []
    for token in tokens:
        if not token:
            continue
        bucket = ips_v4 if ("." in token and ":" not in token) else ips_v6
        bucket.append(token)
    return ips_v4, ips_v6
|
7f1c88f3006faab44680af39c9e87661a35e4462
| 685,220
|
def encode_module_value(v):
    """
    Encode a class/function for serialization: builtins become their bare
    name; everything else becomes {"module": ..., "name": ...}.
    """
    module = v.__module__
    name = getattr(v, "__qualname__", v.__name__)
    if module == "builtins":
        return name
    return {"module": module, "name": name}
|
497b8838f8458ff973bd9d3a30b839b328d0ab11
| 16,919
|
def getrinputs(rtyper, graph):
    """Return the reprs of the graph's input arguments via rtyper.bindingrepr."""
    return list(map(rtyper.bindingrepr, graph.getargs()))
|
bb0f8861a29cd41af59432f267f07ff67601460c
| 4,477
|
def to_lowercase(words):
    """Lowercase every word-token.

    Parameters
    ----------
    words : iterable of str
        Original word-token list.

    Returns
    -------
    list of str
        New list with all tokens lowercased.
    """
    return [word.lower() for word in words]
|
5ebb46304d17d427ae7b4ef125378ba66304817a
| 319,230
|
from typing import Any
def list_joined(separator_item: Any, items: list[Any]) -> list[Any]:
    """
    Interleave separator_item between the elements of items — the list
    analogue of ", ".join(...).

    >>> list_joined("HI", ["yo", "cats"])
    ['yo', 'HI', 'cats']
    """
    joined: list[Any] = []
    for early_item in items[:-1]:
        joined.append(early_item)
        joined.append(separator_item)
    joined.extend(items[-1:])
    return joined
|
280a8496360fbc870675d7c311600665541c5a47
| 326,481
|
import collections
def read_gathered_las(path):
    """Parse whitespace-separated "block las_path" lines into a dict of
    int(block) -> [las_paths]."""
    gathered = collections.defaultdict(list)
    with open(path) as stream:
        for record in stream:
            block_id, las_path = record.split()
            gathered[int(block_id)].append(las_path)
    return gathered
|
f9d32da9d1dfcf3e569c5477e6c369e65a8a6074
| 146,564
|
def frmt_time(seconds: float, short: bool = False, width: int = 0) -> str:
    """Return a formatted string for a time given in seconds.

    Parameters
    ----------
    seconds : float
        Time value to format.
    short : bool, optional
        Use the compact MM:SS / HH:MMh representation.
    width : int, optional
        Minimum length of the returned string (right-aligned padding).

    Returns
    -------
    time_str : str
    """
    string = "00:00"
    # short time string
    if short:
        if seconds > 0:
            mins, secs = divmod(seconds, 60)
            # Fixed off-by-one: `mins > 60` left exactly one hour rendered
            # as "60:00" instead of "01:00h".
            if mins >= 60:
                hours, mins = divmod(mins, 60)
                string = f"{hours:02.0f}:{mins:02.0f}h"
            else:
                string = f"{mins:02.0f}:{secs:02.0f}"
    # Full time strings
    else:
        if seconds < 1e-3:
            # Sub-millisecond: report microseconds (μs).
            micros = 1e6 * seconds
            string = f"{micros:.0f}\u03BCs"
        elif seconds < 1:
            millis = 1000 * seconds
            string = f"{millis:.1f}ms"
        elif seconds < 60:
            string = f"{seconds:.1f}s"
        else:
            mins, seconds = divmod(seconds, 60)
            if mins < 60:
                string = f"{mins:.0f}:{seconds:04.1f}min"
            else:
                hours, mins = divmod(mins, 60)
                string = f"{hours:.0f}:{mins:02.0f}:{seconds:02.0f}h"
    if width > 0:
        string = f"{string:>{width}}"
    return string
|
e5354b5e0769d69c9b837c745e1cf6e424e2945a
| 286,549
|
import gzip
def read_wet_file(wet_file, max_lines=-1):
    """
    Read a gzipped WET file into a list of stripped lines.

    Args:
        wet_file (str): path to input WET file (gz format).
        max_lines (int): maximum number of lines to read; non-positive
            values read the whole file.
    Returns:
        list of str: the (possibly truncated) file contents.
    """
    output = []
    with gzip.open(wet_file, mode='rt', encoding='utf-8') as f:
        for line in f:
            output.append(line.strip())
            # Stop once exactly max_lines lines have been collected.  The
            # previous `if i > max_lines` check overshot by two lines.
            if 0 < max_lines <= len(output):
                break
    return output
|
c915b242100889ff4e4741514cda45cf1c0ea28f
| 68,594
|
def delete_player(name, player_list):
    """Return a copy of player_list without entries whose first field (the
    player's name) equals `name`."""
    return [player for player in player_list if player[0] != name]
|
dd1f4baffa75e1672727e8785727c00a1780412d
| 169,235
|
import six
def is_jid(jid):
    """
    Return True if the passed-in value looks like a job id: a 20-digit
    timestamp, either alone or followed by '_<suffix>'.
    """
    if not isinstance(jid, six.string_types):
        return False
    # Exactly 20 chars, or at least 22 chars with '_' right after the stamp.
    if len(jid) != 20 and (len(jid) <= 21 or jid[20] != "_"):
        return False
    try:
        int(jid[:20])
    except ValueError:
        return False
    return True
|
e4dfcf00c5a02205f03b1383066703ffdb517d54
| 676,570
|
import math
def sec(angle):
    """
    Return the secant (1/cos) of the given angle.

    The domain of secant excludes PI/2.0 + k*PI (integer k), where
    cos(angle) is 0; values within 1e-6 of a pole raise ValueError.

    Parameter angle: the secant angle.
    Precondition: angle is an int or float in the domain of secant
    (bool is deliberately rejected by the exact type check).
    """
    if type(angle) not in (int, float):
        raise TypeError(repr(angle) + ' is not a number')
    cosine = math.cos(angle)
    if abs(cosine) < 0.000001:  # Close enough to a pole
        raise ValueError(repr(angle) + ' is outside of the domain of secant')
    return 1 / cosine
|
8f28f0b493997a98b297a5c82b759b7f69480f11
| 444,554
|
import re
def filterTimes(lines):
    """Drop lines carrying run-specific timing/input info so two outputs can
    be compared deterministically."""
    volatile = re.compile(r"((running|Program|start|end) (t|T)(ime)|(input)|demo|timesolver)")
    return [line for line in lines if volatile.search(line) is None]
|
2c66f4569e85f509958243b46afba274eb8a474d
| 560,939
|
def get_dim_last_value(array, dim):
    """Return the element at the last index along `dim` and index 0 on every
    other axis."""
    index = tuple(array.shape[dim] - 1 if axis == dim else 0
                  for axis in range(len(array.shape)))
    return array[index]
|
ff934116c48e027f5feaf02da9b6006285f10134
| 491,472
|
def calc_overturning_stf(ds,grid,doFlip=True):
    """
    Only for simple domains, compute meridional overturning streamfunction
    Parameters
    ----------
    ds : xarray Dataset from MITgcm output, via
        e.g. xmitgcm.open_mdsdataset
        must contain 'V' or 'VVELMASS' fields
    grid : xgcm grid object defined via xgcm.Grid(ds)
    doFlip : if true, compute by accumulating from bottom to top
    Output
    ------
    ov_stf : xarray DataArray containing 2D field with
        overturning streamfunction in Sv above
    """
    # Grab the right velocity field from dataset ('V' preferred over
    # 'VVELMASS' when both exist).
    if 'V' in ds.keys():
        vstr = 'V'
    elif 'VVELMASS' in ds.keys():
        vstr = 'VVELMASS'
    else:
        raise TypeError('Could not find recognizable velocity field in input dataset')
    # Compute volumetric transport: v * cell zonal width * cell thickness.
    v_trsp = ds[vstr] * ds['dxG'] * ds['drF']
    if vstr != 'VVELMASS':
        # 'V' is not mass-weighted; apply the partial-cell fraction hFacS.
        print(f' *** Multiplying {vstr} by hFacS***')
        v_trsp = v_trsp * ds['hFacS']
    v_trsp = v_trsp.sum(dim=['XC'])  # zonal integral
    # flip dim, accumulate in vertical, flip back
    if doFlip:
        v_trsp = v_trsp.isel(Z=slice(None,None,-1))
    ov_stf = grid.cumsum(v_trsp,'Z',boundary='fill')
    if doFlip:
        # NOTE(review): sign flip only in the bottom-up branch — presumably
        # so both orientations yield the same sign convention; confirm.
        ov_stf = -ov_stf.isel(Zl=slice(None,None,-1))
    # Convert m^3/s to Sv (1 Sv = 10^6 m^3/s).
    ov_stf = ov_stf * 10**-6
    return ov_stf
|
d7d25368268dc16c4603a88a3a11607772f04da4
| 700,559
|
def deepgetattr(obj, attr):
    """Resolve a dotted attribute path (e.g. "a.b.c") against obj and return
    the final value."""
    current = obj
    for name in attr.split('.'):
        current = getattr(current, name)
    return current
|
49975e1db8a684484a6322a5e35814ef17ea2fc6
| 476,164
|
def as_number(as_number_val: str) -> int:
    """Convert an AS number (asdot "X.Y" or asplain) to asplain as an int."""
    text = str(as_number_val)
    if "." not in text:
        return int(text)
    high, low = text.split(".")
    return (int(high) << 16) + int(low)
|
083171f89b06259e849e49cf0b89b0459bb75ac9
| 616,632
|
def read_input() -> str:
    """
    Prompt on stdout with 'search: ' and read one line from stdin.

    :return: the line entered by the user
    """
    print('search: ', end="")
    user_query = input()
    return user_query
|
d3525299ff6409d1933780d477d88b2cb17535f1
| 474,708
|
def translation_from_matrix(matrix):
    """Extract the translation vector (last column, top n rows) from an
    (n+1)x(n+1) affine matrix."""
    n = matrix.shape[0] - 1
    return matrix[:n, n]
|
3d8cb15bab73471ffc844ea929920df1c5b6cecc
| 485,523
|
import torch
def compute_accuracy(net, data_loader, device, print_result=True):
    """
    Compute classification accuracy of a network over a data loader.

    Note: the network is invoked as net(images, labels), so it must accept
    both arguments.  The net is put in eval mode for the pass and restored
    to train mode afterwards; gradients are disabled throughout.

    :param net: torch.nn.Module
    :param data_loader: iterable of (images, labels) batches
    :param device: torch.device
    :param print_result: bool, print the accuracy when True
    :return: float, accuracy in percent
    """
    n_correct = 0
    n_seen = 0
    net.eval()
    with torch.no_grad():
        for batch in data_loader:
            images, labels = batch[0].to(device), batch[1].to(device)
            logits = net(images, labels)
            predictions = logits.data.max(1)[1]
            n_seen += labels.size(0)
            n_correct += (predictions == labels).sum().item()
    net.train()
    accuracy = 100 * n_correct / n_seen
    if print_result:
        print('Accuracy: %.1f %%' % (accuracy))
    return accuracy
|
cabc9b7bc9cf4f17b5d162f457b7ce3dde45acc3
| 323,379
|
import sqlite3
def get_categories(table="iherb", db_name="databases/items.db"):
    """
    Extract the unique super-categories (first comma-separated token of each
    distinct `category` value) from the given table.

    :param table: table to read from (interpolated into the SQL — trusted input only)
    :param db_name: path to the SQLite file
    :return: list of unique super-category strings (unordered)
    """
    sql = "SELECT DISTINCT category FROM {}".format(table)
    connection = sqlite3.connect(db_name)
    cursor = connection.cursor()
    unique = {row[0].split(",")[0] for row in cursor.execute(sql).fetchall()}
    connection.close()
    return list(unique)
|
b497093fc0d9402b273b02f3574a9340fb96ffce
| 499,830
|
def even(x):
    """
    even :: Integral a => a -> Bool
    True when the integral value is divisible by two, False otherwise.
    """
    _, remainder = divmod(x, 2)
    return remainder == 0
|
b298173d03658bbc5be7df5b343e363dcdd93647
| 668,450
|
def calculate_age(year1: int, year2: int) -> int:
    """Return the absolute difference between two years (an age)."""
    return abs(year1 - year2)
|
a4b346f09ad880894852e6554827713477073f78
| 108,061
|
def swift_variable_from_key(key, variable_name):
    """Generate a Swift static computed String variable for a string key.

    Args:
        key: the string key for the variable.
        variable_name: the name of the variable.
    Returns:
        Generated Swift code returning "<key>".localized.
    """
    return f'static public var {variable_name}: String {{ return "{key}".localized }}'
|
f4ee9201bc33cb1914ff8ed1af36e6cd16224d79
| 161,350
|
def bisect_left(func, val, low, high):
    """
    Like bisect.bisect_left, but over a function of the index.

    Returns the smallest index i in [low, high] such that func(e) >= val for
    all e >= i (i.e. the leftmost insertion point for val in the sorted
    virtual sequence func(low..high-1)).

    >>> bisect_left([1,2,3,3,4].__getitem__, 3, 0, 4)
    2
    >>> bisect_left([1,2,3,3,4].__getitem__, 4, 0, 4)
    4
    >>> bisect_left([1,2,3,6,8].__getitem__, 4, 0, 4)
    3
    """
    lo, hi = low, high
    while hi > lo:
        mid = (lo + hi) // 2
        if val > func(mid):
            lo = mid + 1
        else:
            hi = mid
    return lo
|
de7b72585657c183176b4cd1c6b5301e0f837a01
| 695,873
|
def tokenLookup(instrument_df, symbol_list):
    """Look up the instrument token (as int) for each trading symbol in the
    instrument dump DataFrame."""
    return [
        int(instrument_df[instrument_df.tradingsymbol == symbol].instrument_token.values[0])
        for symbol in symbol_list
    ]
|
f4f063bed53d11c685d204715dcdcb1522b3e1ae
| 196,405
|
import re
def isWindowsDriveLetterPath(filepath):
    """
    Returns True if given filepath starts with a Windows drive letter
    >>> isWindowsDriveLetterPath('C:\\boot.ini')
    True
    >>> isWindowsDriveLetterPath('/var/log/apache.log')
    False
    """
    # The old pattern used \w, which also matches digits and '_' — those are
    # not valid drive letters, so restrict to ASCII letters.
    return re.search(r"\A[A-Za-z]:", filepath) is not None
|
bc1c3950a91cec17bdb054b82f7bc9d782bce855
| 496,028
|
def chunker(seq, size):
    """
    Lazily yield successive chunks of `seq`, each `size` long (the last one
    may be shorter).

    >>> for group in chunker(['cat', 'dog', 'rabbit', 'duck', 'bird', 'cow', 'gnu', 'fish'], 3):
    >>>     print(group)
    Produces:
    ['cat', 'dog', 'rabbit']
    ['duck', 'bird', 'cow']
    ['gnu', 'fish']

    :param seq: the sequence to iterate.
    :param size: the size of a single chunk.
    :return: a generator of chunks
    """
    return (seq[start:start + size] for start in range(0, len(seq), size))
|
7a8ee76cce7c0bfa7ec3e09e02a4fa2c7fa0afa5
| 449,999
|
def get_subcritical_coeffs(b, q0, hL, g):
    """2nd- and 0th-order term coefficients of the subcritical Bernoulli relation.

    Args:
        b: scalar or 1D ndarray; topo elevation at target locations.
        q0: scalar; conservative quantity hu at the left boundary (x = 0).
        hL: scalar; depth h at the right boundary (x = L).
        g: gravity.

    Returns:
        (C0, C2): constant-term and quadratic-term coefficients,
        C0 = q0^2 / (2g),  C2 = b - C0/hL^2 - hL.
    """
    constant_coeff = (q0 * q0) / (2. * g)
    quadratic_coeff = b - constant_coeff / (hL * hL) - hL
    return constant_coeff, quadratic_coeff
|
d769a97e603f7b66a742e04a714d1b6ec1e79fc6
| 539,141
|
import math
def to_degree7(r):
    """
    Convert radians to 'degrees' on a 0-7 scale (eighths of a full turn).

    NOTE(review): the result is not wrapped — r = 2*pi returns 8 and negative
    angles return negative values; confirm callers normalize first.

    :param r: angle in radians
    :return: direction in 7-value degrees
    """
    scaled = r * 4 / math.pi
    return round(scaled)
|
832a5bac58114f2b121cf0bd5dc92bc124ad3381
| 643,830
|
from typing import Counter
def build_vocab(annotations, threshold=1):
    """Create a word->index vocabulary from text captions.

    Args:
        annotations: dict with a 'caption' list of lower-case captions.
        threshold: minimum word occurrence count across all captions.
    Returns:
        word_to_idx: dict mapping tokens to indices; 0-2 are reserved for
        <PAD>, <START>, <END>.
    """
    counter = Counter()
    max_len = 0
    for caption in annotations['caption']:
        tokens = caption.split(' ')  # captions contain only lower-case words
        counter.update(tokens)
        if len(caption.split(" ")) > max_len:
            max_len = len(caption.split(" "))
    vocab = [word for word in counter if counter[word] >= threshold]
    print('Filtered %d words to %d words with word count threshold %d.' % (len(counter), len(vocab), threshold))
    word_to_idx = {u'<PAD>': 0, u'<START>': 1, u'<END>': 2}
    for offset, word in enumerate(vocab):
        word_to_idx[word] = 3 + offset
    print("Max length of caption: %d" %max_len)
    return word_to_idx
|
48063d9079befe33d9d53803ecf7a06ee61f3942
| 594,113
|
def print_color(text):
    """
    Wrap text in ANSI escape codes rendering it yellow on a terminal.

    :param text: string to be colored
    :return: the colored text
    """
    yellow, reset = '\033[0;33m', '\033[0m'
    return yellow + text + reset
|
467014b19b346c0151e9a97127b1ae6420d5cbef
| 511,367
|
import math
def manning_equation(hydraulic_radius, manning_coefficient, slope):
    """Manning formula: average gravity-driven liquid velocity,
    R^(2/3) * S^(1/2) / n.

    :param hydraulic_radius: hydraulic radius of the pipe or channel [m]
    :param manning_coefficient: Gauckler-Manning coefficient
    :param slope: slope of the hydraulic grade line [-]
    """
    velocity_numerator = math.pow(hydraulic_radius, 2 / 3) * math.pow(slope, 0.5)
    return velocity_numerator / manning_coefficient
|
313db8e85c74d2f346b24617ea6228627f589e57
| 51,213
|
from typing import List
from pathlib import Path
import random
def randomly_choose_files(target_dir: str, num_files: int) -> List[Path]:
    """Return num_files randomly chosen entries from target_dir (non-recursive).

    Args:
        target_dir (str): directory to search for files.
        num_files (int): number of files to return.
    Returns:
        List[Path]: randomly chosen files.
    """
    entries = list(Path(target_dir).glob("*"))
    # Shuffle indices by sorting on a random key, then take a prefix.
    shuffled_order = sorted(list(range(len(entries))), key=lambda x: random.random())
    return [entries[position] for position in shuffled_order[:num_files]]
|
e0c64836f465fba58b66cb0df3b5557263bdd919
| 478,013
|
def linspace(first, last, n):
    """Return n evenly spaced values from first to last inclusive.

    The previous version divided by (n - 1) unconditionally, so n == 1
    raised ZeroDivisionError; a single-point range now returns [first].
    n <= 0 returns an empty list.
    """
    if n == 1:
        return [first]
    return [(last - first) * x / (n - 1) + first for x in range(n)]
|
2f126d8bcf89ebcab89876798eb5d03c98221959
| 345,474
|
def zero(vals):
    """Constant function: ignore the input and return 0 (useful as a default
    score/weight callback)."""
    return 0
|
f294eb88c5e0680fb8e5196c2259d288c9682ecd
| 400,393
|
def camelcase(text):
    """Convert whitespace-separated text to CamelCase.

    Notes:
        The first letter of each word is uppercased; existing uppercase
        letters are left unchanged.  Words are split on whitespace.
        None passes through as None; the empty string as itself.

    Args:
        text: string to convert.
    Returns:
        string, converted text.
    """
    if text is None:
        return None
    if not text:
        return text
    return ''.join(chunk[0].upper() + chunk[1:] for chunk in text.split())
|
e27ccce9c9fffe62e73a552270a9358f248b759a
| 78,197
|
def insert_query(db_conn, group_id, query, domain=None):
    """
    Insert query and optional domain.
    If the pair already exists, simply return the ID of the existing pair.
    Args:
        db_conn (psycopg2.extensions.connection): Connection to a database
        group_id (int): A unique integer identifier for the group of results
        query (str): A query string
        domain (str): Optional domain (eg. "data.cityofchicago.org")
    Returns:
        The query ID for the newly inserted query (or the existing query if its already in the DB)
    """
    with db_conn.cursor() as cur:
        # Conditional insert: only adds the (query, domain) pair when it is
        # not already present; RETURNING yields the new id (or no row).
        # NOTE(review): INSERT ... WHERE NOT EXISTS is racy under concurrent
        # writers — confirm a unique constraint backs this table.
        insert = "INSERT INTO arcs_query (query, domain) " \
                 "SELECT %s, %s WHERE NOT EXISTS " \
                 "(SELECT id FROM arcs_query WHERE query=%s AND domain=%s) " \
                 "RETURNING id"
        cur.execute(insert, (query, domain, query, domain))
        last_result = cur.fetchone()
        if not last_result:
            # The pair already existed: look up its id instead.
            cond_select = "SELECT id FROM arcs_query WHERE query=%s AND domain=%s"
            cur.execute(cond_select, (query, domain))
            last_result = cur.fetchone()
        query_id = last_result[0]
        # Always link the query to the result group (join table row).
        insert2 = "INSERT INTO arcs_query_group_join (query_id, group_id) " \
                  "VALUES (%s, %s)"
        cur.execute(insert2, (query_id, group_id))
        return query_id
|
4b231ba8c40ac576f8851a820840e63fc2a5e19f
| 290,234
|
def get_fill(card):
    """Determine a card's fill style from its SVG: "clear", "half" or "solid".

    Args:
        card (webelement): a visible card
    Returns:
        str: card's fill
    """
    use_xpath = ".//div/*[name()='svg']/*[name()='use'][1]"
    if card.find_element_by_xpath(use_xpath).get_attribute("fill") == "transparent":
        return "clear"
    # Non-transparent: a stripe mask means half fill, otherwise solid.
    if card.find_element_by_xpath(use_xpath).get_attribute("mask") == "url(#mask-stripe)":
        return "half"
    return "solid"
|
fe02bcf071f6c5bf57fb949e5859a834b303f7e8
| 124,017
|
def product_exclude_itself(nums):
    """
    For each index i, compute the product of all elements except nums[i].

    Two-pass prefix/suffix method: first fill result[i] with the product of
    everything left of i, then sweep right-to-left multiplying in the
    running suffix product.

    :param nums: given array
    :type nums: list[int]
    :return: B with B[i] = A[0] * ... * A[i-1] * A[i+1] * ... * A[n-1]
    :rtype: list[int]
    """
    size = len(nums)
    result = [1] * size
    for i in range(1, size):
        result[i] = result[i - 1] * nums[i - 1]
    suffix = 1
    for i in reversed(range(size)):
        result[i] *= suffix
        suffix *= nums[i]
    return result
|
1d755a1415331203a314a3410155f3b5184a12c9
| 216,453
|
import time
def fetch_retry(headers, func, *args):
    """Retry func(*args) after honoring an Azure throttling (HTTP 429) back-off.

    Sleeps for the Retry-After header value when present, otherwise for a
    default 30 seconds, then re-invokes the fetch.

    HTTP header values arrive as strings, so the value is coerced to float
    before sleeping — the previous code passed the raw value to time.sleep,
    which raises TypeError for string headers.
    """
    if "Retry-After" in headers:
        time.sleep(float(headers["Retry-After"]))
    else:
        time.sleep(30)
    return func(*args)
|
749e0c59be906291f0714576b32f80965228cce1
| 599,378
|
def Ventilator(P1, m1, Eta, V, airDensity, specificHeatCapacityDryAir, calc):
    """
    Power demand of a ventilator and/or the air temperature increase it causes.

    Power scales with the cube of the mass-flow ratio (fan affinity law).

    :param P1: nominal power of the ventilator [W]
    :param m1: nominal air mass flow of the ventilator [kg/s]
    :param Eta: efficiency coefficient of the ventilator
    :param V: current volume flow of the ventilator [m^3/s]
    :param airDensity: air density [kg/m^3]
    :param specificHeatCapacityDryAir: specific heat capacity of dry air [J/kg]
    :param calc: selects the return value(s): 'Power', 'TemperatureIncrease'
        or 'Power&TemperatureIncrease'
    :return: P [W], delta_T_out [degC], or the (P, delta_T_out) pair
    """
    mass_flow = airDensity * V
    P = ((mass_flow / m1) ** 3) * P1
    delta_T_out = (P * Eta) / (mass_flow * specificHeatCapacityDryAir)
    if calc == 'Power':
        return P
    if calc == 'TemperatureIncrease':
        return delta_T_out
    if calc == 'Power&TemperatureIncrease':
        return P, delta_T_out
    raise Exception('Wrong input for "calc". Allowed are: Power, TemperatureIncrease, Power&TemperatureIncrease')
|
86a7826739125e7637fa86ad99614c7e67583b22
| 356,962
|
def allsame(iterable):
    """Return True iff every element of iterable equals the first one.

    Raises ValueError on an empty iterable (there is no first element to
    compare against).
    """
    iterator = iter(iterable)
    try:
        reference = next(iterator)
    except StopIteration:
        raise ValueError("iterable cannot be empty")
    # Compare each remaining element against the first (element on the left,
    # preserving which side's __eq__ is invoked).
    return all(current == reference for current in iterator)
|
be48aeb16d37cd04d5dcaad56042788e884546d3
| 337,591
|
def node2vec_format(path, edges):
    """
    Store edges in the format expected by node2vec algorithms: one "u v"
    line per edge.

    :param path: path to the output file.
    :param edges: list of (u, v) edges
    """
    with open(path, 'w') as out:
        for u, v in edges:
            out.write(f"{u} {v}\n")
    print(f"Test edges stored in node2vec expected format, at: {path}")
    return None
|
71ee0c763a3720d0b3a889ce293390c0a9216a48
| 529,489
|
from typing import Union
def safe_name(id: Union[str, int], unsafe_name: str) -> str:
    """
    Remove unsafe characters from Canvas object names (spaces, slashes,
    colons, question marks) and prepend "{ID}_" so two items with the same
    name don't conflict.

    :param id: Object ID
    :param unsafe_name: Object name, possibly containing illegal characters
    :return: Safe lower-cased version with "{ID}_" prepended
    """
    cleaned = unsafe_name.lower().strip()
    for bad, replacement in ((' ', '_'), ('/', '-'), (':', ''), ('?', '')):
        cleaned = cleaned.replace(bad, replacement)
    return f"{id}_{cleaned}"
|
d130ac052e5f91730a16f86b32aa03b5c4722645
| 471,499
|
def lat_to_km(latitude) -> float:
    """Express a latitude in kilometers to the north (~110.574 km/degree).

    Args:
        latitude (float): latitude in degrees.
    Returns:
        float: kilometers to the north.
    """
    return latitude * 110.574
|
c6772eef5a920be0d9bd09f08e544725562d2e50
| 543,255
|
def get_offer_poster_name(html_parser):
    """
    Extract the poster's name (and surname if available) from an offer page.

    :param html_parser: a BeautifulSoup object
    :rtype: string or None
    :return: the stripped poster name, or None when no name element exists
    """
    holder = (html_parser.find(class_="box-person-name")
              or html_parser.find(class_="seller-box__seller-name"))
    if holder:
        return holder.text.strip()
    return None
|
a9e79a23bfe869fa15a7e690cd0a6887d085809d
| 581,410
|
def niceformat(ret):
    """Express a fraction as a percentage rounded to one decimal place."""
    return round(100 * ret, 1)
|
aa9916a6db48f5e024e0027e5b9d892326cfbfe4
| 194,566
|
def make_jump_url(base_message, dispand_message, extra_messages):
    """
    Build a jump URL carrying extra expansion metadata in its query string.

    :param base_message: the message that contained the message link
    :param dispand_message: the message being expanded
    :param extra_messages: follow-up messages produced by the expansion
        (e.g. images or embeds)
    :return: the enriched jump URL
    """
    # base_aid: author id of the first message the link points to
    # aid: id of the user who posted the message link
    extras = ",".join(str(message.id) for message in extra_messages)
    return (f"{dispand_message.jump_url}?base_aid={dispand_message.author.id}"
            f"&aid={base_message.author.id}&extra={extras}")
|
10a11b84eb40d5723413f2e7316a895ab57887c5
| 522,931
|
def flatten(array, connectivity):
    """Reshape an SPM buffer array to a 2-dimensional map for connectivity
    processing.

    Parameters
    ----------
    array : ndarray
        N-dimensional array (non-adjacent dimension first).
    connectivity : Connectivity
        N-dimensional connectivity.

    Returns
    -------
    flat_array : ndarray
        The input reshaped if necessary; input and output always share the
        same underlying data buffer.
    """
    if array.ndim == 2 or not connectivity.custom:
        return array
    flat_view = array.reshape((array.shape[0], -1))
    # reshape must return a view here, never a copy.
    assert flat_view.base is array
    return flat_view
|
f16a532ad232556903ce0ea7c518a60c01ad5dad
| 549,273
|
def pyfloat(v_str):
    """Convert a Fortran float literal (d/D exponent marker) to a Python float.

    There is no loss of information going from single to double precision.
    """
    normalized = v_str.lower().replace('d', 'e')
    return float(normalized)
|
e3467d81691302787daa68559755ccb452ac7365
| 311,070
|
def negative(n: int, /) -> int:
    """
    Convert `n` to its negative form: -|n|.

    E.g:
    >>> negative(123)
    -123
    >>> negative(-123)
    -123
    """
    return n if n < 0 else -n
|
cdf8acf68426dc5970d64bccd90053778a55f42d
| 302,368
|
def floatValidator(text):
    """
    TextEdit validator for (possibly partial) float input.

    Accepts the empty string, a lone '-', and digit strings with at most one
    '.' and an optional leading '-'.
    """
    if not text:
        return True
    if text.count('.') > 1:
        return False
    if text.replace('.', '').isdigit():
        return True
    if text[0] == '-':
        # A bare minus is a valid partial entry; otherwise the remainder
        # must be digits with the dot removed.
        return len(text) == 1 or text[1:].replace('.', '').isdigit()
    return False
|
e4898bbf827b95fe4899b2d71d491698b344a612
| 614,726
|
def format_interval(t):
    """
    Formats a number of seconds as a clock time, [H:]MM:SS
    Parameters
    ----------
    t : int
        Number of seconds.
    Returns
    -------
    out : str
        [H:]MM:SS
    """
    total_minutes, seconds = divmod(int(t), 60)
    hours, minutes = divmod(total_minutes, 60)
    # Only show the hour field when it is non-zero.
    if hours:
        return f'{hours:d}:{minutes:02d}:{seconds:02d}'
    return f'{minutes:02d}:{seconds:02d}'
|
ea67781ea5ceb31c107857ccddc534c4f71ce587
| 473,109
|
def easy_lt(a, b):
    """Return whether the first operand compares strictly less than the second."""
    is_less = a < b
    return is_less
|
1558913263b5fd2d899ba07d80d0a0f22a3ac1d7
| 285,283
|
def _rename(dataframe):
"""Rename columns or reindex to make sense after deletion or addition of a
new line."""
dataframe = dataframe.reset_index(drop=True)
dataframe.columns = (i for i, _ in enumerate(dataframe.columns))
return dataframe
|
c458435c7ecbd14b94741f20e35a29cbab137279
| 160,507
|
def latest_no_overlap(jobs, n):
    """
    Find the latest job before the nth job that does not overlap with it,
    i.e. whose end time is at or before the nth job's start time.
    Returns -1 when no such job exists.
    """
    start_n = jobs[n][0]
    for idx in reversed(range(n)):
        if jobs[idx][1] <= start_n:
            return idx
    return -1
|
0d0369a076fc7499537b9d74a066972236a0a2fc
| 287,151
|
def n_points(request):
    """
    Number of points in the test data set, taken from the fixture
    request's parametrization.
    """
    count = request.param
    return count
|
cc93bfdb90b5a085b27c5b99228334b7f3f2841e
| 475,347
|
import math
def characteristic_dimension(bb):
    """Return the length of the bounding box diagonal.

    *bb* is a pair of opposite corner points, each an (x, y, z) triple.
    """
    (x1, y1, z1), (x2, y2, z2) = bb
    dx, dy, dz = x1 - x2, y1 - y2, z1 - z2
    return math.sqrt(dx * dx + dy * dy + dz * dz)
|
8be126d80c84d875c3dccf7f4a0901b428605396
| 433,842
|
def parse_int_list(range_string, delim=',', range_delim='-'):
    """
    Return a sorted list of positive integers parsed from *range_string*.
    Reverse of :func:`format_int_list`.
    Args:
        range_string (str):
            String of comma separated positive integers or ranges
            (e.g. '1,2,4-6,8'), as in a printer dialog's page-range field.
        delim (char):
            Defaults to ','. Separates integers and contiguous ranges.
        range_delim (char):
            Defaults to '-'. Indicates a contiguous range of integers.
    >>> parse_int_list('1,3,5-8,10-11,15')
    [1, 3, 5, 6, 7, 8, 10, 11, 15]
    """
    numbers = []
    for token in range_string.strip().split(delim):
        # Skip empty tokens (e.g. from a trailing or doubled delimiter).
        if not token:
            continue
        if range_delim in token:
            bounds = [int(part) for part in token.split(range_delim)]
            # min/max makes reversed ranges like '8-5' still expand.
            numbers.extend(range(min(bounds), max(bounds) + 1))
        else:
            numbers.append(int(token))
    return sorted(numbers)
|
892e87fb50842a106a17492bac07b94a3d19e791
| 513,928
|
def contain_any(iterable, elements):
    """
    Return `True` if any of the `elements` are contained in the `iterable`,
    `False` otherwise.
    """
    return any(element in iterable for element in elements)
|
cc458e91094a7396cc216332da0e8d697efd1bbe
| 272,589
|
def value_or_none(dictionary: dict, key: str):
    """Return the value stored under *key*, or None when the key is absent."""
    return dictionary.get(key)
|
0f305f59842f0a1a499fb916cd7bb5b61dddb174
| 283,641
|
def make_fixed_length(text, desired_len):
    """
    Return *text* adjusted to exactly *desired_len* characters:
    longer text is truncated, shorter text is right-padded with spaces.
    """
    # Slicing handles truncation; str.ljust pads with spaces and is a no-op
    # when the text is already long enough.  This replaces the original
    # character-at-a-time padding loop, which was quadratic.
    return text[:desired_len].ljust(desired_len)
|
31da1c756593bb3ede571ed57c0a2316a8c3069e
| 323,218
|
from typing import Any
import json
def compact_json(obj: Any) -> str:
    """Encode *obj* as JSON in compact form (no spaces after separators)."""
    compact_separators = (",", ":")
    return json.dumps(obj, separators=compact_separators)
|
0d0ad626eabea97e547f5181a083ba0646c4d83b
| 692,152
|
from typing import Dict
from typing import Any
import uuid
def make_test_experiment_config(config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Create a short experiment that based on a modified version of the
    experiment config of the request and monitors its progress for success.
    The short experiment is created as archived to be not user-visible by
    default.
    The experiment configuration is modified such that:
    1. The training step takes a minimum amount of time.
    2. All checkpoints are GC'd after experiment finishes.
    3. The experiment does not attempt restarts on failure.
    """
    config_test = config.copy()
    config_test.update(
        {
            "description": "[test-mode] {}".format(
                config_test.get("description", str(uuid.uuid4()))
            ),
            "batches_per_step": 1,
            "min_validation_period": 1,
            "checkpoint_storage": {
                **config_test.get("checkpoint_storage", {}),
                "save_experiment_best": 0,
                "save_trial_best": 0,
                "save_trial_latest": 0,
            },
            "searcher": {
                "name": "single",
                "metric": config_test["searcher"]["metric"],
                "max_steps": 1,
            },
            "resources": {**config_test.get("resources", {"slots_per_trial": 1})},
            "max_restarts": 0,
        }
    )
    # BUG FIX: this previously called `config.setdefault(...)`, which mutated
    # the caller's dict after the copy was taken, so the default never made
    # it into the returned config.  Apply the default to config_test instead.
    config_test.setdefault(
        "data_layer", {"type": "shared_fs", "container_storage_path": "/tmp/determined"}
    )
    return config_test
|
72ee62a9b6fa977aaff6f0621e18e0cdb0a8a0bb
| 691,419
|
def get_colour_set(n_colours, rng):
    """
    Generate a list of unique, randomly drawn RGB colours.
    # Arguments:
        n_colours: the number of colours to generate.
        rng: an instance of numpy.random.Generator.
    # Returns:
        colours: a list of `n_colours` distinct (r, g, b) tuples in [0, 1).
    """
    colours = []
    while len(colours) < n_colours:
        candidate = (rng.uniform(), rng.uniform(), rng.uniform())
        # Re-draw on the (vanishingly unlikely) event of a duplicate.
        if candidate not in colours:
            colours.append(candidate)
    return colours
|
ff72461744996ced60cc677be56acfbf573f47e7
| 647,918
|
import base64
def featB64encode(feat):
    """Base64 encode a feature buffer.
    :param feat: feature
    :type feat: :class:`numpy.ndarray`
    :return: base64-encoded bytes of the array's raw buffer
    """
    encoded = base64.b64encode(feat)
    return encoded
|
af509e6f3c67739f374c2762735d25b4ab80185c
| 52,291
|
def zsum(s, *args, **kwargs):
    """
    Sum a pandas Series, returning 0 for an empty series.

    pandas 0.21.0 changed sum() so that summing an empty Series yields NaN;
    this restores the zero-for-empty behavior.
    Meant to be set as pd.Series.zsum = zsum.
    """
    if s.empty:
        return 0
    return s.sum(*args, **kwargs)
|
e71307b0d67a5f1a5b6c8f329dad895a336f2171
| 405,343
|
def get_points(result):
    """Map a match result code to points: win 'W' -> 3, draw 'D' -> 1, anything else -> 0."""
    return {'W': 3, 'D': 1}.get(result, 0)
|
fe4597176f00b9c07ea1fcc1322bdaec895299bc
| 142,734
|
def sgi_1973_to_2016(sgi_id: str) -> str:
    """
    Convert the slightly different SGI1973 to the SGI2016 id format
    (the numeric suffix after '-' is zero-padded to two digits).
    :examples:
    >>> sgi_1973_to_2016("B55")
    'B55'
    >>> sgi_1973_to_2016("B55-19")
    'B55-19'
    >>> sgi_1973_to_2016("E73-2")
    'E73-02'
    """
    if "-" not in sgi_id:
        return sgi_id
    prefix, suffix = sgi_id.split("-")
    return f"{prefix}-{suffix.zfill(2)}"
|
8109c96b66ca7b0707536ec61d4d457410f5967e
| 648,884
|
def IpBinaryToDecimal(bin_ip):
    """
    Convert an IPv4 address from binary string notation to its decimal value.
    :param bin_ip: IPv4 in binary notation, e.g. 00001010000000000000000000000001
    :return: IPv4 in decimal notation, e.g. 167772161
    """
    return int(bin_ip, base=2)
|
20366a1667fd1f9c1f17e7c13c2292bd4a7e74b0
| 47,027
|
def in_circle(radius):
    """Return a predicate testing whether a complex number lies strictly
    inside the circle of the given radius centred at the origin
    (i.e. ``abs(z) < radius``, computed without the square root)."""
    def _inside(z):
        return z.real ** 2 + z.imag ** 2 < radius ** 2
    return _inside
|
6a9b634a1e2d57022cb2915a5670f9402f441bb0
| 158,890
|
import math
def week_of_month(dt):
    """Return the 1-based week-of-month index for the date/datetime *dt*.

    Weeks start on Monday, following ``date.weekday()``.
    """
    # Shift the day-of-month by the weekday of the 1st so partial first
    # weeks are counted as week 1.
    offset = dt.replace(day=1).weekday()
    return int(math.ceil((dt.day + offset) / 7.0))
|
416120eace7dbf2b8790d07781d99824bee4998d
| 606,408
|
def int_input(prompt="Text for prompt"):
    """Prompt repeatedly on the console until the user enters a valid
    integer, then return it."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            # Keep the loop going until we get something parseable.
            print("Input not a valid integer, please try again...")
|
47107542254d8497f4ab5dbb8085853cd10806bb
| 339,375
|
from dateutil import tz
from datetime import datetime
def tofrom_utc(timestamp, parseformat, from_utc=True):
    """
    Convert a timestamp to/from UTC time
    :param str timestamp: Date/time to modify
    :param str parseformat: Format of the timestamp to parse
    :param bool from_utc: True if source stamp is UTC; otherwise False
    :return: Converted timestamp
    :rtype: str
    """
    utc_zone = tz.tzutc()
    local_zone = tz.tzlocal()
    # Pick source/destination zones depending on direction.
    if from_utc:
        src_zone, dst_zone = utc_zone, local_zone
    else:
        src_zone, dst_zone = local_zone, utc_zone
    parsed = datetime.strptime(timestamp, parseformat).replace(tzinfo=src_zone)
    return parsed.astimezone(dst_zone).strftime(parseformat)
|
feaf28653500bf9df58f73e86d19690098f1951d
| 23,637
|
def get_bnumber_from_user_input(user_input):
    """
    Reduce a user-supplied Sierra b-number to its seven-digit form.

    The Sierra adapter VHS keys on the seven-digit form; user input may
    carry the 'b' prefix and/or a trailing check digit ('b1234567x',
    'b1234567' or '1234567').  Raises ValueError for anything else.
    """
    # 'b' prefix, with or without a trailing check digit.
    if user_input.startswith("b") and len(user_input) in (8, 9):
        return user_input[1:8]
    # Already the bare seven-digit form.
    if len(user_input) == 7 and user_input.isnumeric():
        return user_input
    raise ValueError(f"Not a valid bnumber: {user_input}")
|
1fcac3014833620e1f53629f6e4b51c2e8b1d53d
| 538,183
|
import collections
def columnarize(instances):
    """Columnarize inputs.
    Each line in the input is a dictionary of input names to the value
    for that input (a single instance). For each input "column", this method
    appends each of the input values to a list. The result is a dict mapping
    input names to a batch of input data. This can be directly used as the
    feed dict during prediction.
    For example,
      instances = [{"a": [1.0, 2.0], "b": "a"},
                   {"a": [3.0, 4.0], "b": "c"},
                   {"a": [5.0, 6.0], "b": "e"},]
      batch = prediction_server_lib.columnarize(instances)
      assert batch == {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                       "b": ["a", "c", "e"]}
    Arguments:
      instances: (list of dict) where the dictionaries map input names
        to the values for those inputs.
    Returns:
      A dictionary mapping input names to values, as described above.
    """
    columns = collections.defaultdict(list)
    for instance in instances:
        # BUG FIX: `dict.iteritems()` is Python 2 only and raises
        # AttributeError on Python 3; use `items()` instead.
        for k, v in instance.items():
            columns[k].append(v)
    return columns
|
5b027546090e53832a1185e36a163489cee55ff2
| 413,827
|
def results_contains_wordform(wordform: str, search_results) -> bool:
    """
    Return True if any search result's wordform text matches *wordform*.
    """
    for result in search_results:
        if result.wordform.text == wordform:
            return True
    return False
|
a984c5d4952399d7c2e098eb857c9a10f53424cf
| 174,731
|
import torch
def _train_step_mlp(x_batch, y_batch, model, optimizer):
"""
Perform single standard training step.
Args:
x_batch: Batch of inputs
y_batch: Batch of labels
model: torch.nn.Module
optimizer: torch.optim.Optimizer for model parameters
Returns:
Model output, batch loss
"""
# Compute forward pass on current batch
output = model(x_batch)
# Compute batch loss as cross entropy loss
batch_loss = torch.nn.functional.cross_entropy(output, y_batch)
# Compute gradients wrt current batch loss and perform parameter update
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
return output, batch_loss
|
f117e4b259aaaecea8bfea7ed15a85c831209a9b
| 167,457
|
def get_box_index(i_cur, j_cur, i_max, j_max):
    """ Return the location of the box (index n) in the design array.
    Args:
        i_cur(int or float): index of box in x direction
        j_cur(int or float): index of box in y direction
    Returns:
        n(int): index of box in design_array
        '-1' if i_cur and j_cur are '-1'
    """
    # Sentinel pair (-1, -1) maps to -1.
    if i_cur == -1 and j_cur == -1:
        return -1
    # Row-major layout with j_max + 1 columns; index starts at zero.
    return int(i_cur * (j_max + 1) + j_cur - 1)
|
ae810b7ccc579ad6e7811c3ca144d09b37ad7195
| 148,237
|
def _transpose(in_data, keys, field):
"""Turn a list of dicts into dict of lists
Parameters
----------
in_data : list
A list of dicts which contain at least one dict.
All of the inner dicts must have at least the keys
in `keys`
keys : list
The list of keys to extract
field : str
The field in the outer dict to use
Returns
-------
transpose : dict
The transpose of the data
"""
out = {k: [None] * len(in_data) for k in keys}
for j, ev in enumerate(in_data):
dd = ev[field]
for k in keys:
out[k][j] = dd[k]
return out
|
fc8137c06cbae2b2c39d1902acf271690d213b70
| 283,053
|
def output_fn(prediction, response_content_type):
    """
    Serialize the prediction for the response.

    The content type is currently ignored; the prediction is always
    returned as its plain string representation.
    """
    serialized = str(prediction)
    return serialized
|
fadb431ea4f9faa1079aafd2e81e26a2943da6e6
| 249,593
|
def _parse_raw_lesson(raw_lesson):
"""
Args:
raw_lesson: 'BeautifulSoup' objects contains lesson info
Returns:
dict: info about lesson
{
'number': str,
'time_bounds': str,
'info': str
}
"""
info = raw_lesson.find_all('td')
lesson_number = info[0].get_text()
time_bounds = "".join(str(item) for item in info[1].contents)\
.replace('<br/>', ' - ')
lesson_info = info[2].get_text()
return {
'number': lesson_number,
'time_bounds': time_bounds,
'info': lesson_info
}
|
20420665b45a171551a24ba083c087fdc7ab9b5f
| 216,878
|
def nonnegative_int(argument):
    """
    Convert *argument* to int and check it is nonnegative; raise
    ``ValueError`` otherwise.  (Directive option conversion function.)
    """
    converted = int(argument)
    if converted < 0:
        raise ValueError('negative value; must be positive or zero')
    return converted
|
5df66fe8104b3d3e93240bc3609dc68e1b4c17a2
| 615,593
|
def GetAllCombinations(choices, noDups=1, which=0):
    """ Does the combinatorial explosion of the possible combinations
    of the elements of _choices_.
    **Arguments**
      - choices: sequence of sequences with the elements to be enumerated
      - noDups: (optional) if this is nonzero, results with duplicates,
        e.g. (1,1,0), will not be generated
      - which: used in recursion
    **Returns**
      a list of lists
    >>> GetAllCombinations([(0,),(1,),(2,)])
    [[0, 1, 2]]
    >>> GetAllCombinations([(0,),(1,3),(2,)])
    [[0, 1, 2], [0, 3, 2]]
    >>> GetAllCombinations([(0,1),(1,3),(2,)])
    [[0, 1, 2], [0, 3, 2], [1, 3, 2]]
    """
    # Past the end: nothing to combine.
    if which >= len(choices):
        return []
    # Last position: each element stands alone.
    if which == len(choices) - 1:
        return [[item] for item in choices[which]]
    # Recurse for the tail, then prepend each head element to each tail.
    combos = []
    tails = GetAllCombinations(choices, noDups=noDups, which=which + 1)
    for item in choices[which]:
        for tail in tails:
            if not noDups or item not in tail:
                combos.append([item] + tail)
    return combos
|
47b796b33d5a968bce41a3469aee2b52396ca199
| 535,249
|
import hashlib
def md5(v1, *values):
    """Return an MD5 hash object fed with *v1* followed by each of *values*.

    NOTE(review): MD5 is not collision-resistant — fine for checksums and
    cache keys, not for security-sensitive hashing.
    """
    digest = hashlib.md5(v1)
    for chunk in values:
        digest.update(chunk)
    return digest
|
e3b5da0f813445e45adee162a66f3a5a819971e0
| 392,987
|
def data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_node_edge_pointtopology_uuidnode_edge_point_node_uuidnode_edge_point_uuid_get(uuid, node_uuid, node_rule_group_uuid, topology_uuid, node_edge_point_node_uuid, node_edge_point_uuid):  # noqa: E501
    """Return the tapi.topology.NodeEdgePointRef addressed by the given path ids.  # noqa: E501

    Auto-generated TAPI server stub: the body is a placeholder and does not
    perform a real lookup yet.

    :param uuid: Id of topology
    :type uuid: str
    :param node_uuid: Id of node
    :type node_uuid: str
    :param node_rule_group_uuid: Id of node-rule-group
    :type node_rule_group_uuid: str
    :param topology_uuid: Id of the node-edge-point's topology
    :type topology_uuid: str
    :param node_edge_point_node_uuid: Id of the node-edge-point's node
    :type node_edge_point_node_uuid: str
    :param node_edge_point_uuid: Id of node-edge-point
    :type node_edge_point_uuid: str
    :rtype: TapiTopologyNodeEdgePointRef
    """
    return 'do some magic!'
|
4e797aca99b30beacdbdd4e6a8ce98c3691fbbc1
| 513,979
|
def normalized_total_time(p, max_time=3600000):  # by default 1 h (in ms)
    """Return the run's total system time in milliseconds, capped at *max_time*."""
    raw = p["result.totalTimeSystem"]
    if raw == "3600.0":
        # Known logging error: this exact value was recorded in seconds,
        # so convert it to milliseconds explicitly.
        total_ms = 3600000
    else:
        total_ms = int(float(raw))
    return min(total_ms, max_time)
|
35cefa35b790d48fdd82bb68426f74ecadec6fa6
| 359,070
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.