content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import jinja2
def render(path, variables=None):
    """Render the template at ``path`` with the given variables.

    :param path: Template path resolved by the 'gutenberg' package loader.
    :param variables: Optional mapping of template variables. Defaults to
        an empty context. (A ``None`` default replaces the shared mutable
        default-argument ``{}``, which persists across calls.)
    :return: The rendered template as a string.
    """
    env = jinja2.Environment(
        loader=jinja2.PackageLoader('gutenberg'),
        trim_blocks=True,
        lstrip_blocks=True)
    template = env.get_template(path)
    return template.render(variables or {})
|
a0f7fbe1a8165c99264ba93834df40077b6ba8b6
| 59,803
|
def list_sets_for_entry(nsets):
    """
    Format a list of set numbers for display in a simple dialog.

    Parameters
    ----------
    nsets: List of integers, sets to select from

    Returns
    -------
    str1: string variable, the comma-separated list of sets,
        e.g. ``'1, 2, 3'``. An empty input yields an empty string
        (previously this raised IndexError).
    """
    # str.join builds the string in linear time and handles [] for free.
    return ', '.join('%d' % n for n in nsets)
|
3cbc52dc13c7747d0641f60d34e97548141cea75
| 59,805
|
def Fname_data(Fpath):
    """Parse a pulse-data or h5 file path into its components.

    Splits the final path component on underscores and returns
    (timeID, station_name, Fpath, file_number); file_number is the
    integer after the first character of the fourth field, or 0 when
    that field has nothing after its first character.
    """
    base_name = Fpath.split('/')[-1]
    fields = base_name.split('_')
    timeID, station_name = fields[1], fields[2]
    number_part = fields[3][1:]
    file_number = int(number_part) if number_part else 0
    return timeID, station_name, Fpath, file_number
|
90940234e97a97a41b20f83de1f6160776a7f708
| 59,810
|
def __add_abcd_counts(x, y):
"""
Adds two tuples. For example.
:math:`x + y = (x_a + y_a, x_b + y_b, x_c + y_c, x_d + y_d)`
:param x: Tuple (a, b, c, d).
:param y: Tuple (a, b, c, d).
:return: Tuple (a, b, c, d).
"""
return x[0] + y[0], x[1] + y[1], x[2] + y[2], x[3] + y[3]
|
9453058aae1e72c297ce6343ec80fcecaed76f74
| 59,814
|
def get_set_time(deadline: int):
    """Return the time-step set T = {1, 2, ..., tau}, tau = deadline."""
    return [step for step in range(1, deadline + 1)]
|
cb54ca541ebe65e7693794cbfc4190a217b951f9
| 59,816
|
def A_cells_um(nc, rho, A_c_rho1=800):
    """
    Return the area of `nc` cells at density `rho`, in micrometers^2.

    `A_c_rho1` is the per-cell area at `rho=1` (um^2); total area scales
    linearly with cell count and inversely with density.
    """
    total_area_at_rho1 = nc * A_c_rho1
    return total_area_at_rho1 / rho
|
db667cf9a72cf05a70d271c502b8d604feeef7d5
| 59,819
|
def GetZipPath(file, dir, dirname):
    """Given an absolute file path, an absolute directory path, and the
    dirname of the directory, return a path to the file relative to the
    parent of the given directory, with unix-style path separators.
    """
    # Swap the `dir` prefix for `dirname`, then normalise separators.
    remainder = file[len(dir):]
    return (dirname + remainder).replace("\\", "/")
|
8fa0d97ae5668588329e84c0f9695b57567aa459
| 59,821
|
def word_count(phrase):
    """
    Count the occurrences of each word in `phrase`.

    Words are runs of alphanumeric characters (lower-cased); any other
    character acts as a separator.
    """
    # Normalise: keep lower-cased alphanumerics, map the rest to spaces.
    normalized = ''.join(c.lower() if c.isalnum() else ' ' for c in phrase)
    counts = {}
    for word in normalized.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
|
94bc88eb158699391f85484ef2c40368ce929858
| 59,822
|
def n(T: float) -> float:
    """
    Return the mean motion: n = 360° / T.

    :param T: translation period
    :type T: float
    :return: mean motion in degrees
    :rtype: float
    """
    full_circle_degrees = 360
    return full_circle_degrees / T
|
0c52f9b82c8054dd4282ddb9cdc23f4fd53c636f
| 59,823
|
def word_splitter(df):
    """Add a 'Split Tweets' column: each tweet lower-cased and split into
    a list of separate words.

    Args:
        df (pandas DataFrame): must contain a 'Tweets' column of strings
            (one tweet per value).

    Return:
        The same DataFrame with the added 'Split Tweets' column.
    """
    df['Split Tweets'] = [tweet.lower().split() for tweet in df['Tweets']]
    return df
|
c75939719d136b8a267d2e7039cfe5426e601022
| 59,824
|
def remove_suffix(s: str, suffix: str) -> str:
    """Return `s` with `suffix` stripped from its end, if present."""
    if not suffix or not s.endswith(suffix):
        return s[:]
    return s[:len(s) - len(suffix)]
|
b47d5e193ebb3b522dfc546ed264bb65b117a1b0
| 59,826
|
def remove_unnecessary(string):
    """Strip the unnecessary '@?' and '?@' marker sequences from `string`."""
    # Same replacement order as before: '@?' first, then '?@'.
    for marker in ('@?', '?@'):
        string = string.replace(marker, '')
    return string
|
56965ef4b60d302e659bafd4874b8c72790c5ec3
| 59,827
|
import sympy
def solve(eq, target, **kwargs):
    """
    solve(expr, target, **kwargs)

    Algebraically rearrange an equation w.r.t. a given symbol; a thin
    wrapper around ``sympy.solve``.

    :param eq: The :class:`sympy.Eq` to be rearranged.
    :param target: The symbol w.r.t. which the equation is rearranged.
    :param kwargs: (Optional) Symbolic optimizations applied while
        rearranging the equation. For more information, refer to
        ``sympy.solve.__doc__``.
    """
    # Force settings known to guarantee a quick turnaround: rational
    # arithmetic would introduce float indices, and simplification here
    # is premature optimisation.
    fast_settings = dict(kwargs, rational=False, simplify=False)
    solutions = sympy.solve(eq, target, **fast_settings)
    return solutions[0]
|
9972338f0e967b343cc6ce2b8919e297ac7542d9
| 59,829
|
def downright(i, j, table):
    """Return the product of four digits along the down-right diagonal
    starting at (i, j); cells beyond row 19 or column 9 contribute 1."""
    product = 1
    for offset in range(4):
        row, col = i + offset, j + offset
        if row <= 19 and col <= 9:
            product *= int(table[row][col])
    return product
|
efa618d32892b3fb68217fd41907f5d874172ecf
| 59,833
|
def affiche_etat(etat):
    """Return `etat` with a space between each letter,
    e.g.: "E_AN" => "E _ A N"
    """
    spaced = " ".join(etat)
    return spaced
|
da8ea70fcd9ac03fa5f6ee493fc934b81382f6a4
| 59,834
|
def get_movies_by_year(hs_movie, year):
    """
    Return the movies released in the given year.

    Parameters:
        hs_movie (dict): mapping of release year -> movies
        year (int): the year that the movies were released

    returns:
        list: the movies for that year, or an empty list when none.
    """
    return hs_movie.get(year, [])
|
c9ed75908105f061ad6a81f4e1c363ac7d39383f
| 59,838
|
def crowded_comparison_operator(self, other, pareto):
    """
    Crowded-comparison operator guiding selection toward a uniformly
    spread-out Pareto-optimal front: prefer the lower nondomination
    rank; within the same front, prefer the larger crowding distance
    (i.e. the less crowded region).

    :param self: First individual of the comparison
    :param other: Second individual of the comparison
    :param pareto: A ParetoInfo object with rank and crowding-distance
        data for the current population
    :return: True if *self* is better than *other*, False otherwise.
    """
    self_rank = pareto.rank[self]
    other_rank = pareto.rank[other]
    if self_rank < other_rank:
        return True
    if self_rank == other_rank:
        return pareto.crowding_distance[self] > pareto.crowding_distance[other]
    return False
|
4e8667c1b2409a43f84b2a59cb0ea4acbebfeae3
| 59,841
|
from typing import Dict
from typing import Any
def _get_batchmm_opts(a_shape, a_strides, b_shape, b_strides, c_shape,
c_strides) -> Dict[str, Any]:
"""
Detects whether a matrix multiplication is a batched matrix multiplication
and returns its parameters (strides, batch size), or an empty dictionary if
batched multiplication is not detected.
:param a: Data descriptor for the first tensor.
:param b: Data descriptor for the second tensor.
:param c: Data descriptor for the output tensor (optional).
:return: A dictionary with the following keys: sa,sb,sc (strides for a, b,
and c); and b (batch size).
"""
if len(a_shape) > 3 or len(b_shape) > 3 or (c_shape and len(c_shape) > 3):
raise ValueError('Tensor dimensions too large for (batched) matrix '
'multiplication')
if len(a_shape) <= 2 and len(b_shape) <= 2:
return {}
batch = None
stride_a, stride_b, stride_c = 0, 0, 0
if len(a_shape) == 3:
batch = a_shape[0]
stride_a = a_strides[0]
if len(b_shape) == 3:
if batch and batch != b_shape[0]:
raise ValueError('Batch size mismatch for matrix multiplication')
batch = b_shape[0]
stride_b = b_strides[0]
if c_shape and len(c_shape) == 3:
if batch and batch != c_shape[0]:
raise ValueError('Batch size mismatch for matrix multiplication')
batch = c_shape[0]
stride_c = c_strides[0]
if batch is None:
return {}
return {'sa': stride_a, 'sb': stride_b, 'sc': stride_c, 'b': batch}
|
29f50020b7b27a0e17984fdc127d55361dfaf28a
| 59,844
|
def intervalOverlapCheck(
    interval, cmprInterval, percentThreshold=0, timeThreshold=0, boundaryInclusive=False
):
    """
    Check whether two intervals overlap.

    Args:
        interval (Interval):
        cmprInterval (Interval):
        percentThreshold (float): if greater than 0 and the intervals
            overlap, also flag overlaps of at least this fraction of
            their combined span
        timeThreshold (float): if greater than 0 and the intervals
            overlap, also flag overlaps longer than this duration
        boundaryInclusive (float): if true, two intervals that merely
            share a boundary are considered overlapping

    Returns:
        bool:
    """
    start, end = interval[:2]
    cmprStart, cmprEnd = cmprInterval[:2]
    overlapAmount = max(0, min(end, cmprEnd) - max(start, cmprStart))
    overlaps = overlapAmount > 0
    # Shared boundary: only one interval ending exactly where the other
    # begins needs checking (any other sharing already overlaps).
    sharesBoundary = False
    if boundaryInclusive:
        sharesBoundary = start == cmprEnd or end == cmprStart
    # Fraction of the combined span covered by the overlap.
    meetsPercent = False
    if percentThreshold > 0 and overlaps:
        combinedSpan = max(end, cmprEnd) - min(start, cmprStart)
        meetsPercent = overlapAmount / float(combinedSpan) >= percentThreshold
    # Absolute overlap duration.
    meetsTime = False
    if timeThreshold > 0 and overlaps:
        meetsTime = overlapAmount > timeThreshold
    return overlaps or sharesBoundary or meetsPercent or meetsTime
|
c129487dae826909d55ba7bdee4c8fc5a0edff69
| 59,845
|
def get_graph_names(accessor):
    """
    Given an instance of an accessor, return the configured graph names
    (the 'title' of every document in the "graph" collection).
    """
    documents = accessor.get_all_documents_from_collection("graph")
    return [document["title"] for document in documents]
|
3f2555d49c30f7adae1ad9720572e2cf6b3943c4
| 59,852
|
def _seconds_to_time(seconds):
"""
Convert seconds into a time-string with the format HH:MM:SS. Seconds should
be an integer or float (rounded to nearest second and then cast to int).
"""
# Represent as integer
try:
if not type(seconds) is int:
seconds = int(round(seconds,0))
except TypeError:
err = "seconds must be able to be converted to an integer\n"
raise ValueError(err)
# Make sure the it is not too large to represent
max_value = 99*3600 + 59*60 + 59
if seconds > max_value:
err = "times longer than {} (99:59:59) cannot be represented in HH:MM:SS.\n".format(max_value)
raise ValueError(err)
# Convert seconds to hours, minutes, seconds
hours = seconds // 3600
leftover = seconds - hours*3600
minutes = leftover // 60
seconds = leftover - minutes*60
# Make sure the resulting time is sane.
try:
assert seconds >= 0 and seconds < 60
assert minutes >= 0 and minutes < 60
assert hours >= 0 and hours < 100
except AssertionError:
err = "time could not be converted to HH:MM:SS format.\n"
err += "gave: {} hours, {} minutes, {} seconds\n".format(hours,minutes,seconds)
raise RuntimeError(err)
return "{:02}:{:02}:{:02}".format(hours,minutes,seconds)
|
275bdd2c11d7c4b92b880bbfc4ab6cd3916b67b0
| 59,853
|
def href_finder(soup_ele):
    """
    Return the href attribute of every <a href=...> tag in the given
    BeautifulSoup element.
    """
    anchors = soup_ele.findAll("a", href=True)
    return [anchor["href"] for anchor in anchors]
|
25de3093f07e58ba38037b213a0710eee062c9a2
| 59,854
|
import re
def remove_special(s):
    """Remove all special characters (? | : * / \\ < > ") from `s`."""
    special_chars = r"[?|:*/\\<>\"]+"
    return re.sub(special_chars, '', s)
|
5db21e06ea74927120be73b87ef20868d561f413
| 59,857
|
def _detect(fun, seq):
"""Return the first element that satisfies the predicate fun."""
for item in seq:
if fun(item):
return item
return None
|
e6d6a4e87cbd696dbe3390ca31cbb267d716b9c5
| 59,859
|
from typing import Any
import importlib
def import_python_object_from_string(function_string: str) -> Any:
    """
    Import and return the object named by a dotted path such as
    ``"package.module.attr"``.

    Based on https://stackoverflow.com/questions/3061/calling-a-function-of-a-module-by-using-its-name-a-string
    """  # pylint: disable=line-too-long
    module_path, attribute_name = function_string.rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
|
924f4ce0dfcf5cf902f351b3a04545efbc535407
| 59,860
|
import math
def newmark_overpressure(energy_mttnt, radius_m):
    """
    Newmark-Hansen Overpressure formula. Intended for surface blasts, but adapted to air-bursts.
    :param energy_mttnt: Energy in Megatons TNT
    :param radius_m: Actual distance from blast in m (hypotenuse distance for airburst events).
    :returns: overpressure in bar
    :Reference: NuclearBlastOverpressure.pdf, Equation 3
    """
    # Work in tons of TNT, scaled by the cube of the distance.
    scaled_yield = (energy_mttnt * 1000000) / (radius_m ** 3)
    return 6784 * scaled_yield + 93 * math.sqrt(scaled_yield)
|
dff7fb6eb807772f035d40410088f715662eb65b
| 59,864
|
def remove_prefix(text, prefix):
    """
    Remove the prefix string from the text.

    Args:
        text (str): The base string containing the prefixed string
        prefix (str): Prefix to remove

    Returns:
        str: the base string with the prefix removed (unchanged when
        the prefix is absent)
    """
    return text[len(prefix):] if text.startswith(prefix) else text
|
d29435dba6872c162dfbd0d9634ae65ef2d43619
| 59,870
|
def check_value(arr, shape, name=None):
    """
    Check that the given argument has the expected shape. Shape
    dimensions can be ints or -1 for a wildcard; the matched wildcard
    dimensions are returned so they can be used for further validation.

    Args:
        arr (np.arraylike): An array-like input.
        shape (list): Shape to validate. To require an array with 3 elements,
            pass `(3,)`. To require n by 3, pass `(-1, 3)`.
        name (str): Variable name to embed in the error message.

    Returns:
        object: None when there is no wildcard, the single wildcard
        dimension when there is exactly one, or a tuple of them.

    Example:
        >>> vg.shape.check_value(np.zeros((4, 3)), (-1, 3))
        >>> # Proceed with confidence that `points` is a k x 3 array.

    Example:
        >>> k = vg.shape.check_value(np.zeros((4, 3)), (-1, 3))
        >>> k
        4
    """
    WILDCARD = -1
    if any(not isinstance(dim, int) and dim != WILDCARD for dim in shape):
        raise ValueError("Expected shape dimensions to be int")
    preamble = "Expected an array" if name is None else f"{name} must be an array"
    if arr is None:
        raise ValueError(f"{preamble} with shape {shape}; got None")
    try:
        len(arr.shape)
    except (AttributeError, TypeError):
        raise ValueError(f"{preamble} with shape {shape}; got {arr.__class__.__name__}")
    # Rank must match exactly.
    if len(arr.shape) != len(shape):
        raise ValueError(f"{preamble} with shape {shape}; got {arr.shape}")
    # Validate fixed dimensions while collecting wildcard matches.
    wildcards = []
    for actual, expected in zip(arr.shape, shape):
        if expected == WILDCARD:
            wildcards.append(actual)
        elif actual != expected:
            raise ValueError(f"{preamble} with shape {shape}; got {arr.shape}")
    if not wildcards:
        return None
    if len(wildcards) == 1:
        return wildcards[0]
    return tuple(wildcards)
|
1adea9e8cb6d0db4976dd31335f6abaf53599efd
| 59,874
|
def classify_attachments(files):
    """ Return an (audio_files, related_docs) tuple, classifying each
    file by whether its 'file_mime' mentions audio. """
    audio = [f for f in files if 'audio' in f['file_mime']]
    related = [f for f in files if 'audio' not in f['file_mime']]
    return audio, related
|
28f78c420cc2ac54823f69f6eb0e2f6f810c5f5c
| 59,876
|
def _dohome_percent(value: int) -> float:
"""Convert dohome value (0-5000) to percent (0-1)."""
return value / 5000
|
8889c8af7240a919eed1f45143d2302e206ec9b7
| 59,877
|
def _iou(p1, p2):
"""Computes intersection over union of two intervals.
Args:
p1 ((int,int)): First interval as (first ts, last ts)
p2 ((int,int)): Second interval as (first ts, last ts)
Returns:
float: intersection over union of the two intervals.
"""
i_start = max(p1[0],p2[0])
i_end = min(p1[1],p2[1])
i_len = max(0,i_end-i_start)
o_start = min(p1[0],p2[0])
o_end = max(p1[1],p2[1])
o_len = o_end-o_start
return float(i_len)/o_len
|
2fbe75748a131ce2cfbfe7cee5a9fff300e617e1
| 59,882
|
import socket
def getfqdn4(name=None):
    """Return (an) IPv4 FQDN (Fully Qualified Domain Name).

    When `name` is omitted, the local hostname's FQDN is returned."""
    return socket.getfqdn() if name is None else socket.getfqdn(name)
|
e13a098bfaa9e1ca442adb8200c82dc558030f02
| 59,883
|
def two_gaussian_potential_bc(vnew, f2, coords):
    """
    Apply the boundary condition to the potential, force, and coordinates.

    Parameters:
    -----------
    vnew : float (or array of floats)
        Potential Energy
    f2 : float (or array of floats)
        Force
    coords : float
        coordinates

    Returns:
    --------
    (vnew, f2, coords, bcbias, is_periodic) where vnew and f2 are the
    boundary-adjusted potential and force, bcbias is the bias applied
    strictly by the boundary condition, and is_periodic is always False
    for this potential.
    """
    is_periodic = False
    # Beyond either wall, replace the potential/force with a steep
    # quartic restraint and record the applied bias.
    if coords < -4.3193:
        wall_v = 100.0 * (coords+4.0)**4.0 - 1.690133
        wall_f = -100.0 * 4.0 * (coords+4.0)**3.0
    elif coords > 4.25882:
        wall_v = 100.0 * (coords-4.0)**4.0 - 0.845067
        wall_f = -100.0 * 4.0 * (coords-4.0)**3.0
    else:
        # Inside the walls: everything passes through unchanged.
        return (vnew, f2, coords, 0, is_periodic)
    bcbias = wall_v - vnew
    return (wall_v, wall_f, coords, bcbias, is_periodic)
|
2d9eda2eb4db4a800f72f7b7eaf61c8508c5f848
| 59,885
|
def is_int_list(value):
    """Check that `value` is a non-empty list containing only integers.

    Previously only the first element's type was inspected, so mixed
    lists such as ``[1, 'a']`` were wrongly accepted, and falsy inputs
    were returned as-is instead of as a bool. Every element is now
    checked and the result is always a bool.
    """
    return (isinstance(value, list) and len(value) > 0
            and all(isinstance(item, int) for item in value))
|
752ab515da58bccfb06d8f72cf0da43a909ce6db
| 59,889
|
def get_sp_benchmarks(fmtstr):
    """
    Return the list of input SinglePrefix benchmark files with their destinations.
    Applies to SinglePrefix fatXPol, fatXMaintenance and spX benchmarks.
    """
    # (size, destination) pairs shared by every SinglePrefix benchmark.
    common = [(4, 10), (8, 12), (10, 13), (12, 64), (16, 0), (20, 0)]
    return [(fmtstr.format(size), dest) for size, dest in common]
|
f2ce47b852f3abaa0099ce64ec68ca264eb73c75
| 59,890
|
def regex_result_escape_duplicate_quotes(result):
    """
    Regex search handler: rebuild the match with group 3 wrapped in a
    single pair of double quotes, dropping the duplicate empty quotes.

    Args:
        result (MatchObject): Match result from a previous regex match.
    """
    return '{first}"{third}"'.format(first=result.group(1),
                                     third=result.group(3))
|
3464559e514358e151a14dfa8d1a77109233ccfc
| 59,895
|
def get_status_code(code):
    """
    Table B-1.1 CIP General Status Codes

    :param code: Code to get the Status from
    :return: Status Message ('unknown' for unlisted codes)
    """
    # Lookup table keyed by CIP general status code. Message text is
    # kept exactly as before (including historical spellings) so that
    # callers comparing strings keep working.
    messages = {
        0x00: 'Success',
        0x01: 'Connection failure',
        0x02: 'Resource unavailable',
        0x03: 'Invalid Parameter value',
        0x04: 'Path segment error',
        0x05: 'Path destination unknown',
        0x06: 'Partial transfer',
        0x07: 'Connection lost',
        0x08: 'Service not supported',
        0x09: 'Invalid attribute value',
        0x0A: 'Attribute List error',
        0x0B: 'Already in requested mode/state',
        0x0C: 'Object state conflict',
        0x0D: 'Object already exists',
        0x0E: 'Attribute not settable',
        0x0F: 'Privilege violation',
        0x10: 'Device state conflict',
        0x11: 'Reply data too large',
        0x12: 'Fragmentation of a primitive value',
        0x13: 'Not enough data',
        0x14: 'Attribute not supported',
        0x15: 'Too much data',
        0x16: 'Object does not exist',
        0x17: 'Service fragmentation sequence not in progress',
        0x18: 'No stored attribute data',
        0x19: 'Store operation failure',
        0x1A: 'Routing failure, request packet too large',
        0x1B: 'Routing failure, response packet too large',
        0x1C: 'Missing attribute list entry data',
        0x1D: 'Invalid attribute value list',
        0x1E: 'Embedded service error',
        0x1F: 'Vendor specific error',
        0x20: 'Invalid parameter',
        0x21: 'Write-once value or medium atready written',
        0x22: 'Invalid Reply Received',
        0x23: 'Buffer overflow',
        0x24: 'Message format error',
        0x25: 'Key failure path',
        0x26: 'Path size invalid',
        0x27: 'Unecpected attribute list',
        0x28: 'Invalid Member ID',
        0x29: 'Member not settable',
        0x2A: 'Group 2 only Server failure',
        0x2B: 'Unknown Modbus Error',
    }
    return messages.get(code, 'unknown')
|
3622da583ae55785322828a6cd1ba7601d125ac9
| 59,897
|
def get_counts(df, field, save_path=None):
    """Get the per-topic document counts broken down by `field`.

    Counts rows per (#topic, field) pair, pivots so that `field` values
    become columns (missing combinations filled with 0), and optionally
    writes the result to `save_path` as CSV.
    """
    counts = (
        df[['#topic', field, 'doc']]
        .copy()
        .groupby(['#topic', field])
        .count()
        .rename(columns={'doc': 'Number of Docs'})
        .unstack(fill_value=0)
    )
    # Flatten the MultiIndex columns and clear axis labels for display.
    counts.columns = counts.columns.get_level_values(1)
    counts.columns.name = ''
    counts.index.name = ''
    if save_path is not None:
        counts.to_csv(save_path)
    return counts
|
fa988ad73b0014837c81f3e7dcf7443341e47697
| 59,898
|
def unique(sequence):
    """Return unique items in sequence, preserving first-seen order."""
    seen = set()
    ordered = []
    for item in sequence:
        if item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered
|
880cbcdadfb2c0214674c86afb357096abf5be47
| 59,899
|
def quotewrap(item):
    """Return the string-like `item` wrapped in double quotes."""
    # str.join, like concatenation, requires a string-like argument.
    return ''.join(('"', item, '"'))
|
91acd54664b9408832b105a01b47165b53fdf4f5
| 59,900
|
import re
def MakeAlphaNum( str ):
    """Return a version of the argument string, in which all non-alphanumeric chars have been replaced
    by underscores.
    """
    # Raw string for the pattern: the bare '\W' escape is deprecated and
    # will become a SyntaxError in future Python versions.
    return re.sub( r'\W+', '_', str )
|
d0eb0b4e4d7be0e592fa5517fa088dc10c1d455e
| 59,902
|
def line() -> str:
    """
    Add line md style.

    :return: Formatted line in md style.
    :rtype: str
    """
    # Plain literal: the previous f-string had no placeholders.
    return "---"
|
c406a2fa5ea8452b10c3826bc7ac24674ca2cdf5
| 59,916
|
def transform_subject_number(subject_number, loop_size):
    """
    Modular exponentiation with the fixed modulus 20201227.

    Args:
        subject_number (int)
        loop_size (int)

    Returns:
        int:
            The resulting subject number,
            ``subject_number ** loop_size % 20201227``.
    """
    MODULUS = 20201227
    return pow(subject_number, loop_size, MODULUS)
|
13cb3bc0a5ef094e6578bf6f54b9e8a0854416d2
| 59,920
|
def is_action_execution_under_action_chain_context(liveaction):
    """
    An action execution runs under an action-chain context when its
    context dictionary contains the "chain" key.
    """
    context = liveaction.context
    return context and "chain" in context
|
eca64bd25ecf8cfd6e1232e36084262f674e1dbd
| 59,927
|
def make_kernel_names(kern_list):
    """
    Take a list of kernels and return a list of strings, giving each
    kernel a unique name.

    Each name is the lower-cased class name of the kernel; duplicates
    get trailing numbers, and the first occurrence is retroactively
    renamed to '<name>_1' once a second one appears.
    """
    names = []
    occurrence_count = {}
    for kernel in kern_list:
        base = kernel.__class__.__name__.lower()
        if base not in occurrence_count:
            # First time we see this class: use the bare name.
            occurrence_count[base] = 1
            names.append(base)
            continue
        # Duplicate: number the original entry if not done yet, then
        # append the next numbered name.
        if occurrence_count[base] == 1:
            names[names.index(base)] = base + '_1'
        occurrence_count[base] += 1
        names.append(base + '_' + str(occurrence_count[base]))
    return names
|
97c589ea271d110e2578d82182d198a52773bc6e
| 59,930
|
def sign(n):
    """ Returns -1 if n is negative, 1 if n is positive, and 0 otherwise. """
    # Boolean subtraction yields exactly -1, 0, or 1 as an int.
    return (n > 0) - (n < 0)
|
de619f0ac36ffb963b37fb46b4da262b3f8a535c
| 59,938
|
import re
def extract_title(textbody):
    r"""
    Return the title of the .tex file, assuming the title is set via
    \title{TITLE}. Otherwise, return "TITLE".
    """
    match = re.search(r"^\s*\\title{([^}]*)}", textbody, re.MULTILINE)
    return match.group(1) if match else "TITLE"
|
0e1055c8644823e0edfd4bfb22a1d3dcc2428570
| 59,940
|
def linear_variability(csr, var_frac):
    """Return an array with a linear relation between clearsky ratio and
    maximum variability fraction: each value is the maximum variability
    fraction for the corresponding clearsky ratio, reaching `var_frac`
    at csr = 1 (clear or thin clouds).

    Parameters
    ----------
    csr : np.ndarray
        REST2 clearsky irradiance without bad or missing data.
        This is a 2D array with (time, sites).
    var_frac : float
        Maximum variability fraction (0.05 is 5% variability).

    Returns
    -------
    out : np.ndarray
        Array with shape matching csr.
    """
    scaled = var_frac * csr
    return scaled
|
2dcb8f87fa0782b751e4697a6da024aa3103b0f2
| 59,941
|
def get_paths_threshold(plist, decreasing_factor):
    """
    Get end attributes cutting threshold.

    Parameters
    ----------
    plist
        List of paths ordered by number of occurrences
    decreasing_factor
        Decreasing factor of the algorithm

    Returns
    ---------
    threshold
        Paths cutting threshold
    """
    threshold = plist[0][1]
    # Walk the remaining occurrence counts; a value only becomes the new
    # threshold when it exceeds the decayed current threshold.
    for entry in plist[1:]:
        occurrences = entry[1]
        if occurrences > threshold * decreasing_factor:
            threshold = occurrences
    return threshold
|
d25624ddfde6505ec6e26de12feb8c33a2eb5bf2
| 59,943
|
import shutil
from pathlib import Path
def get_tc_exec(command):
    """Return the location of the RDAAM_He or ketch_aft executable.

    Looks on the PATH first, then in the local ``bin/`` directory.

    Raises:
        FileNotFoundError: when the executable cannot be located.
    """
    if shutil.which(command) is not None:
        return command
    local_copy = "bin/" + command
    if Path(local_copy).is_file():
        return local_copy
    raise FileNotFoundError(
        f"Age calculation program {command} not found. See Troubleshooting in tcplotter docs online."
    )
|
e9d4a75781bf8aee9f98ab94a59f48cae9eeb0a6
| 59,944
|
def get_image_format(filename):
    """Return the image format ('jpeg' or 'png') implied by the
    case-insensitive filename extension; raise ValueError otherwise."""
    lowered = filename.lower()
    if lowered.endswith(('jpeg', 'jpg')):
        return 'jpeg'
    if lowered.endswith('png'):
        return 'png'
    # Report the lower-cased name, matching the original message.
    raise ValueError('Unrecognized file format: %s' % lowered)
|
e003474275607e56f011a6410aeb4a9dfd7adcbd
| 59,945
|
def pd_columns_to_string(df):
    """ Returns a single string with a list of columns, eg: 'col1', 'col2', 'col3' """
    return ", ".join("'" + column + "'" for column in df.columns)
|
57b2a24d8a0f88fcd465928465f32dc358c1a020
| 59,946
|
def split_commas(ctx, param, value):
    """
    Convert from a comma-separated string to a true list.

    (ctx, param, value) is the required calling signature for a Click
    callback; a missing value yields None.
    """
    try:
        return value.split(',')
    except AttributeError:
        # value was None (or otherwise not splittable) -> no input given.
        return None
|
8aeb193ef83abf3d328988b07a7f014f6b5b2179
| 59,950
|
def _apply_prefix(prefix, model):
"""
Prefix all path entries in model with the given prefix.
"""
if not isinstance(model, dict):
raise TypeError("Expected dict for model, got %s" % type(model))
# We get unwanted leading/trailing slashes if prefix or model['path'] are
# '', both of which are legal values.
model['path'] = '/'.join((prefix, model['path'])).strip('/')
if model['type'] in ('notebook', 'file'):
return model
if model['type'] != 'directory':
raise ValueError("Unknown model type %s." % type(model))
content = model.get('content', None)
if content is not None:
for sub_model in content:
_apply_prefix(prefix, sub_model)
return model
|
5560247061c7827990c8a8496023e70c385ec7ef
| 59,952
|
def computeDerivatives(oldEntry, newEntry):
    """
    Computes first-order derivatives between two corresponding sets of metrics.

    :param oldEntry: Dictionary of metrics at time t - 1
    :param newEntry: Dictionary of metrics at time t
    :return: Dictionary of first-order derivatives, keyed like newEntry
    """
    derivatives = {}
    for metric, current_value in newEntry.items():
        derivatives[metric] = current_value - oldEntry[metric]
    return derivatives
|
1f4f2fe17cebc98e5f136f2a68b9b5c772367f40
| 59,953
|
def float_to_python(self, value):
    """
    Convert a 'float' field from solr's xml format to python and return it.
    """
    converted = float(value)
    return converted
|
74bc65259288261c47f0168c586b1bff7a7ae615
| 59,958
|
def exec_file(path):
    """Execute a python file and return the resulting `globals` dictionary.

    NOTE: this runs arbitrary code from `path`; only use on trusted files.
    """
    namespace = {}
    with open(path, 'rb') as source:
        code = source.read()
    exec(code, namespace, namespace)
    return namespace
|
9567a7c5c98a1cc9e71b8fdc563377dd89d68278
| 59,959
|
def _gt_get_all_nodes(self):
"""
Get all the nodes in this tree
"""
return self.tree.get_all_nodes()
|
92793d01b0f4a0c347a7e5a717d3bf760b27288b
| 59,960
|
def delete_old_account_key(self) -> bool:
    """Delete old account key from Cloud Portal.

    Acts as "commit" for a previous account key change: make sure all
    managed appliances have the new account key before calling this.
    Requires Portal connectivity.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - spPortal
          - DELETE
          - /spPortal/account/oldKey

    :return: Returns True/False based on successful call
    :rtype: bool
    """
    endpoint = "/spPortal/account/oldKey"
    return self._delete(
        endpoint,
        expected_status=[204],
        return_type="bool",
    )
|
d2510744c28d586aa31046c371fbd55f75d69074
| 59,969
|
def fixture_old_tool_version() -> str:
    """Return an old version of an existing tool."""
    return "3.1"
|
809e6fd91b35bc3ffa989312dc8c7f5785d4f8c2
| 59,972
|
from typing import Callable
def make_f(flavor: int) -> Callable[[int], int]:
    """Return a 1-bit constant or balanced function f. 4 flavors.

    Truth tables, indexed by flavor:
      0: f(0)=0, f(1)=0  constant
      1: f(0)=0, f(1)=1  balanced
      2: f(0)=1, f(1)=0  balanced
      3: f(0)=1, f(1)=1  constant
    """
    truth_tables = [[0, 0], [0, 1], [1, 0], [1, 1]]
    def f(bit: int) -> int:
        """Return f(bit) for the selected flavor."""
        # Flavor is looked up lazily, as before, so an out-of-range
        # flavor only raises when f is called.
        return truth_tables[flavor][bit]
    return f
|
c51d9ea7748550ef41ab2d49385f678bbebf5c8d
| 59,974
|
import math
def get_board_size(game_board):
    """(str) -> int
    Precondition: board_size is a perfect square

    Return the side length of the tic tac toe board

    >>> get_board_size('x-ox')
    2
    >>> get_board_size('xoxoxxoxx')
    3
    """
    # math.isqrt is exact for perfect squares, avoiding the float
    # rounding risk of int(math.sqrt(...)) on large boards.
    return math.isqrt(len(game_board))
|
5f60832686654d5fc788d9dcf0154de561f1ecb6
| 59,975
|
from datetime import datetime
def float2timestamp(float_timestamp):
    """Convert a POSIX timestamp (float) to a datetime.datetime object
    in local time."""
    converted = datetime.fromtimestamp(float_timestamp)
    return converted
|
fd62b0a1a4f4b71acf86db9d3f9d849c339f87a4
| 59,982
|
def get_identifiers(code_string):
    """
    Return all valid identifiers found in the C++ code string:
    uninterrupted runs that start with a letter or underscore and
    continue with letters, underscores or digits. Anything that is not
    an underscore or alphanumeric marks the end of an identifier.

    The returned set includes reserved keywords (if, for, while ...),
    type names (float, double, int ...) and called function names
    (cos, sin ...) in addition to variables used or defined in the code.
    """
    found = set()
    current = []
    # Trailing space guarantees the final identifier gets flushed.
    for ch in code_string + ' ':
        if current and (ch == '_' or ch.isalnum()):
            # Continue the identifier in progress.
            current.append(ch)
        elif ch == '_' or ch.isalpha():
            # Valid start character: begin a new identifier.
            current = [ch]
        elif current:
            # Separator: flush the completed identifier.
            found.add(''.join(current))
            current = []
    return found
|
b1a22a5218caab57a4c2541acd18ba9027a477d4
| 59,984
|
def get_existing_boundary_indices(mesh):
    """
    Returns
    ----------
    list, int.
        The sorted boundary indices (data['boundary'] > 0) that exist
        on the mesh vertices.
    """
    # A set gives deduplication for free; sorting matches the old output.
    found = set()
    for _, data in mesh.vertices(data=True):
        boundary = data['boundary']
        if boundary > 0:
            found.add(boundary)
    return sorted(found)
|
0ba24f8d8f4b2cf36cc16c2d810145e41ac3c229
| 59,989
|
def index_to_voxel(index, Y_size, Z_size):
    """
    Convert a flat index to voxel coordinates, eg. 0 -> (0, 0, 0).

    NOTE(review): i is taken modulo Y_size and j modulo Z_size, so the
    fastest-varying axis has extent Y_size -- confirm against callers.
    """
    i, remainder = index % Y_size, index // Y_size
    j, k = remainder % Z_size, remainder // Z_size
    return (i, j, k)
|
f61bea14bec11b0faae15bb119b14fc116953d54
| 59,993
|
def get_fk_from_field(m, f_name, f_value):
    """Read an object to be used as a foreign key in another record.

    Returns (obj, msg): obj is the matched record (or None on failure)
    and msg is a formatted error description (empty on success).
    """
    try:
        return m.objects.get(**{f_name: f_value}), ""
    except m.DoesNotExist:
        return None, f'{f_name} "{f_value}"'
    except ValueError:
        return None, f'{f_name} "{f_value}": wrong type. \n'
|
97c5898702f023e8634457127029df4d38aa708c
| 59,995
|
def get_input_layer_variables(ann, variables, descriptors):
    """
    Map each descriptor name to the MILP variable of the matching
    input-layer node.

    ``variables`` is the (x, y, z) tuple of the MILP_Relu model; only
    the ``y`` variables (indexed by input-layer node) are used.
    """
    _, y, _ = variables
    return {name: y[node]
            for node, name in zip(ann.input_layer, descriptors)}
|
a9767e348adc9d396c949867cec0f58ac8893226
| 59,996
|
def makeiter(var):
    """Wrap ``var`` in a list unless it is already a non-string iterable.

    Parameters
    ------------
    var
        the value to check.

    Returns
    ------------
    var
        ``var`` itself if it is an iterable other than a string,
        otherwise ``[var]``.
    """
    is_nonstring_iterable = hasattr(var, '__iter__') and not isinstance(var, str)
    return var if is_nonstring_iterable else [var]
|
f2259d918e9e1b150d59bca9b6c1351b3daea8b9
| 60,000
|
from pathlib import Path
def data_to_dir(data_dir):
    """Return ``data_dir`` as a Path, creating the directory if needed.

    An existing directory is accepted only when it is empty; a
    non-empty directory raises FileExistsError so previous output is
    never clobbered.
    """
    path = Path(data_dir)
    if not path.is_dir():
        path.mkdir(parents=True)
        return path
    if any(path.iterdir()):
        raise FileExistsError(
            f"{path.absolute()} already exists and is non-empty. "
            "Please provide a new path for data output through `-d new_dir`."
        )
    return path
|
35b85f0e5cf5693beeb1aacd56018b0229279229
| 60,002
|
def normalize_bcast_dims(*shapes):
    """
    Pad every shape at the front with 1s so they all share the length
    of the longest shape (broadcasting convention), returning them as
    lists.
    """
    target_len = max(len(s) for s in shapes)
    return [[1] * (target_len - len(s)) + list(s) for s in shapes]
|
64776b52dd6ddcc226eb6db8a8373765d5637f88
| 60,004
|
import csv
def getListOfJiraUsersFromCsv(jiraUsersToSearchFor="jira_users.csv"):
    """
    Read the list of jira users expected to be found from a csv file.

    The csv should contain the name in the format "FIRSTNAME LASTNAME"
    and the type "STUDENT" or "ADMIN".  The first row is treated as a
    header and skipped, as are blank lines and lines starting with '#'.
    """
    users = []
    with open(jiraUsersToSearchFor) as users_csv:
        for row_num, raw in enumerate(users_csv):
            if row_num == 0:
                # Skip header row.
                continue
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            fields = next(csv.reader([line], delimiter=',', quotechar='"'))
            users.append({"name": fields[1], "type": fields[2], "year": fields[0]})
    return users
|
17d34848ca56d7631d5dcb1544425f4b0e372007
| 60,007
|
import torch
from typing import Union
from typing import Type
from typing import Tuple
def count_module_instances(module: torch.nn.Module, module_class: Union[Type[torch.nn.Module],
                                                                        Tuple[Type[torch.nn.Module], ...]]) -> int:
    """Recursively count how many submodules of ``module`` are instances
    of ``module_class``.

    For example, ``count_module_instances(nn.Sequential(nn.Linear(16, 32),
    nn.Linear(32, 64), nn.ReLU()), nn.Linear)`` returns ``2``.

    Args:
        module (torch.nn.Module): The source module.
        module_class (Type[torch.nn.Module] | Tuple[Type[torch.nn.Module], ...]):
            The module type (or tuple of module types) to count.

    Returns:
        int: The number of instances of ``module_class`` in ``module``.
            The root ``module`` itself is not counted.
    """
    total = 0
    for child in module.children():
        if isinstance(child, module_class):
            total += 1
        # Descend so that nested containers are counted as well.
        total += count_module_instances(child, module_class)
    return total
|
a991db4d1ac7d916c36fdbd993e28a31e858d62f
| 60,008
|
import random
def random_indexes(a, b, feats_in_plot):
    """Support function for tSNE_vis.

    Args:
        a: start index
        b: end index
        feats_in_plot: # of features to be plotted per class

    Returns:
        List of ``feats_in_plot`` random indexes drawn from [a, b - 1]
    """
    return [random.randint(a, b - 1) for _ in range(feats_in_plot)]
|
2fad244becdc378dc3cc36ccc786634c7ec0d832
| 60,013
|
def copy_node(node):
    """Return a childless copy of ``node``: same tag, attributes, text
    and tail, but none of its children."""
    clone = node.makeelement(node.tag)
    clone.text = node.text
    clone.tail = node.tail
    for attr, val in node.items():
        clone.set(attr, val)
    return clone
|
179cc1277c3e5a9b41fdd6ec6392273c8a3c4c64
| 60,021
|
import base64
def _base64(text):
"""Encodes string as base64 as specified in the ACME RFC."""
return base64.urlsafe_b64encode(text).decode("utf8").rstrip("=")
|
a52dd0a05d00f71b9246d59320cb709ac9f53236
| 60,024
|
import time
import json
import base64
def parse_scitoken(token_string: str):
"""Parse a scitoken string and return the header, payload, and signature
:param token_string: A scitoken as 3 '.'-separated base64 strings
:return: header (dict), payload (dict), signature (str)
:raises: ValueError if there is an error decoding the token
"""
def prettytime(attr):
return time.strftime("%F %T %z", time.localtime(int(payload[attr])))
try:
# b64decode errors if there's not enough padding; OTOH, no harm in too much padding
header_b64, payload_b64, signature_b64 = [x + "==" for x in token_string.split(".")]
header = json.loads(base64.urlsafe_b64decode(header_b64))
payload = json.loads(base64.urlsafe_b64decode(payload_b64))
signature = base64.urlsafe_b64decode(signature_b64)
payload['exp'] = prettytime('exp')
if 'nbf' in payload:
payload['nbf'] = prettytime('nbf')
if 'iat' in payload:
payload['iat'] = prettytime('iat')
return header, payload, signature
except (AttributeError, TypeError, ValueError, json.JSONDecodeError) as err:
raise ValueError(f"invalid token: {err}") from err
|
ba34d0416e355821ac3f321f2ea729d7ff5460dd
| 60,034
|
def _get_reference_bodyreference(referencebody_element):
"""Parse ReferenceInput BodyReference element
"""
return referencebody_element.attrib.get(
'{http://www.w3.org/1999/xlink}href', '')
|
c0baeec99e3d9d4a54f17a721d00202e214defcc
| 60,038
|
import torch
def to_original_tensor(sorted_tensor, sorted_idx, sort_dim, device):
    """
    Restore tensor to its original order.
    This is used after applying pad_packed_sequence.

    :param sorted_tensor: a sorted tensor
    :param sorted_idx: sorted index of the sorted_tensor
    :param sort_dim: the dimension of sorted_tensor where it is sorted
    :device: calculation device
    :return: the original unsorted tensor
    """
    # Invert the sorting permutation: output position p must come from
    # wherever sorted_idx says the original element p was placed.
    inverse_perm = sorted(range(sorted_idx.size(0)), key=lambda pos: sorted_idx[pos])
    original_idx = torch.LongTensor(inverse_perm).to(device)
    return sorted_tensor.index_select(dim=sort_dim, index=original_idx)
|
80308e9ef98060ab72f90b27cd4949a103b136f6
| 60,040
|
def host_and_port(host_or_port):
    """
    Return full hostname/IP + port, possible input formats are:

    * host:port -> host:port
    * :         -> localhost:4200
    * :port     -> localhost:port
    * host      -> host:4200
    """
    if ':' not in host_or_port:
        return host_or_port + ':4200'
    if host_or_port == ':':
        return 'localhost:4200'
    if host_or_port.startswith(':'):
        return 'localhost' + host_or_port
    return host_or_port
|
f31df10657f0eba0a40ca3de5fd43006944501bb
| 60,046
|
def last(coll):
    """
    Return the last item in ``coll`` in linear time, or ``None`` when
    ``coll`` is empty.  Works on any iterable, including one-shot
    iterators.
    """
    # Exhaust the iterable; the loop variable retains the final item.
    item = None
    for item in coll:
        pass
    return item
|
e4b7ba628bf7fe1cf68ed208dd14aa431aa08b6d
| 60,050
|
from typing import Dict
def _parse_schemata_file_row(line: str) -> Dict[str, str]:
"""Parse RDTAllocation.l3 and RDTAllocation.mb strings based on
https://github.com/torvalds/linux/blob/9cf6b756cdf2cd38b8b0dac2567f7c6daf5e79d5/arch/x86/kernel/cpu/resctrl/ctrlmondata.c#L254
and return dict mapping and domain id to its configuration (value).
Resource type (e.g. mb, l3) is dropped.
Eg.
mb:1=20;2=50 returns {'1':'20', '2':'50'}
mb:xxx=20mbs;2=50b returns {'1':'20mbs', '2':'50b'}
raises ValueError exception for inproper format or conflicting domains ids.
"""
RESOURCE_ID_SEPARATOR = ':'
DOMAIN_ID_SEPARATOR = ';'
VALUE_SEPARATOR = '='
domains = {}
# Ignore emtpy line.
if not line:
return {}
# Drop resource identifier prefix like ("mb:")
line = line[line.find(RESOURCE_ID_SEPARATOR) + 1:]
# Domains
domains_with_values = line.split(DOMAIN_ID_SEPARATOR)
for domain_with_value in domains_with_values:
if not domain_with_value:
raise ValueError('domain cannot be empty')
if VALUE_SEPARATOR not in domain_with_value:
raise ValueError('Value separator is missing "="!')
separator_position = domain_with_value.find(VALUE_SEPARATOR)
domain_id = domain_with_value[:separator_position]
if not domain_id:
raise ValueError('domain_id cannot be empty!')
value = domain_with_value[separator_position + 1:]
if not value:
raise ValueError('value cannot be empty!')
if domain_id in domains:
raise ValueError('Conflicting domain id found!')
domains[domain_id] = value
return domains
|
359672902330e30c8b188f6565b4242f8730ecea
| 60,053
|
def force_str_length(string, length):
    """Truncate and/or pad (with trailing spaces) a value's string form
    so the result is exactly ``length`` characters long.

    Args:
        string: value to format; converted with ``str()`` first.
        length: exact length of the returned string.

    Returns:
        str of exactly ``length`` characters.
    """
    # str.ljust pads in a single step, replacing the original
    # character-by-character (quadratic) while loop.
    return str(string).ljust(length)[:length]
|
352560d6fb60e21968f556c96ba357ac84e60aa7
| 60,055
|
def value2str(value):
    """
    format a parameter value to string to be inserted into a workflow

    Parameters
    ----------
    value: bool, int, float, list

    Returns
    -------
    str
        the string representation of the value: lower-case for bools,
        comma-joined for lists; ``None`` is passed through unchanged.
    """
    if value is None:
        return value
    if isinstance(value, bool):
        return str(value).lower()
    if isinstance(value, list):
        return ','.join(map(str, value))
    return str(value)
|
af94778f70b08f696007bcb42713b35d4d786525
| 60,056
|
def get_family_from(hmm_filepath):
    """Get family name from the name of an individual family hmm filename.

    Args:
      hmm_filepath: string. Of the form '~/family_name'.hmm, like
        hmm_files/PF00001.21.hmm.

    Returns:
      string. Family name (the first two dot-separated components of
      the basename).
    """
    basename = hmm_filepath.split('/')[-1]
    name_parts = basename.split('.')
    return '.'.join(name_parts[:2])
|
93ae5f3f7da6d987daaedc45a869c8fc40c5eb3d
| 60,058
|
def run_queue_analysis(trace, threshold):
    """
    Plot the queuing delay distribution and
    severely delayed tasks.

    :param trace: input Trace object
    :type trace: :mod:`libs.utils.Trace`

    :param threshold: plot transactions taken longer than threshold
    :type threshold: int

    :returns: the queue dataframe that was plotted
    """
    df = trace.data_frame.queue_df()
    analysis = trace.analysis.binder_transaction
    analysis.plot_samples(df, "delta_t", "transaction samples",
                          "wait time (microseconds)")
    analysis.plot_tasks(df, threshold, "__comm_x", "delta_t", "tasks",
                        "wait time (microseconds)")
    return df
|
aa61892ccf21a2fe4b962630d2c0f4401a3754ef
| 60,062
|
def read_file(filename):
    """
    Return the file's content with every line stripped of surrounding
    whitespace (including newlines) and concatenated.

    :param filename: filename to read
    :return: content of the file as one string
    """
    # The with-block closes the file; the original also called
    # f.close() redundantly inside the context manager.
    with open(filename) as f:
        return ''.join(line.strip() for line in f)
|
fe34f14a9022c44d6da191cf1720ba8f0358c168
| 60,063
|
def fail_suffix(i):
    """Filename suffix for the ith (0-based) failing file."""
    return '_fail' if i == 0 else '_fail_' + str(i + 1)
|
4b4c5344bf510a2c167ced57c78e47ad79f1da7e
| 60,068
|
from typing import Tuple
def split_key(ivkey: str) -> Tuple[str, str]:
    """Split a combined blob into (IV, key): the IV is the first 32
    characters, the key the last 64."""
    return ivkey[:32], ivkey[-64:]
|
eb5f02b8441aed55d330680450d619c0e1840693
| 60,069
|
def delete_user(connection, id):
    """Delete user for specific user id.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id(string): User ID.

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    endpoint = f'{connection.base_url}/api/users/{id}'
    return connection.session.delete(url=endpoint)
|
7a4433c775a4cf233f436bd85c40978a17b75309
| 60,070
|
def ParseBool(value):
    """Parse a string representation into a True/False value.

    ``None`` passes through; anything other than 'true'/'1'/'false'/'0'
    (case-insensitive) raises ValueError.
    """
    if value is None:
        return None
    normalized = value.lower()
    if normalized in ('true', '1'):
        return True
    if normalized in ('false', '0'):
        return False
    raise ValueError(value)
|
2bac384894e29aaaba4bee42207d89db11b1badf
| 60,073
|
import time
def get_time() -> float:
    """Return the current perf-counter reading, in milliseconds."""
    return 1000.0 * time.perf_counter()
|
e4a63e14b1107c5ac00279019bd1f1623a3a75cc
| 60,074
|
import torch
def pseudo_huber_loss(truth, pred, scale=2.0):
    """Pseudo-Huber loss used in StereoNet.

    Described in 2019 Barron - A General and Adaptive Robust Loss Function.
    Computes mean(sqrt(((pred - truth) / scale)^2 + 1) - 1).
    """
    normalized = (pred - truth) / scale
    return torch.mean(torch.sqrt(normalized * normalized + 1.0) - 1.0)
|
4fc533599d10925f270895df1c1e3b8a51a42ed7
| 60,080
|
def make_param_name_single_index(param_parts):
    """
    Build the key name from param parts by keeping only the first and
    last components.  For example, ("param", "tag", "1") -> ("param", "1").
    """
    first, last = param_parts[0], param_parts[-1]
    return (first, last)
|
e38d49c93b7cc0f1306839d9fbff4eefce808433
| 60,082
|
def parse_sample_value_map(arguments, samples, default, sample_map, type):
    """Combine arguments specified for a default value and sample-value map file.

    Parameters
    ----------
    arguments
        Parsed arguments containing the default value argument
        and optionally the sample-value map file argument.
    samples : list
        List of sample names
    default : str
        Name of argument with default value.
    sample_map : str
        Path of file containing tab-separated per sample values.
    type : type
        Type of the specified values.

    Returns
    -------
    sample_values : dict
        Dict mapping samples to values.
    """
    sample_value = dict()
    assert hasattr(arguments, default)
    # Per-sample overrides from the optional tab-separated map file.
    if hasattr(arguments, sample_map):
        path = getattr(arguments, sample_map)[0]
        if path:
            with open(path) as f:
                # Iterate the file lazily instead of readlines().
                for line in f:
                    sample, value = line.strip().split("\t")
                    sample_value[sample] = type(value)
    # Fall back to the default value for samples without an override.
    default_value = getattr(arguments, default)[0]
    for sample in samples:
        if sample not in sample_value:
            sample_value[sample] = default_value
    return sample_value
|
2e4639f27cc2b47164389dc2085c932792d24f58
| 60,086
|
def tile_bonds(bonds, reps, num_solvent_atoms):
    """
    Tile the bonds array ``reps`` times, like the Numpy tile function,
    but with every atom index in each repeat shifted by
    ``num_solvent_atoms`` (bond orders are left untouched).

    bonds : Psi-OMM Bonds array
        The bonds array (bonds0, bonds1) for the solvent molecule.
    reps : int
        Number of repeats of the bonds array.  For a simple array,
        arr=[1,2], tile(arr, 3) would result in [1,2,1,2,1,2].
    num_solvent_atoms : int
        Number of atoms in the solvent molecule; used as the per-repeat
        index offset.

    Returns
    -------
    Tuple of two lists of lists in the form of the bonds array.
    """
    tiled0 = bonds[0].copy()
    tiled1 = bonds[1].copy()
    for rep in range(1, reps):
        offset = num_solvent_atoms * rep
        for bond in bonds[0]:
            # bond = [index_a, index_b, bond_order, ...]; shift only the
            # two atom indices, leave the bond order untouched.
            tiled0.append([bond[0] + offset, bond[1] + offset] + bond[2:])
        for index_list in bonds[1]:
            # Every entry here is an atom index, so all are shifted.
            tiled1.append([idx + offset for idx in index_list])
    return (tiled0, tiled1)
|
57251fd5f543a1b270422d214305596765289f1b
| 60,090
|
def strings_AND_bitwise(string1, string2):
    """Returns the bitwise AND of two equal length bit strings.

    Parameters
    ----------
    string1 : str
        First string
    string2 : str
        Second string

    Returns
    -------
    string_out : str
        bitwise AND of the two input strings

    Raises
    ------
    Exception
        If the two strings do not have the same length.
    """
    if len(string1) != len(string2):
        raise Exception('When taking the logical AND of two strings they must both have the same length')
    # BUG FIX: the previous implementation emitted '1' when the two bits
    # DIFFERED (i.e. it computed XOR), contradicting both the function
    # name and the docstring.  A result bit is '1' only when BOTH input
    # bits are '1'.
    return ''.join('1' if b1 == '1' and b2 == '1' else '0'
                   for b1, b2 in zip(string1, string2))
|
1fb83f15d0b086914667555b5e150e7697792f53
| 60,092
|
from pathlib import Path
import re
def get_decoder_id(decoder_dir):
    """
    Get information about a learned decoder from its output directory name.

    The directory name has the form ``<model>-<run>-<step>-<subject>``;
    returns (model, run, step, subject) with run and step as ints.
    """
    name = decoder_dir.name if isinstance(decoder_dir, Path) else decoder_dir
    matches = re.findall("^([\w_]+)-(\d+)-(\d+)-([\w\d]+)$", name)
    model, run_id, step, subject = matches[0]
    return model, int(run_id), int(step), subject
|
48ca1ac7cc5076618ba2c76308813a914e4f3bbd
| 60,096
|
from typing import Tuple
import itertools
def part1(values: Tuple[int, ...]) -> int:
    """Identify the first value that is not the sum of two distinct
    entries among the 25 preceding values, and return it.

    :param values: the full sequence of numbers
    :raises RuntimeError: if every value can be formed from its preamble
    """
    for i in range(len(values) - 25):
        target = values[i + 25]
        window = values[i:i + 25]
        # combinations() enumerates each unordered pair of distinct
        # positions exactly once, replacing the original product() scan
        # that visited every ordered pair (including j == k, guarded).
        if not any(a + b == target for a, b in itertools.combinations(window, 2)):
            return target
    raise RuntimeError('Failed to find a value!')
|
280ec40df37b76e9957ef92280eaa577d47030ee
| 60,097
|
import re
def first_char_index(lines, regex):
    """Get the index of the first position for a description character.

    Use this to find where the first character after the : should be
    placed so that each comment can line up.

    :param lines: list of strings to find the index for
    :param regex: Regex to identify the line to compare with
    :returns: The index of the first position for description characters,
        or None if no lines matched the given regex
    """
    for candidate in lines:
        if not re.match(regex, candidate):
            continue
        prefix, description = candidate.split(":")[0], candidate.split(":")[1]
        # Whitespace removed by strip() is the padding after the colon.
        padding = len(description) - len(description.strip())
        return len(prefix) + 1 + padding
    return None
|
e31794f46f67f79008e761267b42c6766b5dd370
| 60,098
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.