content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import List
import base64
def ToBase64(array: List[int]) -> bytes:
    """Encode a sequence of byte values (0-255) as a standard base64 byte string."""
    raw = bytes(array)
    return base64.standard_b64encode(raw)
|
8b0aa490ff3d28f9c2cd7b33ed6b3e4a1cd13b4b
| 57,919
|
from typing import Optional
import re
import string
def get_dart_well_index(coordinate: Optional[str]) -> Optional[int]:
    """Map a plate coordinate to its 1-based well index, or None if invalid.

    Assumes a 96-well plate (A1 - H12): the row letter selects a block of 12
    and the column number the offset within it, e.g. A04 -> 4, B04 -> 16.

    Arguments:
        coordinate {Optional[str]} -- coordinate such as "A04"
    Returns:
        Optional[int] -- the well index, or None when unparseable/out of range
    """
    if not coordinate:
        return None
    match = re.match(r"^([A-Z])(\d{1,2})$", coordinate)
    if match is None:
        return None
    column = int(match.group(2))
    if not 1 <= column <= 12:
        return None
    # Row letter -> 0-based row number (A=0, B=1, ...).
    row_offset = string.ascii_lowercase.index(match.group(1).lower())
    index = row_offset * 12 + column
    return index if 1 <= index <= 96 else None
|
3faa1feb59e6fd5fdce0590283097706188564ac
| 57,932
|
def make_season(year):
    """Build the season string used by the API from a year.

    :param year: the starting year of the season, as an int or numeric
        string (e.g. 2012, "2015"); the docstring always advertised string
        support, but the old code crashed on strings (`"2012" + 1`).
    :return: season string such as "2015-16" or "2012-13"
    """
    year = int(year)
    next_yr = str(year + 1)[-2:]
    return '{0}-{1}'.format(year, next_yr)
|
6da625b755dd925227b024aa1475ad7acadfcd47
| 57,934
|
def simulateAndPlot(r, **kwargs):
    """Run ``r.simulate`` with the given keyword arguments and plot via tellurium.

    :returns: simulation result
    :rtype: NamedArray
    """
    result = r.simulate(**kwargs)
    r.plot(result)
    return result
|
ec408e13abd34da5f9f6a7c0b0522cbd51fe0083
| 57,938
|
def add_http_parameters(url: str, params: dict) -> str:
    """
    Adds HTTP parameters to url.

    Spaces in parameter values are encoded as ``%20``. Fixes the old
    behaviour of returning a dangling ``url + "?"`` when ``params`` is
    empty: the url is now returned unchanged in that case.

    :param url: url address
    :param params: http parameters (string keys to string values)
    :return: url with added http parameters
    """
    if not params:
        return url
    query = "&".join(
        key + "=" + value.replace(" ", "%20") for key, value in params.items()
    )
    return url + "?" + query
|
0918c263d764efe8b1fc668f73a70dc953f2ba90
| 57,946
|
import random
def shuffle(s):
    """Return a new string with the characters of ``s`` in random order."""
    chars = list(s)
    random.shuffle(chars)
    return "".join(chars)
|
aac3e9c79e193090f831f77599168df6a1d63a04
| 57,948
|
def sefl(c, f, f0):
    """
    Semi-empirical normalized force limit.

    Parameters
    ----------
    c : scalar
        Constant based on experience, typically around 1.5
    f : scalar
        Frequency of interest, typically lower end of band.
    f0 : scalar
        Fundamental frequency in direction of interest.

    Returns
    -------
    nfl : scalar
        The normalized force limit:

        .. code-block:: none

            nfl = c           f <= f0
            nfl = c / (f/f0)  f > f0

    Notes
    -----
    See reference [#fl1]_ for more information on force limiting.

    References
    ----------
    .. [#fl1] Scharton, T.D. (1997). 'Force Limited Vibration Testing
        Monograph'. NASA. Reference Publication RP-1403, JPL.

    See also
    --------
    :func:`ntfl`, :func:`ctdfs`, :func:`stdfs`.

    Examples
    --------
    >>> from pyyeti import frclim
    >>> m = 6961   # s/c mass
    >>> faf = 40   # fundamental axial frequency of s/c
    >>> spec = 1.75
    >>> m*spec*frclim.sefl(1.5, 75, faf)
    9745.4
    """
    # Below the fundamental frequency the limit is flat; above it the
    # limit rolls off proportionally to f0/f.
    return c if f <= f0 else f0 * c / f
|
3430641850aa7fc8a27d2fff3dd15b0ca2d7c35b
| 57,949
|
import math
def total_delay_seconds(media_info, args):
    """Total number of seconds to skip (beginning delay + ending delay).

    Each delay is a percentage of the media duration, rounded down.
    """
    duration = media_info.duration_seconds
    start_skip = math.floor(duration * args.start_delay_percent / 100)
    end_skip = math.floor(duration * args.end_delay_percent / 100)
    return start_skip + end_skip
|
13a8590f84c820587e21a3e411adf7216fad8f60
| 57,950
|
def dict_slice(Dict, *keys):
    """Return a shallow-copied dict restricted to the given keys.

    Works on any dict-like object; the result is a plain dict. Every key
    must be present in ``Dict`` (a missing key raises ``KeyError``).

    Example: if d = {'abc': 12, 'def': 7, 'ghi': 32, 'jkl': 98}
    then dict_slice(d, 'abc', 'ghi') yields {'abc': 12, 'ghi': 32}
    """
    return {key: Dict[key] for key in keys}
|
4c5fc3a548b2e46853810487fe77655d00584761
| 57,953
|
def assertion_with_name(name):
    """Returns a control assertion by name (case-insensitive via title-casing)."""
    valid_assertions = ("Confidentiality", "Integrity", "Availability",
                        "Security", "Privacy")
    wanted = name.title()
    # next() raises StopIteration for unknown names, matching the original.
    return next(a for a in valid_assertions if a == wanted)
|
6e04cfc94904d6a9ccb818bb89ccde5d6d3ec9f2
| 57,954
|
from typing import Iterable
def all_indices(it: Iterable, item: object) -> Iterable[int]:
    """Lazily yield every index of ``item`` within ``it``."""
    for position, element in enumerate(it):
        if element == item:
            yield position
|
0e22987e91d5aaafd4fc0eae70949b1086fa3abe
| 57,957
|
from typing import List
def construct_service_principal_names(services: List[str], hostnames: List[str]) -> List[str]:
    """Build kerberos service principal names for every service/hostname pair.

    Each SPN has the form ``service/hostname``.
    """
    return [serv + '/' + host for serv in services for host in hostnames]
|
b431cb6e6c9db39f965b4614a80948cfc8f07875
| 57,961
|
def remove_padding(X, pad_value):
    """Drop every element equal to ``pad_value`` from the (numpy-style) array.

    Convenience for stripping padding added during batch generation.
    """
    keep_mask = X != pad_value
    return X[keep_mask]
|
968de77558270bad4858db63374eb2c2e7a1148c
| 57,965
|
def html_movie_embed_wmp(moviefile, width=400, height=400):
    """Return HTML text for embedding a movie file
    (Windows Media Player code).

    The returned markup uses the legacy IE <object>/<embed> pair for the
    WMP ActiveX control; ``moviefile``, ``width`` and ``height`` are
    interpolated via %-formatting over ``vars()``.

    NOTE(review): the <object> width/height are hard-coded to 180x200 in
    the template; only the inner <embed> honours the ``width``/``height``
    parameters — presumably intentional, but worth confirming.
    """
    # %(...)s placeholders below are filled from the local namespace
    # (moviefile, width, height) by the trailing `% vars()`.
    text = """
<object id="MediaPlayer1" width="180" height="200"
classid="CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95"
codebase="https://activex.microsoft.com/activex/controls/mplayer/en/nsmp2inf.cab#Version=5,1,52,701"
standby="Loading Microsoft Windows Media Player components..."
type="application/x-oleobject" align="middle">
<param name="FileName" value="%(moviefile)s">
<param name="ShowStatusBar" value="True">
<param name="DefaultFrame" value="mainFrame">
<param name="autostart" value="false">
<embed type="application/x-mplayer2"
pluginspage = "https://www.microsoft.com/Windows/MediaPlayer/"
src="%(moviefile)s"
autostart="false"
align="middle"
width="%(width)s"
height="%(height)s"
loop="100"
defaultframe="rightFrame"
showstatusbar="true">
</embed>
</object>
<!--
<a href="%(moviefile)s"><font size="2">Download movie file</font></a>
<a href="https://www.microsoft.com/windows/windowsmedia/mp10/default.aspx">
<font size="1">Download Windows Media Player</font></a></p>
-->
<!--
Attributes of the <embed> tag are:
src - tells what file to use.
autostart="true" - tells the computer to start the Video playing upon loading the page.
autostart="false" - tells the computer not to start the Video playing upon loading the page. You must click the start button to make the Video play.
align=middle - tells the computer to put the start/stop buttons to the middle.
width= and height= - are the dimensions of a small button panel that will appear when the page loads and contains both a START & STOP button so the visitor can start/stop the Video.
loop=2 - will play the Video for two complete loops.
-->
""" % vars()
    return text
|
757a5ea70c81da9b071336cfeee5c38055f33645
| 57,972
|
def intersection(r1, r2):
    """
    Intersect two rectangles given as [x, y, w, h].

    Returns the overlapping rectangle as [x, y, w, h], or None when the
    rectangles do not overlap. Widths/heights are treated inclusively
    (a rectangle spans x .. x+w-1).
    """
    assert len(r1) == 4 and len(r2) == 4, "Rectangles should be defined as [x,y,w,h]"
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[0] + r1[2] - 1, r2[0] + r2[2] - 1)
    bottom = min(r1[1] + r1[3] - 1, r2[1] + r2[3] - 1)
    width = right - left + 1
    height = bottom - top + 1
    if width <= 0 or height <= 0:
        return None
    return [left, top, width, height]
|
189513e88db7c3db1855afb11dd5dfdf3f649333
| 57,979
|
import torch
def detection_collate(batch):
    """
    Collate a batch whose boxes / gt_classes have variable length,
    zero-padding them to the longest sample in the batch.

    Arguments:
    batch -- list of tuple (im, boxes, gt_classes, num_obj)
        im -- tensor of shape (3, H, W)
        boxes -- tensor of shape (N, 4)
        gt_classes -- tensor of shape (N)
        num_obj -- tensor of shape (1)

    Returns:
    tuple of tensors shaped (batch, 3, H, W), (batch, N, 4),
    (batch, N) and (batch, 1), where N is the max object count.
    """
    batch_size = len(batch)
    images, boxes_list, classes_list, counts = zip(*batch)
    # Pad every sample up to the largest object count in the batch.
    max_objs = max(int(c.item()) for c in counts)
    boxes_out = torch.zeros((batch_size, max_objs, 4))
    classes_out = torch.zeros((batch_size, max_objs))
    for idx, (sample_boxes, sample_classes, count) in enumerate(
            zip(boxes_list, classes_list, counts)):
        boxes_out[idx, :count, :] = sample_boxes
        classes_out[idx, :count] = sample_classes
    return torch.stack(images, 0), boxes_out, classes_out, torch.stack(counts, 0)
|
de388ae800f84c2d8c95b16d0c30d4ad47c33406
| 57,982
|
def get_json_patch(object_kind: str, index: int, image_name: str):
    """
    Build a JSONPatch ``replace`` operation for the container image at
    position ``index``, with the path chosen by ``object_kind``:
    Pod and CronJob have dedicated paths; everything else uses the
    generic pod-template path.
    """
    special_paths = {
        "Pod": f"/spec/containers/{index}/image",
        "CronJob": (
            f"/spec/jobTemplate/spec/template/spec/containers/{index}/image"
        ),
    }
    default_path = f"/spec/template/spec/containers/{index}/image"
    return {
        "op": "replace",
        "path": special_paths.get(object_kind, default_path),
        "value": image_name,
    }
|
8f22b8211c158eb5ab8d0de7b94a33bb420c73c2
| 57,984
|
def combine_costs(df, func):
    """Collapse multiple cost rows per rollout/search into one.

    ``df`` is a dataset with several cost values per rollout/search and
    ``func`` maps a series of costs to a single value (e.g. "min").
    Groups by every column except the cost-related ones (which may
    contain NaN) and aggregates "cost" with ``func``.
    """
    cost_columns = {"cost", "start_cost", "best_cost", "optimality"}
    group_columns = [column for column in df.columns if column not in cost_columns]
    grouped = df.groupby(group_columns)
    return grouped.aggregate({"cost": func}).reset_index()
|
96f739036358eb267777f7ead2283a86e1527499
| 57,989
|
def unique_timestamps(timestamps):
    """
    Remove duplicate timestamps, keeping the first occurrence of each.

    Uses ``dict.fromkeys`` so the result order is deterministic (original
    order); the previous ``list(set(...))`` returned arbitrary order.
    """
    return list(dict.fromkeys(timestamps))
|
d2a9ede6214370fc1a123feee85246987df46d64
| 57,990
|
def arr_to_dict(arr):
    """
    Map a 2-D array-like (list of lists, tuple of tuples, np.ndarray)
    to a dict of ``(row, col) -> value``.

    Example
        arr_to_dict([['a','b'],['c','#']]) == {(0, 0): 'a', (0, 1): 'b', (1, 0): 'c', (1, 1): '#'}

    Strings are rejected (returns None after printing a hint).
    """
    if isinstance(arr, str):
        print('only works with list of lists or np.ndarray. Use grid_to_dict if input is a string grid ')
        return
    # Row width taken from the first row, matching the original behaviour.
    return {
        (row, col): arr[row][col]
        for row in range(len(arr))
        for col in range(len(arr[0]))
    }
|
a351617d9bd46b748ac7376effc4576f0ba3a4b2
| 57,993
|
import re
def get_num(name):
    """
    Return the first number embedded in a test case name, or 0 when
    there is none. Useful for sorting.
    """
    match = re.search(r"\d+", name)
    return int(match.group(0)) if match else 0
|
0e96fcf549ae7e3a4b3bae804dfc416864a6eb53
| 57,994
|
def standardize_template_names(t):
    """Normalize a template name for matching: trim, lower-case, spaces -> underscores."""
    normalized = t.strip().lower()
    return normalized.replace(" ", "_")
|
40e2eb298f9968bd5fd6e2c5a38ace0ab2d6d945
| 57,997
|
import ast
def _is_celery_dict_task_definition(dict_: ast.Dict) -> bool:
"""
determine whether the Dict is a Celery task definition
"""
celery_task_dict_target_keys = {"task", "schedule"}
# We are looking for the `task` and `schedule` keys that all celery tasks
# configured via a Dict have
if len(dict_.keys) >= 2:
for key in dict_.keys:
if isinstance(key, ast.Str):
if key.s in celery_task_dict_target_keys:
celery_task_dict_target_keys.remove(key.s)
if not celery_task_dict_target_keys:
return True
return len(celery_task_dict_target_keys) == 0
|
7e4051f22d6bb6b6317adee21e3906642f8a3cbd
| 58,000
|
def getIndex(header, item):
    """Return the position of ``item`` in ``header`` (case-insensitive), or None."""
    target = item.lower()
    return next(
        (position for position, name in enumerate(header) if name.lower() == target),
        None,
    )
|
c7ca274add641cb6efea4d507448eb78e1eec12b
| 58,003
|
def redact_config_dict(data):
    """
    Recursively replace non-empty "api_key" values with "<REDACTED>",
    in place, so logging never prints plain api keys. Returns ``data``.
    """
    for key, value in data.items():
        if key == "api_key" and value:
            data[key] = "<REDACTED>"
        # Recurse into nested configuration sections.
        if isinstance(value, dict):
            redact_config_dict(value)
    return data
|
2543ab7ffac77c5f85b43abf5df1f46cdd9a85e8
| 58,005
|
def parse_line(line):
    """
    Parse a claim line such as ``#1196 @ 349,741: 17x17`` into
    ``[x, y, x_length, y_length]``, shifting the origin by one and
    negating the y axis.
    """
    coords_part, dims_part = line.split(' @ ')[1].split(': ')
    col_str, row_str = coords_part.split(',')
    x = int(col_str) + 1
    y = -(int(row_str) + 1)
    width_str, height_str = dims_part.split('x')
    return [x, y, int(width_str), int(height_str)]
|
03b66653e6a17c0693a073561a968a6f114c53ff
| 58,006
|
import json
def _error_message(msg=None):
"""
Helper function to convert error messages into the JSON format.
"""
return json.dumps({"error": msg})
|
850693ae9ae2629a208eb9bc0ca7ba1cbf808918
| 58,010
|
def partial(func, *pargs, **pkwargs):
    """
    Performs partial application on `func`.

    :param func: function to apply
    :param pargs: default positional parameters
    :param pkwargs: default keyword parameters
    :return: Proxy functor
    """
    def proxy(*cargs, **ckwargs):
        # Call-site kwargs win over the pre-bound defaults.
        merged_kwargs = {**pkwargs, **ckwargs}
        return func(*(pargs + cargs), **merged_kwargs)
    return proxy
|
d54f23d7389fa32aac2bd2a8255fae9b03f475fe
| 58,016
|
def obsfateverb(successorset, markers):
    """Return the verb summarizing the successorset and potentially using
    information from the markers.

    Empty -> 'pruned', single successor -> 'rewritten', several -> 'split'.
    (``markers`` is currently unused but kept for API compatibility.)
    """
    if not successorset:
        return 'pruned'
    return 'rewritten' if len(successorset) == 1 else 'split'
|
fa0fc183e829d90f11a060562dcf07d373727744
| 58,018
|
def _is_parent_pathway(shortest_path, targets):
"""Returns true if ShortestPath is a parent pathway, false if not.
A ShortestPath object is the parent of a branch if its terminal residue is the
only surface exposed residue in the path. For example, if targets=[A,B,C] and
the pathway is [H,I,C], then this pathway is a parent pathway. In contrast, if
the pathway is [H,B,A], then this pathway is not a parent pathway.
Parameters
----------
shortest_path: ShortestPath
ShortestPath object
targets: list of str
List of surface exposed residues
Returns
-------
bool
True if path is ShortestPath object is a parent pathway
"""
count = 0
for res in shortest_path.path:
if res in targets:
count += 1
return count == 1
|
87543303c2957e67303b348597f66ef5a02366ae
| 58,021
|
def get_ratings_tuple(entry):
    """Parse one ratings line of the form UserID::MovieID::Rating::Timestamp.

    Args:
        entry (str): a line in the ratings dataset
    Returns:
        tuple: (UserID, MovieID, Rating) as (int, int, float); the
        timestamp field is discarded.
    """
    user_id, movie_id, rating = entry.split('::')[:3]
    return int(user_id), int(movie_id), float(rating)
|
0d55ec513ff83bd8026e2a43201b140e5ec4e6fc
| 58,022
|
import io
import tokenize
def _decode_source(source_bytes):
"""
Decode bytes representing source code and return the string. Universal newline support is used in the decoding.
Based on CPython's implementation of the same functionality:
https://github.com/python/cpython/blob/3.9/Lib/importlib/_bootstrap_external.py#L679-L688
"""
# Local imports to avoid bootstrap issues
# NOTE: both modules are listed in compat.PY3_BASE_MODULES and collected into base_library.zip.
source_bytes_readline = io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = io.IncrementalNewlineDecoder(decoder=None, translate=True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
|
1bd6efc154cfb6bf384d0f70aff68ff2cb5a21e9
| 58,029
|
import math
def scanette_status(intval, floatval):
    """Convert Event status value into Scanette status/result field.

    Scanette result values are sometimes int, sometimes float, sometimes "?":
    equal values collapse to the int form, NaN maps to "?", anything
    else keeps the float.
    """
    if intval == floatval:
        return intval
    return "?" if math.isnan(floatval) else floatval
|
59018b3f1198d2a7d45b8909f3f82db083b50690
| 58,031
|
def determine_axes(f, *vars):
    """
    Determine the axes along which the FT should be performed.

    One variable must be supplied per dimension of ``f``; a ``None``
    entry marks an axis that should not be transformed.
    """
    if len(vars) != len(f.shape):
        raise TypeError('The number of variables has to match the dimension of '
                        '`f`. Use `None` for axis with respect to which no '
                        'transform should be performed.')
    return [axis for axis, var in enumerate(vars) if var is not None]
|
324605cebf0550824eacb048e6ea6e94d5c293a6
| 58,033
|
def to_lower(string):
    """Return a lower-cased copy of the given string."""
    return string.lower()
|
d985909da66a186c2af27e04245938a9903dfd89
| 58,036
|
def _GetValueAndIndexForMin(data_array):
"""Helper function to get both min and argmin of a given array.
Args:
data_array: (list) array with the values.
Returns:
(tuple) containing min(data_array) and argmin(data_array).
"""
value_min = min(data_array)
ind_min = data_array.index(value_min)
return (value_min, ind_min)
|
3f5a67455ec86c16d5792e2f4e92925b7cfddbf5
| 58,038
|
def runs(runs: list) -> dict:
    """Wrap a list of runs in a GitHub-API-shaped workflow-runs dict."""
    payload = {"total_count": len(runs)}
    payload["workflow_runs"] = runs
    return payload
|
f2c02988154764099bcb0c7b64601a5257c7c7e4
| 58,040
|
def _get_prop(properties: list, key: str):
"""
Get a property from list of properties.
:param properties: The list of properties to filter on
:param key: The property key
:return:
"""
result = list(filter(lambda prop: prop["name"] == key, properties))
if len(result) == 0:
raise ValueError(f"Failed to find {key} in property set")
return result[0]["value"]
|
91ff1f77843f4a857aa00fa8cb5038355816af83
| 58,042
|
def milliseconds_to_nanoseconds(value):
    """Convert from ms to ns (used for pg_stat* conversion to metrics with units in ns)."""
    return value * 10 ** 6
|
4c6d680aae3260304e9ed5865a197592b8b0bea3
| 58,047
|
def jcentury_to_jday(juliancentury: float) -> float:
    """Convert a Julian Century number to a Julian Day (J2000 epoch = 2451545.0)."""
    days_per_century = 36525.0
    return juliancentury * days_per_century + 2451545.0
|
552ab2ce6c66ffe55ba1d042bd23d0f6523529ac
| 58,049
|
import inspect
def get_func_parameter_names(func):
    """Gets the list of parameter names of a function."""
    params = inspect.signature(func).parameters
    return [name for name in params]
|
1b67919a55db122f308e51070f1ef82a463df364
| 58,050
|
def parse_tests(input_file):
    """
    Parses test commands from an input file.

    Extracts one command per line (whitespace-stripped). The file is
    opened with a context manager so the handle is always closed — the
    previous version leaked the open file object.
    """
    with open(input_file) as handle:
        return [line.strip() for line in handle]
|
23495b0fe2a280724f7fcfb4fd11e2d8b628e115
| 58,054
|
import torch
def calculate_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the WGAN-GP gradient penalty loss.

    Samples random points on the lines between real and fake samples,
    evaluates the critic ``D`` there, and penalises the squared deviation
    of the gradient norm from 1.

    Args:
        D: critic/discriminator module; called on the interpolated batch.
        real_samples, fake_samples: tensors of identical shape
            (batch, C, H, W) — assumed 4-D, since the interpolation
            weight is drawn with shape (batch, 1, 1, 1).
    Returns:
        scalar tensor: mean squared distance of per-sample gradient
        L2 norms from 1.
    """
    # Random weight for interpolation between real and fake samples
    eta = torch.rand((real_samples.size(0), 1, 1, 1), device=real_samples.device)
    # Get random interpolation between real and fake samples;
    # requires_grad_ so autograd tracks gradients w.r.t. the inputs.
    interpolates = (eta * real_samples + ((1 - eta) * fake_samples)).requires_grad_(True)
    # calculate probability of interpolated examples
    d_interpolates = D(interpolates)
    # Get gradient w.r.t. interpolates; grad_outputs of ones makes this
    # the gradient of the summed critic output.
    fake = torch.ones_like(d_interpolates, device=real_samples.device, requires_grad=False)
    gradients = torch.autograd.grad(outputs=d_interpolates,
                                    inputs=interpolates,
                                    grad_outputs=fake,
                                    create_graph=True,
                                    retain_graph=True)[0]
    # Flatten per sample, then penalise ||grad||_2 deviating from 1.
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
|
ba790a4a6c8fc0088e5d0a5717e0e59c0436de87
| 58,058
|
def atleast_ndim(x, ndim):
    """Reshape a tensor so it has at least ``ndim`` dimensions.

    Trailing singleton dimensions are appended as needed; tensors that
    already have enough dimensions are returned view-unchanged. ``None``
    passes through as ``None``.
    """
    if x is None:
        return None
    extra_dims = [1] * (ndim - x.ndim)
    return x.view(list(x.shape) + extra_dims)
|
ee4e4b35806bfbfd54223962dd12dd2ad078881d
| 58,063
|
def auto_pad_width(
        pad_width,
        shape,
        combine=None):
    """
    Ensure pad_width value(s) to be consisting of integer.

    Args:
        pad_width (float|int|iterable[float|int]): Size of the padding to use.
            This is useful for mitigating border effects.
            If iterable, a value for each dim must be specified.
            If not iterable, all dims will have the same value.
            If int, it is interpreted as absolute size.
            If float, it is interpreted as relative to corresponding dim size.
        shape (iterable[int]): The shape to associate to `pad_width`.
        combine (callable|None): The function for combining shape values.
            If None, uses the corresponding dim from the shape.

    Returns:
        pad_width (int|tuple[tuple[int]]): The absolute `pad_width`.
            If input `pad_width` is not iterable, result is not iterable.

    See Also:
        np.pad

    Examples:
        >>> shape = (10, 20, 30)
        >>> auto_pad_width(0.1, shape)
        ((1, 1), (2, 2), (3, 3))
        >>> auto_pad_width(0.1, shape, max)
        ((3, 3), (3, 3), (3, 3))
        >>> shape = (10, 20, 30)
        >>> auto_pad_width(((0.1, 0.5),), shape)
        ((1, 5), (2, 10), (3, 15))
        >>> auto_pad_width(((2, 3),), shape)
        ((2, 3), (2, 3), (2, 3))
        >>> auto_pad_width(((2, 3), (1, 2)), shape)
        Traceback (most recent call last):
            ....
        AssertionError
        >>> auto_pad_width(((0.1, 0.2),), shape, min)
        ((1, 2), (1, 2), (1, 2))
        >>> auto_pad_width(((0.1, 0.2),), shape, max)
        ((3, 6), (3, 6), (3, 6))
    """
    # Floats are scaled by the (possibly combined) dim size; ints pass through.
    def float_to_int(val, scale):
        return int(val * scale) if isinstance(val, float) else val
    try:
        iter(pad_width)
    except TypeError:
        # Scalar input: promote to a single (before, after) pair.
        pad_width = ((pad_width,) * 2,)
    finally:
        # `finally` runs for both scalar and iterable inputs: broadcast a
        # single pair over all dims, then convert each bound to absolute.
        combined = combine(shape) if combine else None
        pad_width = list(
            pad_width if len(pad_width) > 1 else pad_width * len(shape))
        assert (len(pad_width) == len(shape))
        for i, (item, dim) in enumerate(zip(pad_width, shape)):
            lower, upper = item
            pad_width[i] = (
                float_to_int(lower, dim if not combine else combined),
                float_to_int(upper, dim if not combine else combined))
        pad_width = tuple(pad_width)
    return pad_width
|
8c78aab7cfd30dadb1323e00da92764fe862619d
| 58,074
|
from typing import Dict
def recursive_merge(a: Dict, b: Dict, path=None) -> Dict:
    """Recursively merge ``b`` into a copy of ``a`` and return the result.

    Nested dicts are merged key-wise; for any other clash ``b`` wins.
    Mostly taken from user `andrew cooke` on
    [stackoverflow](https://stackoverflow.com/a/7205107)
    """
    trail = path or []
    merged = a.copy()
    for key, b_value in b.items():
        a_value = merged.get(key)
        if key in a and isinstance(a_value, dict) and isinstance(b_value, dict):
            merged[key] = recursive_merge(a_value, b_value, trail + [str(key)])
        else:
            merged[key] = b_value
    return merged
|
1509ecc5ea89bfbcdd5b9126cf968d7fc9960aa5
| 58,079
|
def strip_and_split(s):
    """Strip trailing/leading NULs and split on NUL.

    Useful for parsing output of git commands with -z flag.
    """
    trimmed = s.strip("\x00")
    return trimmed.split("\x00")
|
606228de60d0038aa7a73e843b744fddb2ef46ee
| 58,082
|
import hashlib
def get_file_hash(path: str) -> str:
    """Return the hash used for caching.

    Incorporates the file contents as well as its path: the contents are
    fed into MD5 first, then the path bytes.
    """
    digest = hashlib.md5()
    with open(path, 'rb') as infile:
        digest.update(infile.read())
    digest.update(path.encode())
    return digest.hexdigest()
|
9e833340629461429a98142193bc18b46b262a64
| 58,093
|
from collections import defaultdict
from fnmatch import fnmatch
def groupkeys(keys, patterns):
    """Groups the given set of keys using the given fnmatch patterns.

    Patterns are tried sequentially; a key joins the bucket of the first
    pattern it matches. Unmatched keys are grouped under the ``None`` key.

    Returns a dict of {pattern: [matching keys]}. The old
    ``dict(**ret)`` construction raised TypeError whenever any key was
    unmatched (``None`` is not a valid keyword argument); a plain
    ``dict(ret)`` copy fixes that.
    """
    ret = defaultdict(list)
    for key in keys:
        # First matching pattern wins; None marks "no pattern matched".
        bucket = next((p for p in patterns if fnmatch(key, p)), None)
        ret[bucket].append(key)
    return dict(ret)
|
407a289b8de1f5b1007155425213e66bac37744c
| 58,096
|
def main(args=None):
    """Sample main app; always exits successfully."""
    return 0
|
2ec12436587febf686c56ffde33ad7894fb76c71
| 58,097
|
def before_send_to_sentry(event, hint):
    """Edit event properties before logging to Sentry.

    Docs: https://docs.sentry.io/error-reporting/configuration/filtering/?platform=python#before-send

    Overwrites the event's logger name with the pipeline-wide
    `ingest_pipeline` instead of the function-specific logger name
    (e.g. `__main___errors`) used internally within Ingest Pipeline.
    """
    event["logger"] = "ingest_pipeline"
    return event
|
59c5d0c90de2f1c8a42899ddb941dbce7d63c45b
| 58,099
|
def set(list, index, item):
    """
    Return a cons-list with the element at ``index`` replaced by ``item``.

    Lists are nested (head, tail) pairs terminated by (). If the index is
    out of bounds (negative or past the end), the list is returned
    unmodified.
    """
    if list == () or index < 0:
        return list
    head, tail = list
    if index == 0:
        return (item, tail)
    return (head, set(tail, index - 1, item))
|
3da2c73ee26738ec686d9a7a5674bd279703f0ac
| 58,101
|
import json
def ConvertToHtmlString(trace_result):
    """Convert a trace result to the format to be output into HTML.

    Dicts and lists are JSON-encoded; strings pass through unchanged;
    anything else is rejected.
    """
    if isinstance(trace_result, (dict, list)):
        return json.dumps(trace_result)
    if isinstance(trace_result, str):
        return trace_result
    raise ValueError('Invalid trace result format for HTML output')
|
178fb50394c717e9805b8dfc6c566d41536f0bc0
| 58,104
|
from typing import Any
import numbers
def is_number(thing: Any) -> bool:
    """
    Return whether ``thing`` is a number (integer or floating point).

    Unlike normal Python, ``True``/``False`` are NOT considered numbers.

    :param thing: the value to check.
    :return: ``True`` if the value is a number.
    """
    if isinstance(thing, bool):
        return False
    return isinstance(thing, numbers.Number)
|
8e278025153839093afcc6fd3a4c7472f689fe9f
| 58,107
|
from typing import List
def smooth_data(scalars: List[float], weight: float) -> List[float]:
    """Tensorboard-style exponential-moving-average smoothing of noisy data.

    :param scalars: data points to smooth
    :param weight: EMA weight in [0, 1]; the running average is seeded
        with the first data point
    :return: smoothed data points (same length as input)
    """
    assert 0 <= weight <= 1
    smoothed: List[float] = []
    running = scalars[0]
    for value in scalars:
        running = running * weight + (1 - weight) * value
        smoothed.append(running)
    return smoothed
|
e24f73f9464dd47474ddd20423930687822d2356
| 58,110
|
def clean_col_names(df):
    """
    Clean up the column names in the mta dataframe.

    Strips stray whitespace from every column name (the raw MTA feed pads
    'EXITS' with a long run of trailing blanks, which the old code matched
    with a brittle hard-coded literal) and renames "C/A" to "CA".

    :param df: mta dataframe
    :return: mta dataframe copy with cleaned columns
    """
    df = df.copy()
    df.columns = [col.strip() if isinstance(col, str) else col for col in df.columns]
    df.rename(columns={"C/A": "CA"}, inplace=True)
    return df
|
62ee58e79095823c69d18a81bb7845bb6224d1ae
| 58,113
|
def decode2string(index2token, indices, end_token="END_TOKEN", remove_END_TOKEN=False):
    """
    Decode a list of indices to a space-joined string.

    Args:
        index2token: a dictionary that maps indices to tokens
        indices: a list of indices that correspond to tokens
        end_token: the sentinel token marking the end of a sequence
        remove_END_TOKEN: whether to strip trailing end tokens (optional)
    Returns:
        the decoded string
    """
    decoded = [index2token[index] for index in indices]
    # Trim every trailing end token; idiomatic truthiness replaces the
    # old `remove_END_TOKEN == True` comparison, and pop() replaces the
    # nested while/del dance.
    if remove_END_TOKEN:
        while decoded and decoded[-1] == end_token:
            decoded.pop()
    return ' '.join(decoded)
|
5eaef4a1f177b840f5b97b25aee2e9c4a4190aaf
| 58,114
|
def slqs(x_entr, y_entr):
    """
    Computes SLQS score from two entropy values.

    :param x_entr: entropy value of x
    :param y_entr: entropy value of y
    :return: SLQS score, or -1.0 when y's entropy is zero
    """
    if y_entr == 0.0:
        return -1.0
    return 1 - x_entr / y_entr
|
049b0a0c61a6119d2dd444e7b9489907b3e0ea03
| 58,121
|
def num_to_data_len(num):
    """
    Convert a permutation number to the length (in bytes) of the data it
    represents.
    """
    # The encoding prepends a 1 bit to make the math work; bin(num)[3:]
    # drops the '0b' prefix plus that sentinel bit.
    payload = int(bin(num)[3:], 2)
    # Back to hex digits (without the '0x' prefix); two digits per byte.
    hex_digits = hex(payload)[2:]
    return len(hex_digits) // 2
|
4b9ebe2bd3dd8b37e04df571c5131ea0cb2b31ea
| 58,127
|
def normalize_labels(labels):
    """Normalize label weights to probabilities.

    Divides every value by the total; when the total is not positive the
    values are returned unchanged. Uses ``.items()`` — the original
    ``.iteritems()`` is Python-2-only and raises AttributeError on
    Python 3.
    """
    total = sum(labels.values())
    normalized = {}
    for key, value in labels.items():
        normalized[key] = value / total if total > 0 else value
    return normalized
|
99ea3918566ca2343c4cbf87e5455e2ec207e21c
| 58,131
|
def partition(f, xs):
    """
    Split ``xs`` into (passing, failing) by predicate ``f``, preserving
    the container type.

    Works like filter, except it returns a two-item tuple: items for
    which ``f`` is true, then items for which it is false.

    The original implementation was broken on Python 3: ``filter``
    returns an iterator, which the ``x not in true`` membership test
    consumed, so the first element of the result was always empty; it
    also mis-handled duplicate values.
    """
    t = type(xs)
    passed = [x for x in xs if f(x)]
    failed = [x for x in xs if not f(x)]
    return t(passed), t(failed)
|
6fc80db7a355db801bc6d58c8bb943d67663197f
| 58,132
|
def _vpd(es, ea):
    """Vapor pressure deficit

    Parameters
    ----------
    es : ee.Image or ee.Number
        Saturated vapor pressure [kPa].
    ea : ee.Image or ee.Number
        Actual vapor pressure [kPa].

    Returns
    -------
    ee.Image or ee.Number
        Vapor pressure deficit [kPa], clamped at zero so a supersaturated
        input (ea > es) cannot produce a negative deficit.
    """
    return es.subtract(ea).max(0)
|
c8942af990fc105886389cbbadedd9ae664d5a87
| 58,140
|
def has_marker(obj, marker_name):
    """Determine whether a task function carries a marker with the given name.

    Markers are read from the object's ``pytaskmark`` attribute; objects
    without it have no markers.
    """
    markers = getattr(obj, "pytaskmark", [])
    return any(m.name == marker_name for m in markers)
|
fc25197996e7f04108dd1b0268a9851d1d09f182
| 58,141
|
def kib(num):
    """Return the number of bytes in ``num`` KiB."""
    bytes_per_kib = 1024
    return num * bytes_per_kib
|
fc947b4315e746b54b92c99731520449f6cf2096
| 58,142
|
import gzip
def opengz(file, mode="r"):
    """
    Transparently open a gzipped or non-gzipped file for reading or writing.

    For reads, the gzip magic number is sniffed from the first two bytes;
    for writes, gzip is chosen by mode ('wb'/'ab') or a '.gz' suffix.

    :param file: Path of file to open for writing.
    :param mode: Same mode options as python's default open.
    :return: A open file handle.
    """
    assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
    if mode == 'wb' or (mode == 'w' and file.endswith('.gz')):
        return gzip.open(file, 'wb')
    elif mode == 'ab' or (mode == 'a' and file.endswith('.gz')):
        return gzip.open(file, 'ab')
    elif mode == 'w':
        return open(file, 'w')
    f = open(file, 'rb')
    # The gzip magic number is the byte pair 0x1f 0x8b. Compare against
    # bytes — the original compared against the str '\x1f\x8b', which
    # never matches a bytes read on Python 3, so gzipped files were
    # silently opened raw.
    if f.read(2) == b'\x1f\x8b':
        f.seek(0)
        return gzip.GzipFile(fileobj=f, mode=mode)
    else:
        f.close()
        return open(file, mode)
|
941946d53289094e6f24a8e92ad7441550f5ffc9
| 58,144
|
import cmath
import math
def qft(mem):
    """Apply quantum Fourier transform to the first t qubits.

    Performs the O(N^2) direct transform over the 2**t amplitudes stored
    on ``mem``, mutating ``mem.amplitudes`` in place and returning ``mem``.
    """
    size = 2**mem.t
    # Compute the principal N-th root of unity once; repeated complex
    # exponentiation inside the double loop would be expensive.
    root = cmath.exp(2 * math.pi * 1j / size)
    norm = math.sqrt(size)
    transformed = []
    for k, _ in enumerate(mem):
        total = 0
        for j in range(size):
            total += root**(j * k) * mem.amplitudes[j]
        transformed.append(total / norm)
    mem.amplitudes = transformed
    return mem
|
9fea18d2b60c154e675be94f94d78b2a149fedc5
| 58,147
|
import torch
def get_pair_dist(a, b):
    """Calculate pairwise Euclidean distances between two sets of points.

    Parameters
    ----------
    a, b : pytorch tensors of shape [batch, nres, 3]
        Cartesian coordinates of two sets of atoms.

    Returns
    -------
    pytorch tensor of shape [batch, nres, nres] with pairwise distances
    between atoms in a and b.
    """
    return torch.cdist(a, b, p=2)
|
e583d3f5feb9c3445d79b2a7ddd2b9624a123fd2
| 58,150
|
def isolatedfile_to_state(filename):
    """For a '.isolate' file, returns the path to the saved '.state' file."""
    suffix = '.state'
    return filename + suffix
|
8cd2456f2ec167ae1462c3b85fda2dfd52408dd7
| 58,151
|
def convert_numeric_in(value):
    """Coerce a numeric value (e.g. Decimal) into a plain float."""
    return float(value)
|
5d1e207d90e2e457035d9a5ecfa63430a971932f
| 58,154
|
def sigmoid_poly(x):
    """A Chebyshev polynomial approximation of the sigmoid function.

    Degree-9 odd-power polynomial plus a 0.5 constant; the arithmetic is
    evaluated in exactly the same operation order as the reference
    implementation so results match bit-for-bit.
    """
    c0 = 0.5
    c1 = 0.2159198015
    c3 = -0.0082176259
    c5 = 0.0001825597
    c7 = -0.0000018848
    c9 = 0.0000000072
    p1 = x
    p2 = (p1 * x)
    p3 = (p2 * x)
    p5 = (p2 * p3)
    p7 = (p2 * p5)
    p9 = (p2 * p7)
    return c9 * p9 + c7 * p7 + c5 * p5 + c3 * p3 + c1 * p1 + c0
|
b5fef513e9722fea4693cfeac58c37e8a21ffa43
| 58,157
|
def flatten_pipes_dict(pipes_dict: dict) -> list:
    """
    Convert the standard two-level-nested pipes dictionary into a flat
    list of its innermost values.
    """
    return [
        pipe
        for connector in pipes_dict.values()
        for metric in connector.values()
        for pipe in metric.values()
    ]
|
efb4b97855b8968961fa067b51b5245d8e54cb5f
| 58,164
|
import importlib
def get_backend(module, backend_class, channel, appname):
    """Instantiate a pubsub backend class by module and class name.

    e.g.: ``get_backend('backends', 'PubNubBackend', chan, app)`` imports
    the module, looks up the class, and constructs it with
    ``(channel, appname)``.
    """
    loaded = importlib.import_module(module)
    cls = getattr(loaded, backend_class)
    return cls(channel, appname)
|
fb46a49ee0e438580d3e7f051c034f38edb644fa
| 58,165
|
def format_string(code: str, pattern: str) -> str:
    """Format one `'code': 'pattern'` source line.

    Patterns that would run past column 63 are split at their midpoint;
    the second half goes on a continuation line that relies on Python's
    implicit adjacent-string-literal concatenation.
    """
    template = ' ' * 8 + "'{}': '{}'"
    if len(pattern) > 63 - len(code):
        # Too wide: emit the pattern across two source lines.
        half = len(pattern) // 2
        lines = template.format(code, pattern[:half]) + '\n'
        lines += ' ' * (len(code) + 12) + "'{}',\n".format(pattern[half:])
        return lines
    return template.format(code, pattern) + ',\n'
|
5ad8db799f80a0f8f99ccef7817350752b1eb1d7
| 58,169
|
def format_header_for_list_commands(base_header: str, rows_count: int,
                                    page: int, page_size: int, limit: int) -> str:
    """
    Build the readable-output header for list commands.

    The suffix depends on the pagination mode: 'Manual Pagination'
    (page_size given) or 'Automatic Pagination' (limit-driven).
    Args:
        base_header (str): The header prefix.
        rows_count (int): The number of rows in the output.
        page (int): Client's page number argument.
        page_size (int): number of records per page.
        limit (int): Client's limit argument.
    Returns:
        str: Header for readable output of the command.
    """
    if page_size:
        # Manual pagination: report the page position out of the total.
        total_pages = rows_count // page_size + (rows_count % page_size != 0)
        if rows_count > 0:
            suffix = (f' \nShowing page {page} out of {total_pages} total pages.'
                      f' Current page size: {page_size}.')
            return base_header + suffix
        return base_header
    # Automatic pagination: report the record window fetched via `limit`.
    return base_header + f' \nShowing 0 to {limit} records out of {rows_count}.'
|
f77f1c1eebe2414892c82912441c9b64c59dd81d
| 58,171
|
def Invoke(frontend, implementation):
    """Applies implementation on frontend and returns the result."""
    result = implementation(frontend)
    return result
|
ed20b384c653d7c6d225afab680dd9c537c15c28
| 58,173
|
import re
def extract_variable(filename, variable):  # type: (str, str) -> str
    """Extract an X.Y.Z version string assigned to `variable` in a python file.

    Args:
        filename: Path of the python source file to scan.
        variable: Name of the assignment to look for (e.g. '__version__').

    Returns:
        The version string, e.g. '1.2.3'.

    Raises:
        RuntimeError: If no matching assignment is found.
    """
    with open(filename, "r", encoding="utf8") as stream:
        # re.escape guards against regex metacharacters in the variable name.
        search_refind = r'{} = ["\'](\d+\.\d+\.\d+)["\']'.format(re.escape(variable))
        verdata = re.search(search_refind, stream.read())
    if verdata:
        return verdata.group(1)
    raise RuntimeError("Unable to extract version number")
|
5f36c138e74e310f26078a99a3c9abf5647a2ed4
| 58,176
|
import binascii
def reverseHex(data):
    """
    Flip the byte order in the given data (hex string); returns hex bytes.
    """
    raw = binascii.unhexlify(data)
    return binascii.hexlify(raw[::-1])
|
f21484a10d279eb6394fbb680ce061792604cbfe
| 58,185
|
def does_period_contain_data(period: str) -> bool:
    """Does the given period contain data or simulation?

    A run period name of exactly 6 characters denotes real data.

    Args:
        period: Run period to be checked.
    Returns:
        True if the period contains data.
    """
    return len(period) == 6
|
189fd7bd8f2833c1675acd04fea1cf43dfe9745b
| 58,188
|
def generate_pascal_row(row):
    """Generate the successive row in Pascal's triangle.

    Pads the row with a zero on each side and sums adjacent pairs:
    for row = [1, 4, 6, 4, 1], padded = [0, 1, 4, 6, 4, 1, 0], and
    adjacent sums give [1, 5, 10, 10, 5, 1].

    An empty row yields the apex row [1].
    """
    if not row:
        return [1]
    padded = [0] + row + [0]
    return [padded[i] + padded[i + 1] for i in range(len(padded) - 1)]
|
e823db24a54e9cce3f8c23462f6005c644ac6334
| 58,189
|
def decaying(start, stop, decay):
    """Yield an infinite series of linearly decaying values.

    Values follow start / (1 + decay * n) for n = 1, 2, ...; `stop`
    acts as a floor when decaying downward, a ceiling otherwise.
    """
    # Pick the clipping direction once: start/stop never change.
    limiter = max if start > stop else min
    step = 1.
    while True:
        yield limiter(start * 1./(1. + decay * step), stop)
        step += 1
|
c386ab1a3b72f7e2b6290c610a5a867e15cd0f55
| 58,196
|
def str2dict(dict_string):
    """
    Convert a string of key=value pairs to a dictionary.
    Format is 'KEY=value KEY=other value KEY=and some more values'.
    For example: str2dict('key1 = 1 key2 = a b c key3=23.4') returns
    {'key1': '1', 'key2': 'a b c', 'key3': '23.4'}.
    """
    segments = dict_string.split('=')
    # The first segment is purely the first key; the last segment is
    # purely the final value. Each middle segment is
    # "<previous value> <next key>".
    keys = [segments[0].strip()]
    values = []
    for segment in segments[1:-1]:
        pieces = segment.strip().rsplit(' ', 1)
        if len(pieces) == 1:
            # No space: the previous value is missing.
            keys.append(pieces[0])
            values.append('NONE')
        else:
            keys.append(pieces[1])
            values.append(pieces[0])
    values.append(segments[-1])
    return dict(zip(keys, values))
|
372b0e9610c80187c2ad534d2c797fc0341eb3ee
| 58,197
|
from typing import List
def _sampling_from_alias(
alias: List[int],
probs: List[float],
first_random: float,
second_random: float,
) -> int:
"""
This is aligned with the original node2vec implementation w/ 2 random numbers.
:param alias: the pre-calculated alias list
:param probs: the pre-calculated probs list
:param first_random: 1st random floating point number in the range [0.0, 1.0)
:param second_random: 2nd random floating point number in the range [0.0, 1.0)
Return the picked index in the neighbor list as next vertex in the random walk path.
"""
pick = int(first_random * len(alias))
if second_random < probs[pick]:
return pick
else:
return alias[pick]
|
1e67f5f3b7cff7c1e99fd05e31e7f17774862890
| 58,198
|
def is_csv_callback(query):
    """
    Callback check: identifies whether the pressed button came from the
    csv function (its callback data contains a comma).
    :param query: the button pressed
    :return: if the button pressed relates to the csv
    """
    return query.data.find(",") != -1
|
bd1da6ee18097f60c71d09436330cc1cbea4f4db
| 58,199
|
from typing import Dict
from typing import Any
def suffix_dict_keys(in_dict: Dict[str, Any], suffix: str) -> Dict[str, Any]:
    """Return a copy of the dictionary with `suffix` appended to every key."""
    suffixed = {}
    for key, value in in_dict.items():
        suffixed[key + suffix] = value
    return suffixed
|
e16ebf829c4f5cf8421014f44a806421a04d5a86
| 58,205
|
def tag(*tags):
    """Decorator to attach a set of tags to a test class or method."""
    def decorator(obj):
        # Store the tags as a set on the decorated object itself.
        obj.tags = set(tags)
        return obj
    return decorator
|
3c1a1dc79cbe980450aac38d2369e3ac8ea15e53
| 58,206
|
def _sub_space(m):
"""Helper function: given a regexp match, return a string of
spaces that's the same length as the matched string."""
return ' '*(m.end()-m.start())
|
a2e48e6588e9a6eff5880c29a362922942591637
| 58,207
|
import math
def humanise_bytes(num_bytes, si=False):
    """
    Make a human-readable string for a number of bytes
    >>> humanise_bytes(689275)
    '673.1 KiB'
    Taken from https://programming.guide/worlds-most-copied-so-snippet.html
    :param int num_bytes:
    :param int si: Whether to use SI units (powers of 1000). Defaults to
        False, which uses binary IEC-style units (powers of 1024).
    """
    unit = 1000 if si else 1024
    abs_bytes = abs(num_bytes)
    if abs_bytes < unit:
        # Below one kilo(/kibi)byte: print the raw byte count.
        return "{} B".format(num_bytes)
    # exp selects the unit tier: 1 => K, 2 => M, 3 => G, ...
    exp = int(math.log(abs_bytes) / math.log(unit))
    # thresh is the cutoff just below the next tier; values that would
    # round up to e.g. "1024.0" get bumped to the next unit instead.
    thresh = int(math.pow(unit, exp) * (unit - 0.05))
    # NOTE(review): the (thresh & 0xfff) == 0xd00 adjustment compensates for
    # a float-truncation edge case, per the linked article — confirm there.
    if exp < 6 and abs_bytes >= thresh - (52 if (thresh & 0xfff) == 0xd00 else 0):
        exp += 1
    # Prefix letter plus 'i' for binary units (KiB, MiB, ...).
    pre = ("kMGTPE" if si else "KMGTPE")[exp - 1] + ("" if si else "i")
    if exp > 4:
        # Scale very large values down one tier before dividing, presumably
        # to keep the double-precision division accurate — TODO confirm.
        num_bytes /= unit
        exp -= 1
    return "{:.1f} {}B".format(num_bytes / math.pow(unit, exp), pre)
|
9c8eb4aeda64b46ab04710a621d32a0a4f505a73
| 58,210
|
def get_name(node_name):
    """
    Omit any parenting information from the provided node name.
    :param str node_name:
    :return: Name
    :rtype: str
    """
    # Everything after the last '|' separator (or the whole name if none).
    _, _, short_name = node_name.rpartition("|")
    return short_name
|
dad13b8544110b96f6b1da7ee6a3bfcf71b83a89
| 58,212
|
def group(genes, entities):
    """
    For each gene, collect the entities whose first element matches it.
    :param genes: iterable of gene identifiers
    :param entities: sequence of tuples whose first item is a gene id
    :return: list of per-gene entity lists, in `genes` order
    """
    return [[entity for entity in entities if entity[0] == gene] for gene in genes]
|
1fd76114528930d218325228163341a1d345c689
| 58,218
|
def _GetElemByName(name, from_list):
"""Gets an element from the given list by its name field.
Raises an error if it doesn't find exactly one match.
"""
elems = [e for e in from_list if e.name == name]
if len(elems) != 1:
raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
return elems[0]
|
71a5dcdea6cd3f5f6f32accb7939a9b2fff38c73
| 58,220
|
def get_box_points(box):
    """
    Convert box corner points of form ((x1, y1), (x2, y2)) into the flat
    form [x1, y1, x2, y2].
    """
    (x1, y1), (x2, y2) = box
    return [x1, y1, x2, y2]
|
aad2aad17b3c767f0ac6f7dcce61fb4e6370fbbe
| 58,223
|
def expand_leaf_to_label(leaf_label):
    """
    Expand a leaf node label like '/a/b/c' into all of its prefix
    labels: ['/a', '/a/b', '/a/b/c'].
    """
    labels = []
    prefix = ''
    for part in leaf_label.split('/'):
        if not part:
            # Skip empty segments from leading/doubled slashes.
            continue
        prefix += '/' + part
        labels.append(prefix)
    return labels
|
e218cb50ec34f202b36db8cc6693bd45dd37ed4f
| 58,226
|
from pathlib import Path
from typing import List
def _list_files(d: Path) -> List[Path]:
""" Recursively list files in a dir and its sub dirs """
if d.is_file():
return [d]
else:
sub_dirs = [d for d in d.iterdir() if d.is_dir()]
files = [f for f in d.iterdir() if f.is_file()]
for d in sub_dirs:
files += _list_files(d)
return files
|
e684d81ab49b2b06b257d25e5b304b6e696002d0
| 58,231
|
def list_all_subclasses(superclass):
    """
    Get a list of all direct subclasses defined for a given superclass.

    Sorted by class name so the order is deterministic.
    """
    # __subclasses__() already returns a list; sort it directly by name
    # (the former [cls for cls in ...] wrapper was a redundant copy).
    return sorted(superclass.__subclasses__(), key=lambda cls: cls.__name__)
|
848db1ee8808d5f86f85eb2881b72859778a8185
| 58,235
|
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
|
961964cb2801db3d6e03f4af718c6b8a85129328
| 58,236
|
def bool2yn(b):
    """Helper function: maps True to "yes" and False to "no"."""
    # Indexing keeps the original's strictness: b must be a bool (or 0/1).
    answers = ("no", "yes")
    return answers[b]
|
038933006d39ad3c5c96b023968860747c36cbd3
| 58,240
|
def intervals_frac_overlap(iv1, iv2):
    """
    Given two intervals, return the fraction of iv1 that is covered by iv2.
    """
    covered = iv1.overlap_size(iv2.begin, iv2.end)
    return covered / iv1.length()
|
747eaacdf41cb4e9994973ed13e2cd79164cd717
| 58,243
|
def xy_to_xyz(x, y):
    """Convert `xyY` chromaticity (with Y normalized to 1) to `xyz`."""
    z = (1 - x - y) / y
    return [x / y, 1, z]
|
2a9948887a6d58a0b8ea3100a0d4ad5966891891
| 58,246
|
import re
def parse_size_to_KB(size_string: str) -> float:
    """Given a size string with various unit, convert to KB in float.

    wrk binary unit reference
    https://github.com/wg/wrk/blob/master/src/units.c#L29-L33
    Example:
        "200.56KB" -> 200.56
        "50MB" -> 51200
        "0.5GB" -> 524288
    A plain byte count (e.g. "500B") converts at 1000 bytes per KB.
    """
    # Group 1 - number: digits with an optional decimal part.
    #   (the old pattern `\d+.?\d+` required at least two digits and let the
    #   unescaped dot match any character, so inputs like "5MB" failed)
    # Group 2 - unit: KB / MB / GB / B (may be absent).
    parsed = re.split(r"(\d+(?:\.\d+)?)(\w*)", size_string)
    values = [val for val in parsed if val]
    number = float(values[0])
    unit = values[1] if len(values) > 1 else ""
    if unit == "KB":
        return number
    elif unit == "MB":
        return number * 1024
    elif unit == "GB":
        return number * 1024 * 1024
    # Bytes (or no recognizable unit)
    return number / 1000
|
36a71208d67ea5c105fa4dd8f9ba1633ba732911
| 58,249
|
def get_file_text(path):
    """Return the full text content of the file at `path`.

    Uses a context manager so the handle is closed even if reading raises
    (the previous open/read/close sequence leaked the handle on error).
    """
    with open(path, "r") as file_io:
        return file_io.read()
|
e7d165b8b62c24b8a34ef375350a67be17b8e69a
| 58,256
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.