content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def _reset() -> str:
"""Internal function returning ANSI escape sequence to reset all colors and boldness"""
return '\033[0m'
|
e5bb60a83f104d920a1c3c5be85365eba82d6b7d
| 81,908
|
def create_tf(b, a):
    """
    Create a transfer function given coefficient sequences for the numerator,
    b[i], and denominator, a[i]:

                b[0]*s**n + b[1]*s**(n-1) + ... + b[n]
        f(s) =  --------------------------------------
                a[0]*s**m + a[1]*s**(m-1) + ... + a[m]

    Arguments:
        b = numerator coefficients (any sequence: list, tuple or numpy array;
            previously only objects exposing `.shape` were accepted)
        a = denominator coefficients (any sequence)
    Return:
        tf = callable transfer function of s
    """
    def _polyval(coeffs, s):
        # Horner's scheme: no explicit powers of s, fewer multiplications,
        # better numerical stability; `0.0 * s` keeps array inputs working.
        acc = 0.0 * s
        for coeff in coeffs:
            acc = acc * s + coeff
        return acc

    def tf(s):
        return _polyval(b, s) / _polyval(a, s)
    return tf
|
b169f6e313f7bcd07bc19f3145b0f14a57791b1a
| 81,910
|
def get_playlist(target_playlist, playlists):
    """
    Return the first playlist in `playlists` whose 'name' equals
    `target_playlist`, or None when no such playlist exists.

    :param target_playlist: name to search for
    :param playlists: iterable of playlist dicts with a 'name' key
    :return: the matching playlist dict, or None
    """
    matches = (p for p in playlists if p['name'] == target_playlist)
    return next(matches, None)
|
1adf8c94e497519baede0d42476d1dfc92f4f1a9
| 81,911
|
import secrets
def _create_salt() -> str:
"""Create random salt."""
salt = secrets.token_hex(8)
return salt
|
0ef07248ad8815b722285e15d2d6a702609a1498
| 81,914
|
def uniq_vals_incommon(list1, list2):
    """Find the unique values common to both lists.

    Uses set intersection, which runs in O(len(list1) + len(list2)) instead
    of the quadratic `[x for x in list1 if x in list2]` scan.

    :param list1: first iterable of hashable values
    :param list2: second iterable of hashable values
    :return: list of distinct values present in both (arbitrary order)
    """
    return list(set(list1) & set(list2))
|
0dc53c8e4bcca14972e43f5ecff8d7d643ef9104
| 81,923
|
def _merge_jsonb_objects(column: str) -> str:
    """
    Return a SQL assignment fragment that merges the top-level keys of a
    JSONB column, taking the newest available non-null value.

    The generated expression concatenates the null-stripped old and new
    (EXCLUDED) values -- keys present in both take the new value -- and
    falls back to the raw new value, then the raw old value, when the
    concatenation itself is NULL (i.e. one side is missing entirely).

    :param column: name of the JSONB column; interpolated verbatim into the
        SQL, so callers must pass a trusted identifier (not user input).
    :return: fragment for an ``ON CONFLICT ... DO UPDATE SET`` list.
    """
    return f"""{column} = COALESCE(
        jsonb_strip_nulls(old.{column})
            || jsonb_strip_nulls(EXCLUDED.{column}),
        EXCLUDED.{column},
        old.{column}
    )"""
|
4d0f05d0d701217f7929721f6ef500fcb23d1d1b
| 81,924
|
def should_perform_aggr_query(hesabi_body):
    """
    Decide whether an aggregation query should be performed on the sources.

    :param hesabi_body: request body mapping that may contain "agg_field"
    :return: True when "agg_field" is present, False otherwise
    """
    # `in` already yields a bool -- no need for the explicit if/return dance.
    return "agg_field" in hesabi_body
|
a7f4c326ff517ab670316435a7fd8669a920cb5e
| 81,935
|
import torch
def rotate_vec_by_axisangle(vec, aa_vec):
    """
    Rotate 3D vectors @vec by the direction and angle represented by @aa_vec
    (Rodrigues' rotation formula).
    See https://stackoverflow.com/questions/32485772/how-do-axis-angle-rotation-vectors-work-and-how-do-they-compare-to-rotation-matr
    for more information.
    Args:
        vec (tensor): (..., 3) where final dim represents (x, y, z) vectors
        aa_vec (tensor): (..., 3) where final dim represents desired (ax, ay, az) axis-angle rotations
    Returns:
        tensor: (..., 3) where final dim is newly rotated (x, y, z) vectors
    """
    # Extract angle and unit vector from axis-angle vectors.
    # keepdim=True so `angle` broadcasts against the trailing (x, y, z) dim.
    angle = torch.norm(aa_vec, dim=-1, keepdim=True)
    aa_v = aa_vec / angle
    # Map all NaNs to 0 (NaN != NaN). A zero-length aa_vec divides 0/0 above;
    # with a zeroed axis the formula below reduces to vec * cos(0) == vec.
    aa_v[aa_v != aa_v] = 0.0
    # # Record angles that are zero so we don't get nan's
    # idx = torch.nonzero(angle.squeeze(dim=-1)).tolist()
    # aa_v = torch.zeros_like(aa_vec)
    # aa_v[idx] = aa_vec[idx] / angle[idx]
    # Rotate the vector using the formula (see link above):
    #   v' = v*cos(a) + (k x v)*sin(a) + k*(k . v)*(1 - cos(a))
    c, s = torch.cos(angle), torch.sin(angle)
    vec_rot = vec * c + torch.cross(aa_v, vec) * s + \
        aa_v * (torch.sum(aa_v * vec, dim=-1, keepdim=True)) * (1 - c)
    return vec_rot
|
8ab749d7c26a94dfd9c0da80334c69e0e46702a2
| 81,938
|
import torch
def batch_pdist(X, Y, device=None):
    """Compute all pairwise squared distances between two batched point sets.

    Uses the expansion ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2 with
    broadcasting, which avoids allocating (and multiplying by) the explicit
    matrices of ones the previous formulation used. Values are bit-identical
    because multiplying a column by a ones matrix equals broadcasting it.

    Parameters
    ----------
    X : torch.tensor
        shape [c, n, d]
    Y : torch.tensor
        shape [c, m, d] or [1, m, d]
    device : optional
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    torch.tensor
        shape [c, n, m]; entry (i, j, k) is the squared distance between
        X[i, j] and Y[i, k] (or Y[0, k] when Y.shape[0] == 1).
    """
    X_norm2 = (X ** 2).sum(2)                      # [c, n]
    Y_norm2 = (Y ** 2).sum(2)                      # [c or 1, m]
    X_dot_Y = torch.matmul(X, Y.transpose(1, 2))   # [c, n, m]
    return X_norm2.unsqueeze(2) - 2 * X_dot_Y + Y_norm2.unsqueeze(1)
|
2d0801aa4abe6d2fe740d65f9f6f75645b6dde4f
| 81,942
|
def subset(d, keys=()):
    """Return a copy of dict `d` restricted to the given keys.

    :param d: source mapping
    :param keys: iterable of keys to keep; keys absent from `d` are skipped
    :return: new dict with only the selected key/value pairs
    """
    # Dict comprehension instead of dict([...]) -- clearer and skips the
    # intermediate list of tuples.
    return {k: d[k] for k in keys if k in d}
|
51e4190ca40d8b6cb924e6c30a720f0231ba85e8
| 81,945
|
def get_physnets_for_node(task):
    """Return the set of physical networks for a node.

    Collects the physical network of every port attached to the node in
    `task`; ports whose physical network is None are skipped.

    :param task: a TaskManager instance
    :returns: A set of physical networks.
    """
    return {port.physical_network
            for port in task.ports
            if port.physical_network is not None}
|
2e97b347661ab407b2609f142c937184e0fa36f1
| 81,946
|
def is_item_visible(item):
    """Return True when the item is not deleted, archived, historic or checked."""
    hidden_flags = ('is_deleted', 'is_archived', 'in_history', 'checked')
    return not any(item[flag] == 1 for flag in hidden_flags)
|
3ac9d18cb70b29ab6d6dc39916d89f0cdc2a035c
| 81,950
|
from datetime import datetime
import pytz
def _datetime_from_query(ts):
    """Converts the above arbitrary format back to a datetime object, complete
    with a time zone. `ts` should be a string, straight out of the query.
    Returns None if `ts` is missing or junk.
    """
    if not ts:
        return None
    # Avoid using float() on the way back in, too: split the epoch-seconds
    # string manually so fractional precision is never lost.
    if u'.' in ts:
        seconds, microseconds = ts.split('.', 1)
        # Pad to six digits so e.g. ".5" means 500000 microseconds.
        microseconds = (microseconds + u'000000')[0:6]
    else:
        seconds = ts
        microseconds = 0
    try:
        dt = datetime.fromtimestamp(int(seconds), pytz.utc)
        dt = dt.replace(microsecond=int(microseconds))
        return dt
    except (TypeError, ValueError):
        # Nothing reasonable to do, since this is supposed to be a value just
        # for us. If someone has been dicking with it, ignore it.
        # ValueError added: int("junk") raises ValueError, not TypeError, so
        # non-numeric strings previously escaped despite the docstring's
        # promise to return None for junk.
        return None
|
c0bfb96973c2c009bd60b3a48e12a5c2d271bcaa
| 81,951
|
def PEM_split(cert_pem):
    """Split a certificate / certificate chain in PEM format into multiple
    PEM certificates. This is useful for extracting the last / root PEM cert
    in a chain for example. Will return a list of strings with each string
    being an individual PEM certificate (including its '-----BEGIN CERTIFICATE...'
    delineaters).
    """
    # Cut at every BEGIN marker, then glue the marker back onto each piece.
    delineator = '-----BEGIN CERTIFICATE-----'
    pieces = cert_pem.strip().split(delineator)
    return [delineator + piece for piece in pieces if piece != '']
|
3872e7a971d2c0262add850f680732839e933d5c
| 81,952
|
def _round(value: float, places=2) -> str:
    """Format `value` to `places` *significant figures*, returned as a string.

    NOTE(review): despite the name and the original description ("decimal
    places"), the 'g' presentation type limits significant figures, not
    decimals -- e.g. _round(123.456) -> '1.2e+02', not '123.46'. Confirm
    callers rely on this before renaming or changing the format.
    """
    # places=2 builds "{:.2g}" via old-style %-interpolation.
    fstring = "{:.%gg}" % places  # pylint: disable=consider-using-f-string
    return fstring.format(value)
|
7c57e4c59d6cd3b6a435aa3988ec685a1551b87c
| 81,953
|
import inspect
def find_people(person_module):
    """
    Call every function in `person_module` whose name starts with 'get'
    and return the list of their results (in name order).
    """
    members = inspect.getmembers(person_module, predicate=inspect.isfunction)
    return [fn() for name, fn in members if name.startswith('get')]
|
c3b957051fc75acc5bd208a36d2882164e8564cd
| 81,954
|
import csv
def parse_csv(path):
    """Read a benchmark CSV so that dict[algorithm][world_size] -> list.

    Each row is expected as:
        <prefix>_<algorithm>, world_size, object_size, mean, std
    where the algorithm name is the last underscore-separated token of the
    first column.

    :param path: path to the CSV file
    :return: dict mapping algorithm -> world_size -> [[object_size, mean, std], ...]
    """
    results = {}
    with open(path) as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            # (debug print of each row removed)
            algorithm = row[0].split('_')[-1]
            world_size, object_size = int(row[1]), int(row[2])
            mean, std = float(row[3]), float(row[4])
            # setdefault replaces the explicit "if key not in dict" dance.
            per_algo = results.setdefault(algorithm, {})
            per_algo.setdefault(world_size, []).append([object_size, mean, std])
    return results
|
ae69b2ed43d150e20f52c6c5e729980c19a58e93
| 81,955
|
def get_any_index(lst, *values):
    """Return the index in `lst` of the first of `values` that is present.

    Returns None when none of the values occur in `lst`.
    """
    for candidate in values:
        if candidate in lst:
            return lst.index(candidate)
    return None
|
1676d18ac4f9968a3ffae44ef4b786242cc2f509
| 81,956
|
def prompt_string(msg):
    """Prompts the user for a value.
    @param msg: Message to display to user.
    @return: User's input or None.
    """
    answer = input("\n%s: " % msg).strip(" \n")
    if answer:
        return answer
    return None
|
39c1d8cdadbdb760bdc7933f1520e613e8f817b6
| 81,958
|
from typing import List
def rgb_to_cielab(rgb: List[int]) -> List[int]:
    """
    Convert from RGB to CIELab color space.
    Arguments:
        rgb: Iterable of length 3 with integer values between 0 and 255
    Returns:
        A list with 3 integer scaled values of CIELab.
    References:
        - https://github.com/QIICR/dcmqi/blob/0c101b702f12a86cc142cb000a074fbd341f8784/libsrc/Helper.cpp#L173
        - https://github.com/QIICR/dcmqi/blob/0c101b702f12a86cc142cb000a074fbd341f8784/libsrc/Helper.cpp#L256
        - https://github.com/QIICR/dcmqi/blob/0c101b702f12a86cc142cb000a074fbd341f8784/libsrc/Helper.cpp#L336
    """
    assert len(rgb) == 3
    # RGB -> CIEXYZ (Ref 1)
    # Normalize to [0, 1], clamping out-of-range inputs.
    tmp = tuple(min(max(x / 255.0, 0.0), 1.0) for x in rgb)
    # Inverse sRGB gamma (linearize); 0.04045 is the sRGB linear/curve knee.
    tmp = tuple(((x + 0.055) / 1.055) ** 2.4 if x > 0.04045 else x / 12.92 for x in tmp)
    tmp = tuple(x * 100 for x in tmp)
    # Linear RGB -> XYZ via the sRGB/D65 matrix.
    tmp = (
        0.4124564 * tmp[0] + 0.3575761 * tmp[1] + 0.1804375 * tmp[2],
        0.2126729 * tmp[0] + 0.7151522 * tmp[1] + 0.0721750 * tmp[2],
        0.0193339 * tmp[0] + 0.1191920 * tmp[1] + 0.9503041 * tmp[2],
    )
    # CIEXYZ -> CIELab (Ref 2)
    # Normalize by the D65 reference white (Xn, Yn, Zn).
    tmp = tuple(x / y for x, y in zip(tmp, (95.047, 100, 108.883)))
    # Piecewise cube-root with the linear branch below the 0.008856 threshold.
    tmp = tuple(x ** (1 / 3) if x > 0.008856 else (7.787 * x) + (16 / 116) for x in tmp)
    # (L, a, b) from the transformed (X, Y, Z).
    tmp = ((116 * tmp[1]) - 16, 500 * (tmp[0] - tmp[1]), 200 * (tmp[1] - tmp[2]))
    # CIELab -> ScaledCIELab (Ref 3): map L in [0,100] and a,b in [-128,127]
    # onto the DICOM 16-bit range [0, 65535], rounding to nearest.
    return [
        int(tmp[0] * 65535 / 100 + 0.5),
        int((tmp[1] + 128) * 65535 / 255 + 0.5),
        int((tmp[2] + 128) * 65535 / 255 + 0.5),
    ]
|
2c2b1250613a93526944e31f2572c24ee8e50c88
| 81,970
|
def bytes_to_size_string(b: int) -> str:
    """Convert a byte count to a human-readable binary-unit string.

    :param b: number of bytes
    :return: e.g. "1.50KiB", "2.00GiB"; counts below 1 KiB are returned as a
        bare number string.
    """
    kb = 1024
    mb = kb * 1024
    gb = mb * 1024
    tb = gb * 1024
    # Use >= so exact powers of 1024 get a unit: previously b == 1024
    # fell through every branch and returned "1024" with no unit.
    if b >= tb:
        return "%0.2fTiB" % (b / float(tb))
    if b >= gb:
        return "%0.2fGiB" % (b / float(gb))
    if b >= mb:
        return "%0.2fMiB" % (b / float(mb))
    if b >= kb:
        return "%0.2fKiB" % (b / float(kb))
    return str(b)
|
9ad3a2adb5438206b5365483210d9a230d19f9eb
| 81,973
|
import colorsys
def hsv_to_rgb(hsv):
    """
    Convert an HSV tuple (components in 0.0-1.0) to an RGB tuple (0-255).

    :param hsv: (h, s, v) with each component in [0.0, 1.0]
    :return: (r, g, b) with each component rounded to an int in [0, 255]
    """
    h, s, v = hsv
    unit_rgb = colorsys.hsv_to_rgb(h, s, v)
    return tuple(round(channel * 255) for channel in unit_rgb)
|
10fd66d9b9da75802c79e6f69d6e63ddce17c472
| 81,977
|
def sylvester(number: int) -> int:
    """
    Return the nth term of Sylvester's sequence, computed recursively via
    s(n) = (s(n-1) - 1) * s(n-1) + 1 with s(1) = 2.

    :param number: nth number to calculate in the sequence
    :return: the nth number in Sylvester's sequence

    >>> sylvester(8)
    113423713055421844361000443
    >>> sylvester(-1)
    Traceback (most recent call last):
    ...
    ValueError: The input value of [n=-1] has to be > 0
    >>> sylvester(8.0)
    Traceback (most recent call last):
    ...
    AssertionError: The input value of [n=8.0] is not an integer
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    if number == 1:
        return 2
    previous = sylvester(number - 1)
    return (previous - 1) * previous + 1
|
c9d6a4af29fc3b4ef4de145673caab8ea643355c
| 81,983
|
import torch
def get_spacer_tensor(template: torch.Tensor, length: int) -> torch.Tensor:
    """Return a float32 zero tensor shaped like `template`, but with its last
    dimension replaced by `length`, on the template's device."""
    spacer_shape = list(template.shape[:-1]) + [length]
    return torch.zeros(spacer_shape, dtype=torch.float32, device=template.device)
|
e1ba4793d4667e547489894724ba15127cb25530
| 81,984
|
from typing import Any
def size(obj: Any) -> int:
    """Return the length of an array or string, or 0 for unsized objects."""
    try:
        length = len(obj)
    except TypeError:
        # e.g. ints, None, generators -- anything without a usable length.
        return 0
    return length
|
71f9c01e4b9bd614df5bf839ef37c30ec1ec0752
| 81,990
|
def slicify(slc, dim):
    """
    Force a slice to have defined start, stop, and step from a known dim.

    Start and stop will always be positive. Step may be negative.
    There is an exception where a negative step overflows: the stop then
    needs the default value -1. This is the only case of a negative
    start/stop value.

    Parameters
    ----------
    slc : slice or int
        The slice to modify, or int to convert to a slice
    dim : int
        Bound for the slice (the docstring previously said tuple; the code
        only ever compares/adds it as an int)

    Returns
    -------
    slice
        Fully-specified equivalent slice.

    Raises
    ------
    ValueError
        If `slc` is neither a slice nor an int.
    """
    if isinstance(slc, slice):
        # Fill in default limits.
        start = 0 if slc.start is None else slc.start
        stop = dim if slc.stop is None else slc.stop
        step = 1 if slc.step is None else slc.step
        # Account for negative indices.
        if start < 0: start += dim
        if stop < 0: stop += dim
        # Clamp values that over-flow the bounds.
        if step > 0:
            if start < 0: start = 0
            if stop > dim: stop = dim
        else:
            if stop < 0: stop = -1
            if start > dim: start = dim - 1
        return slice(start, stop, step)
    elif isinstance(slc, int):
        if slc < 0:
            slc += dim
        return slice(slc, slc + 1, 1)
    else:
        # Typo "recongized" fixed in the error message.
        raise ValueError("Type for slice %s not recognized" % type(slc))
|
06c7a4e3f5a371e5bbacb4f6a41c51041567fa08
| 82,001
|
def pad_floats(n):
    """
    Round a number to two decimal places.
    :param n: number to be formatted
    :return: the value rounded to 2 decimals, as a float
    """
    return float(format(n, ".2f"))
|
377fe965b6d8891c76b47ffa1cd2da099f893c3b
| 82,003
|
def flatten(x):
    """Flatten a nested list/tuple into a single flat list.

    Note: a bare string argument is returned unchanged (not wrapped in a
    list), matching the original contract; strings inside containers are
    treated as atoms.
    """
    if isinstance(x, str):
        return x
    flat = []
    for element in x:
        nested = hasattr(element, "__iter__") and not isinstance(element, str)
        if nested:
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
|
2afbad9dc45711ac1cba96a377dd5e212d262c52
| 82,006
|
import time
def validate_counter(counter):
    """
    Validate that `counter` lies within a sliding window of +/- 12 hours
    (43200 seconds) around the current time.
    """
    window = 43200
    now = int(time.time())
    return now - window <= counter <= now + window
|
7f7866c3f1f038bbe87ffdce4f05bb385f72d030
| 82,009
|
def is_list_type(value):
    """Return True when `value` is a list-like container (list, set or tuple).

    Compatible with both Python 2 and Python 3.

    :param value: value to test
    :type value: any
    :returns: whether the value is a list-like container
    :rtype: bool
    """
    list_like_types = (list, set, tuple)
    return isinstance(value, list_like_types)
|
3d858858d1ec3042f9318c9ca524197ad980bcf9
| 82,011
|
def _target_ads_in_campaign_to_user_list(
    client, customer_id, campaign_id, user_list_resource_name
):
    """Creates a campaign criterion that targets a user list with a campaign.
    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a str client customer ID used to create a campaign
            criterion.
        campaign_id: a str ID for a campaign used to create a campaign
            criterion that targets members of a user list.
        user_list_resource_name: a str resource name for a user list.
    Returns:
        a str resource name for a campaign criterion.
    """
    # Build the mutate operation; `create` holds the new criterion payload.
    campaign_criterion_operation = client.get_type("CampaignCriterionOperation")
    campaign_criterion = campaign_criterion_operation.create
    # Resolve the campaign resource path from its numeric ID.
    campaign_criterion.campaign = client.get_service(
        "CampaignService"
    ).campaign_path(customer_id, campaign_id)
    # Point the criterion's user-list target at the given list.
    campaign_criterion.user_list.user_list = user_list_resource_name
    # Submit the single-operation mutate; one operation yields one result.
    campaign_criterion_service = client.get_service("CampaignCriterionService")
    response = campaign_criterion_service.mutate_campaign_criteria(
        customer_id=customer_id, operations=[campaign_criterion_operation]
    )
    resource_name = response.results[0].resource_name
    print(
        "Successfully created campaign criterion with resource name "
        f"'{resource_name}' targeting user list with resource name "
        f"'{user_list_resource_name}' with campaign with ID {campaign_id}"
    )
    return resource_name
# [END setup_remarketing_4]
|
28944ade1b3d2d3e94fc39bf1fffc1887d8be868
| 82,012
|
import re
def get_text_refs(url):
    """Return the parsed out text reference dict from an URL.

    Always includes the 'URL' key; adds 'PMID', 'PMCID' and/or 'DOI' when
    the URL matches the corresponding known pattern.
    """
    # (pattern, key) pairs; the first capture group becomes the reference id.
    patterns = (
        (r'https://www.ncbi.nlm.nih.gov/pubmed/(\d+)', 'PMID'),
        (r'https://www.ncbi.nlm.nih.gov/pmc/articles/(PMC\d+)/', 'PMCID'),
        (r'https://www.biorxiv.org/content/([^v]+)v', 'DOI'),
    )
    text_refs = {'URL': url}
    for pattern, key in patterns:
        match = re.match(pattern, url)
        if match:
            text_refs[key] = match.groups()[0]
    return text_refs
|
f24cdec70775d10e707aa8ae693dfe5a977fee93
| 82,013
|
def remove_invariant_features(dataset):
    """Returns a new dataset with all invariant features removed.

    Keeps only the feature columns whose standard deviation across samples
    is non-zero; constant (zero-variance) features are dropped.

    NOTE(review): assumes `dataset` supports [:, column_indices] selection
    and exposes `.samples` as a 2-D samples-by-features array (PyMVPA-style
    Dataset, presumably) -- confirm against the call sites.
    """
    return dataset[:, dataset.samples.std(axis=0).nonzero()[0]]
|
42e931d1b64813beef3481d9f909b423b08a0d55
| 82,018
|
def ravel_multiple_indices(ixs, shape):
    """
    "Flatten" multiple 2D indices into indices on the flattened matrix,
    like np.ravel_multi_index but vectorized over n index pairs at once.

    Parameters
    ----------
    ixs: array of ints, shape (n, 2)
        The n (row, col) index pairs to flatten.
    shape: list or tuple of ints of length 2
        The shape of the corresponding matrix.

    Returns
    -------
    array of n ints between 0 and shape[0]*shape[1]-1
        Flattened (row-major) positions of the input index pairs.
    """
    rows, cols = ixs[:, 0], ixs[:, 1]
    return rows * shape[1] + cols
|
06d1078abaa8203f19680544ea5cc5b811618c54
| 82,020
|
def export_url(url):
    """
    Get the 'Special:Export' XML version url of an article.

    The page name is taken from the last path component of `url`.
    """
    page = url.rsplit("/", 1)[-1]
    template = ("http://en.wikinews.org/w/index.php?title=Special:Export"
                "&action=submit&pages={}")
    return template.format(page)
|
e1e22896c2641865354a8a8c4ba82dcd59118da3
| 82,025
|
def delta_time(ind):
    """Create a function that reads the delta time from a product-specific block.

    The returned callable takes a sequence and returns element `ind`
    right-shifted by 5 bits.
    """
    def extract(seq):
        return seq[ind] >> 5
    return extract
|
288d2fd1e677640f5166e77b77c5b015b4c15b61
| 82,037
|
import re
def represents_float(text):
    """
    Return True when `text` (a string or float) represents a float literal
    of the form "<digits>.<digits>" (surrounding whitespace allowed).

    :Example:
    >>> represents_float("1.0")
    True
    >>> represents_float("1")
    False
    >>> represents_float("a")
    False
    >>> represents_float(".0")
    False
    >>> represents_float("0.")
    False
    >>> represents_float("0.000001")
    True
    >>> represents_float("00000.000001")
    True
    >>> represents_float("0000a0.000001")
    False
    """
    if isinstance(text, float):
        return True
    if not isinstance(text, str):
        # Covers None and any other non-string type.
        return False
    # Raw-string fullmatch replaces the anchored search on a non-raw pattern
    # (which triggered invalid-escape warnings). The old `len(text) < 3`
    # pre-check was redundant: the shortest possible match ("0.0") has
    # length 3, and stripping whitespace can only shorten the string.
    return re.fullmatch(r"[0-9]+\.[0-9]+", text.strip()) is not None
|
ab3f5b39800e84e79421f2b8fbf9c57ad07cb1a3
| 82,042
|
def get_comments(events, comments=None):
    """
    Pick comments and pull-request review comments out of a list of events.

    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        comments: the previous output of this function; its entries are
            merged with (and may be edited/deleted by) the new events.
    Returns:
        comments: a list of dict(author=..., comment=..., timestamp=..., id=...),
        ordered with the earliest comment first.
    """
    if not comments:
        comments = {}
    else:
        # Re-key the previous output by comment id so edits/deletes below can
        # update it. (A stray `comments = {}` used to immediately discard
        # this dict, making the `comments` argument dead code.)
        comments = {c['id']: c for c in comments}
    for event, body, _timestamp in events:
        action = body.get('action')
        if event in ('issue_comment', 'pull_request_review_comment'):
            comment_id = body['comment']['id']
            if action == 'deleted':
                comments.pop(comment_id, None)
            else:
                c = body['comment']
                comments[comment_id] = {
                    'author': c['user']['login'],
                    'comment': c['body'],
                    'timestamp': c['created_at'],
                    'id': c['id'],
                }
    return sorted(comments.values(), key=lambda c: c['timestamp'])
|
f3b412cb36463b523fc2d9c67554b856eeb1489e
| 82,043
|
def applyReplacements(symbol_list):
    """
    Apply the replacement rule of each symbol in `symbol_list` that defines
    one; symbols without a rule are passed through unchanged.

    Returns a tuple (`replaced`, `new_symbol_list`): `replaced` is True if
    any symbol triggered a replacement rule, and `new_symbol_list` is the
    resulting list. When `replaced` is False the new list matches the input.

    :param symbol_list:
    :type symbol_list: list[Symbol]
    :return:
    :rtype: (bool, list[Symbol])
    """
    any_replaced = False
    result = []
    for sym in symbol_list:
        if not hasattr(sym, 'replace'):
            result.append(sym)
            continue
        any_replaced = True
        result.extend(sym.replace())
    return any_replaced, result
|
8482dd8a5afc00f156b090289a09a615d6385b00
| 82,044
|
def errorMessage(err, location=None):
    """
    Generate a standard error message.

    Parameters
    ----------
    err : str
        The error message.
    location : str, optional
        Where the error happens, e.g. CTL.funcs.funcs.errorMessage.

    Returns
    -------
    str
        The generated error message.
    """
    if location is None:
        return "Error: {}".format(err)
    return "Error in {}: {}".format(location, err)
|
fbc1c0cee3de8d165cb4f2512e5f830cedaa0c27
| 82,047
|
def _NormalizePath(path):
"""Returns the normalized path of the given one.
Normalization include:
* Convert '\\' to '/'
* Convert '\\\\' to '/'
* Resolve '../' and './'
Example:
'..\\a/../b/./c/test.cc' --> 'b/c/test.cc'
"""
path = path.replace('\\', '/')
path = path.replace('//', '/')
filtered_parts = []
for part in path.split('/'):
if part == '..':
if filtered_parts:
filtered_parts.pop()
elif part == '.':
continue
else:
filtered_parts.append(part)
return '/'.join(filtered_parts)
|
ed6cd73f1c59eeef4d55121a99bb29c2e569a6da
| 82,049
|
def arcsecs2radians(seconds: float) -> float:
    """Convert arcseconds to radians.

    Uses the exact factor pi / (180 * 3600) via math.radians instead of the
    truncated constant 0.000004848, so conversions are accurate to full
    float precision.

    :param seconds: angle in arcseconds
    :return: angle in radians
    """
    import math  # local import keeps this snippet self-contained
    # 3600 arcseconds per degree, then degrees -> radians.
    return math.radians(seconds / 3600.0)
|
50c5884bf4bb2fb559faede58f7a023259ba5dde
| 82,050
|
def coerce_to_list(val):
    """
    Normalize a parameter that may be a single value or a list of values.

    Falsy inputs map to []; lists and tuples pass through unchanged; any
    other truthy value is wrapped in a single-element list.
    """
    if not val:
        return []
    if isinstance(val, (list, tuple)):
        return val
    return [val]
|
a26e881eea0de2b7e0a029e6a56f2f9a9f9449af
| 82,051
|
def xyxy2xywh(bbox_xyxy):
    """
    Convert a corner-format box to origin+size format.

    Args:
        bbox_xyxy: [xmin, ymin, xmax, ymax]
    Returns:
        bbox_xywh: [x, y, w, h]
    """
    x_min, y_min, x_max, y_max = bbox_xyxy
    return [x_min, y_min, x_max - x_min, y_max - y_min]
|
7a533a64cede8a0a4a073e547099087d906f8ef0
| 82,061
|
def train_test_split(data, test_len):
    """
    Split a sequence into a training head and a test tail.

    :param data: (np.array or list) sequence to split
    :param test_len: (int) length of the test tail
    :return: (train, test); test is None when test_len == 0
    """
    if test_len == 0:
        return data, None
    train, test = data[:-test_len], data[-test_len:]
    return train, test
|
9a21e22934a6b48974ff121542ff2061115fde78
| 82,063
|
def check_numeric_type(array):
    """
    Check that an array holds only accepted numeric dtypes
    (int64, int32, float64, float32).

    Args:
        array: array-like, shape=(n_samples, n_features)
    Returns:
        True when the dtype is accepted.
    Raises:
        TypeError: for any other dtype.
    """
    accepted = ('float64', 'int64', 'float32', 'int32')
    if array.dtype not in accepted:
        raise TypeError("Error: Array is of type %s but expected a 'float' or 'int' format" % array.dtype)
    return True
|
803e5c4fe7349aa165f197ade735ed5cb37f1604
| 82,064
|
def exponentiate(base, exp, p):
    """
    Square-and-multiply modular exponentiation.

    Walks the exponent's bits most-significant first, squaring the
    accumulator each step and multiplying in `base` on set bits.

    :param base: base of the exponentiation
    :param exp: power that the base is being raised to (non-negative)
    :param p: prime modulus
    :returns: (base ** exp) mod p
    """
    result = 1
    for bit in format(exp, 'b'):
        result = (result * result) % p
        if bit == '1':
            result = (result * base) % p
    return result
|
48b656a62b819e71c2135918c63e5b1445feebb9
| 82,067
|
def get_stack_name(stack_formatted):
    """
    Extract the stack name (e.g. "HDP") from a formatted string that may
    carry a version suffix (e.g. "HDP-2.6.1.0-123"). Returns None for None.
    """
    if stack_formatted is None:
        return None
    # str.split also covers the no-dash case: it returns the whole string.
    return stack_formatted.split('-', 1)[0]
|
59e9fc45ceea1e032036b8098cf36dc4acf25d0f
| 82,070
|
import copy
def nonuniform_mutation(random, candidate, args):
    """Return the mutant produced by nonuniform mutation on the candidate.

    The function performs nonuniform mutation as specified in
    (Michalewicz, "Genetic Algorithms + Data Structures = Evolution
    Programs," Springer, 1996). This function also makes use of the
    bounder function as specified in the EC's ``evolve`` method.

    .. note::
       This function **requires** that *max_generations* be specified in
       the *args* dictionary. Therefore, it is best to use this operator
       in conjunction with the ``generation_termination`` terminator.

    .. Arguments:
       random -- the random number generator object
       candidate -- the candidate solution
       args -- a dictionary of keyword arguments

    Required keyword arguments in args:
    - *max_generations* -- the maximum number of generations for which
      evolution should take place

    Optional keyword arguments in args:
    - *mutation_strength* -- the strength of the mutation, where higher
      values correspond to greater variation (default 1)
    """
    bounder = args['_ec'].bounder
    num_gens = args['_ec'].num_generations
    max_gens = args['max_generations']
    # setdefault also records the default strength back into args for
    # subsequent calls.
    strength = args.setdefault('mutation_strength', 1)
    # Exponent decays from 1 toward 0 over the run, so mutations shrink as
    # evolution progresses (the "nonuniform" part of the operator).
    exponent = (1.0 - num_gens / float(max_gens)) ** strength
    mutant = copy.copy(candidate)
    for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):
        # Move toward the upper or lower bound with equal probability; the
        # step size is a random fraction of the remaining headroom.
        if random.random() <= 0.5:
            new_value = c + (hi - c) * (1.0 - random.random() ** exponent)
        else:
            new_value = c - (c - lo) * (1.0 - random.random() ** exponent)
        mutant[i] = new_value
    return mutant
|
c002fc2faeaa9a2e1a2d270158a0535814d2bf15
| 82,071
|
def get_outbound_layers(layer):
    """Return outbound layers.

    Parameters
    ----------
    layer: Keras.layers
        A Keras layer.

    Returns
    -------
    : list[Keras.layers]
        List of outbound layers.
    """
    if hasattr(layer, '_outbound_nodes'):
        # noinspection PyProtectedMember
        nodes = layer._outbound_nodes
    else:
        # Older Keras exposed the attribute without the leading underscore.
        nodes = layer.outbound_nodes
    return [node.outbound_layer for node in nodes]
|
c38c4f49e6abd4ddf445e32bbeac0320a409bece
| 82,075
|
def calc_gb_size(num_rows, row_size):
    """Calculate a table's size in GB from its row size (bytes) and row count."""
    bytes_total = row_size * num_rows
    return ((bytes_total / 1024) / 1024) / 1024
|
98768350c15ed87527e6a29dc95ed13c49bd6a25
| 82,076
|
def maybe_get(obj, i):
    """
    Return obj[i] when `obj` supports item access, otherwise `obj` itself.

    :param obj: object
    :param i: the index
    """
    if hasattr(obj, "__getitem__"):
        return obj[i]
    return obj
|
701535c1401bf5fcf4f4c1ced9bacd6558f479e2
| 82,081
|
def get_index_name(app_id, namespace, name):
    """ Gets the internal index name.

    Args:
        app_id: A str, the application identifier.
        namespace: A str, the application namespace.
        name: A str, the index name.
    Returns:
        A str, the internal name of the index.
    """
    return f'{app_id}_{namespace}_{name}'
|
b7f5dc80a8a0b649d7d0009d34ab9da4f432b54f
| 82,084
|
def pad_lines_after_first(prefix, s):
    """Prefix every line of `s` except the first with `prefix`."""
    joiner = '\n' + prefix
    return joiner.join(s.splitlines())
|
2c990180ad4e6d276b1a18e934406f5ab89d73fd
| 82,085
|
import copy
def merge_dict(original_data, new_data):
    """
    Merge two dictionaries.

    Merge the content of the `new_data` dictionary into `original_data`,
    producing a new dictionary (inputs are not modified). Equal scalar
    values are preserved, overlapping lists are extended, and nested
    dictionaries are merged recursively.

    Parameters
    ----------
    original_data : dict
        existing data to merge into
    new_data : dict
        new data to merge in

    Raises
    ------
    ValueError
        if the two dicts hold different scalar values for the same key, or
        an unmergeable data type is encountered
    """
    # Deep copy so existing nested values are never overwritten in place.
    result = copy.deepcopy(original_data)
    for key in new_data.keys():
        if key not in original_data:
            # New entry that does not exist -> just add it.
            result[key] = new_data[key]
            continue
        original_value = original_data[key]
        # Simple (non-container) values must agree exactly.
        if not isinstance(original_value, (list, dict)):
            if new_data[key] == original_value:
                continue
            raise ValueError(f"Error different values for the same key "
                             f"{key}: {new_data[key]} "
                             f"{original_data[key]}")
        # isinstance (not `type(...) ==`) so list/dict subclasses merge too,
        # consistent with the isinstance check above; previously they fell
        # through to the "unexpected data type" error.
        if isinstance(original_value, list):
            result[key].extend(new_data[key])
        elif isinstance(original_value, dict):
            result[key] = merge_dict(result[key], new_data[key])
        else:
            raise ValueError(f"Can not merge unexpected data type: "
                             f"{type(original_data[key])}")
    return result
|
71d8e19d9d5e7f640a741dd8a815e0f9ad324588
| 82,086
|
def quote(text):
    """Change " to \\"."""
    try:
        return text.replace('"', '\\"')
    except TypeError:
        # e.g. bytes input: .replace exists but rejects the str arguments,
        # so the value is returned untouched.
        return text
|
cd3018062985a08923122028552d9b1e9f68b1f5
| 82,090
|
def check_return_code(error_message, expected_code=0):
    """Create a callable to verify the return code of a response.

    To be used with the :meth:`run` function. The returned callable raises
    an :class:`AssertionError` when the response's return code differs from
    the expected one.

    Args:
        error_message (str): message displayed when the check fails.
        expected_code (int, optional): bash return code to compare against.

    Returns:
        callable: condition checking its response against these parameters.
    """
    def validate(response):
        observed = response.returncode
        details = (
            f" Expected return code: {expected_code}."
            f" Observed return code: {observed}."
        )
        assert observed == expected_code, error_message + details
    return validate
|
17302dad9f2d46ca4dbef16429595c5b7107f6ca
| 82,091
|
import base64
def encode(data: bytes) -> str:
    """Encode data to unpadded base64url (RFC 4648) text.

    The previous implementation used ``b64encode`` (standard alphabet with
    '+' and '/'), which is not base64url; ``urlsafe_b64encode`` emits the
    '-' and '_' alphabet the docstring promises.

    :param data: Raw data
    :returns: Base64url-encoded data without trailing padding
    :raises TypeError: if `data` is not a bytes instance

    >>> encode(b'test')
    'dGVzdA'
    """
    if not isinstance(data, bytes):
        raise TypeError("Can only encode() bytes.")
    return base64.urlsafe_b64encode(data).decode("ascii").rstrip("=")
|
88d361a8ab4429119fdb03e57a2054d04491d150
| 82,094
|
import base64
import binascii
def _decode_telegram_base64(string):
"""
Decodes a url-safe base64-encoded string into its bytes
by first adding the stripped necessary padding characters.
This is the way Telegram shares binary data as strings,
such as Bot API-style file IDs or invite links.
Returns `None` if the input string was not valid.
"""
try:
return base64.urlsafe_b64decode(string + '=' * (len(string) % 4))
except (binascii.Error, ValueError, TypeError):
return None
|
faae09c4fbb3430681f83037ac4ac653ce54b87f
| 82,098
|
def to_product(product_tuple):
    """Parse a tuple into a valid 'Product' dictionary.

    Args:
        product_tuple: Tuple containing StockCode, Description, Quantity and
            UnitPrice (in that order).
    Returns:
        Product (dictionary) with string id/description, int quantity and
        float unitPrice.
    """
    stock_code, description, quantity, unit_price = product_tuple[:4]
    return {
        'id': str(stock_code),
        'description': str(description),
        'quantity': int(quantity),
        'unitPrice': float(unit_price),
    }
|
52b01932a58791bfea44ae47ca1b23acaa437744
| 82,100
|
from typing import List
from typing import Union
import hashlib
def integer_seed(objs: List[Union[str, int]]) -> int:
    """Reproducibly and readably generate a 64-bit seed from strings and ints.

    Accepts a list or a tuple. (The original assert's "message" was the
    expression `isinstance(objs, tuple)`, indicating tuples were meant to be
    accepted; they now are.)
    """
    assert isinstance(objs, (list, tuple)), type(objs)
    h = hashlib.sha256()
    for obj in objs:
        if isinstance(obj, str):
            h.update(obj.encode('utf-8'))
        else:
            assert isinstance(obj, int)
            # 16 little-endian bytes per int keeps the hash input unambiguous.
            h.update(obj.to_bytes(16, 'little', signed=False))
    return int(h.hexdigest(), 16) % 2**64
|
2397c74e7455f66cad07dcc9745c69639cf7b750
| 82,102
|
import re
def _remove_redundant_parts(word: str) -> str:
    """
    This function removes redundant parts in a given Neo-Assyrian word in ORACC.
    :param word: A given word
    :return: The given word after removing redundant parts
    """
    # Collapse any "lu₂<lowercase letters>" run back to the bare "lu₂" sign.
    word = re.sub(r"lu₂[a-z]*", "lu₂", word)
    # Strip annotation residue: an optional "@" followed by "v", and
    # backslash-prefixed lowercase tags.
    # NOTE(review): because the "@" is optional, this also deletes every bare
    # "v" in the word, and "\\[a-z]*" deletes a lone backslash -- confirm
    # both are intended.
    return re.sub(r"@?v|\\[a-z]*", "", word)
|
1650c7c2d339a4945910480c519ea1176af1da14
| 82,107
|
import hashlib
def partition(example, num_buckets):
    """Deterministically assign an example to one of `num_buckets` buckets.

    Hashes the example with BLAKE2b (4-byte digest) and reduces modulo
    `num_buckets`, so the same bytes always land in the same bucket.

    :param example: bytes-like input to hash
    :param num_buckets: number of buckets to partition into
    :return: bucket index in [0, num_buckets)
    """
    digest = hashlib.blake2b(example, digest_size=4).digest()
    # int.from_bytes replaces the manual little-endian byte fold
    # (sum of b * 2**(i*8)) with the stdlib equivalent.
    return int.from_bytes(digest, 'little') % num_buckets
|
b005c81a07b9975356ed04753db157211f3214b4
| 82,108
|
import math
def acos(theNumber):
    """Return the arc cosine of theNumber (delegates to math.acos)."""
    result = math.acos(theNumber)
    return result
|
254ebf05c850f220a35ce21a7a03ab61edc2804b
| 82,112
|
def gene_synonyms(ncrna):
    """
    Return all gene synonyms for an ncRNA entry, including the gene symbol
    when present.

    Returns a new list: the previous version appended the symbol directly to
    the entry's own "synonyms" list, mutating the input on every call (the
    symbol accumulated repeatedly).

    :param ncrna: mapping that may contain a "gene" sub-mapping
    :return: list of synonym strings (possibly empty)
    """
    gene = ncrna.get("gene", {})
    # Copy so the caller's data is never modified.
    synonyms = list(gene.get("synonyms", []))
    if "symbol" in gene:
        synonyms.append(gene["symbol"])
    return synonyms
|
95c1759f86831e23f097c1a6d5525380e8dab51b
| 82,115
|
def ms2min_sec(ms: int):
    """Format a millisecond count as a zero-padded 'MM:SS' string."""
    minutes = int(ms / 60000)
    seconds = int(ms / 1000) % 60
    return f'{minutes:02d}:{seconds:02d}'
|
0d08ba003223e0f5e36e01c14f40a96328154f99
| 82,117
|
from datetime import datetime
def date2jd(date):
    """
    Convert a datetime object to a Julian date.

    Args:
        date (datetime.datetime): date to convert
    Returns:
        float: Julian date (2451545.0 corresponds to 2000-01-01 12:00)
    """
    # Offset from the J2000 epoch; sub-second precision is ignored.
    delta = date - datetime(2000, 1, 1, 12, 0, 0)
    return 2451545.0 + delta.days + delta.seconds / 86400.0
|
a5033fb2b1a4d9295127329eec541c389837cdcc
| 82,118
|
def jaccard(r_tokens: list, s_tokens: list) -> float:
    """Computes jaccard similarity.
    JAC(r, s) = |r ∩ s| / |r ∪ s|
    Parameters
    ----------
    r_tokens : list
        First token list.
    s_tokens : list
        Second token list.
    Returns
    -------
    Jaccard similarity of r and s. Two empty token lists are considered
    identical and yield 1.0 (previously this raised ZeroDivisionError).
    """
    r_set = set(r_tokens)
    s_set = set(s_tokens)
    union = r_set | s_set
    if not union:
        # Both sets empty: identical by convention.
        return 1.0
    return len(r_set & s_set) / len(union)
|
427bd308e153cf6781ada6ba45f4bdd0b8f73220
| 82,120
|
def rank_zero_method(f):
    """Decorator that executes f only if the passed experiment has rank 0.

    Used with distributed data parallel so side effects (logging, checkpoints)
    happen on a single node. On non-zero ranks the wrapper returns None
    without calling f.
    """
    import functools

    @functools.wraps(f)  # preserve f's name/docstring for introspection
    def inner(self, experiment):
        if experiment.rank != 0:
            return None
        return f(self, experiment)
    return inner
|
e4ee6d55898de8ae5667bd4e483e3f084bdcbacd
| 82,121
|
def extract_files_to_lint(file_diffs):
    """Return names of FileDiffs whose status byte is one of A/C/M/R/T."""
    if not file_diffs:
        return []
    return [diff.name for diff in file_diffs if diff.status in b'ACMRT']
|
3594bd942f132f7c815df3423eaef6c0de882588
| 82,123
|
from typing import Mapping
from typing import Any
from typing import Iterable
from typing import Dict
import itertools
def _combinations_of_selections(selections: Mapping[str, Any]) -> Iterable[Dict[str, Any]]:
"""Find all permutations of combinations of selections.
This is useful for passing the selections as kwargs to a function (perhaps for formatting).
As a concrete example,
```python
>>> selections = {"a": [1], "b": [2, 3]}
>>> list(_combinations_of_selections(selections))
[{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]
```
Note:
The arguments are validated such that if there is only a single value for a given selection,
it is converted to a list of length 1. Of course, it will be returned as a single value in
the arguments.
Args:
selections: Selections from the dataset.
Returns:
Iterable whose elements contain dicts containing the arguments for each combination.
"""
# Validation
sels = {k: [v] if not isinstance(v, list) else v for k, v in selections.items()}
# Return all combinations in a dict, such that they can be used as kwargs
# See: https://stackoverflow.com/a/15211805
return (dict(zip(sels, v)) for v in itertools.product(*sels.values()))
|
8d71d519e7183aaf66d286fda6e4e79dc842bb96
| 82,125
|
def cropFrequencies(frequencies, Z, freqmin=0, freqmax=None):
    """
    Trim out all data points outside the requested frequency window.

    Parameters
    ----------
    frequencies : np.ndarray
        Array of frequencies
    Z : np.ndarray of complex numbers
        Array of complex impedances (same length as `frequencies`)
    freqmin : float
        Minimum frequency, omit for no lower frequency limit
    freqmax : float, optional
        Max frequency, omit (None) for no upper frequency limit

    Returns
    -------
    frequencies_final : np.ndarray
        Array of frequencies after filtering
    Z_final : np.ndarray of complex numbers
        Array of complex impedances after filtering
    """
    low_mask = frequencies >= freqmin
    frequencies_min = frequencies[low_mask]
    Z_min = Z[low_mask]
    # Compare against None, not truthiness: a caller passing freqmax=0 (or 0.0)
    # previously fell through to the no-upper-limit branch.
    if freqmax is not None:
        high_mask = frequencies_min <= freqmax
        return frequencies_min[high_mask], Z_min[high_mask]
    return frequencies_min, Z_min
|
d289c55bca938f377c31feacb102c47ecb615d5e
| 82,126
|
from typing import Union
def is_inet_address(addr: Union[tuple[str, int], str]) -> bool:
    """Return True when addr is a (host: str, port: int) pair."""
    if not isinstance(addr, tuple) or len(addr) != 2:
        return False
    host, port = addr
    return isinstance(host, str) and isinstance(port, int)
|
825c0e0f71d8e0165d93b0915074e8f4ef68c4ce
| 82,129
|
from typing import Counter
def get_word_freqs(captions):
    """Tally word frequencies across all captions.

    :param captions: list of caption strings
    :return: Counter mapping word -> occurrence count
    """
    word_freqs = Counter()
    for caption in captions:
        # Split on single spaces, discarding the empty tokens produced by
        # leading/trailing/doubled spaces.
        tokens = [token for token in caption.split(' ') if token]
        word_freqs.update(tokens)
    return word_freqs
|
43fe7bd3ac85955da9efa3f88009bb4397b24a4e
| 82,132
|
def _is_windows(repository_ctx):
"""Returns true if the host OS is Windows."""
return repository_ctx.os.name.startswith("windows")
|
4101d53a6cfaee74688d10b3902f5665d8362aa5
| 82,135
|
def is_engine_in_list(engines_list, engine_class):
    """Checks if an engine of the given class is in the list
    :param list engines_list: list of engines
    :param engine_class: engine class
    :returns: True if an instance of engine_class is in the list
              False otherwise
    """
    # Bug fix: in Python 3, filter() returns a lazy iterator which is always
    # truthy, so the old `if engines:` test reported True even when nothing
    # matched. Use any() to actually inspect the elements.
    return any(isinstance(engine, engine_class) for engine in engines_list)
|
e3299c5dc84f881b10b2a3767ac869724b249da1
| 82,143
|
import hashlib
def CalculateHash(file_path):
"""Calculates and returns the hash of the file at file_path."""
sha1 = hashlib.sha1()
with open(file_path, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024 * 1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
|
5c3b9a13aab3e607d9b4f34277c57fbec2b86ed2
| 82,147
|
import torch
def _normalize_images(images):
"""
Given a tensor of images, uses the torchvision
normalization method to convert floating point data to integers. See reference
at: https://pytorch.org/docs/stable/_modules/torchvision/utils.html#save_image
The function uses the normalization from make_grid and save_image functions.
Args:
images (Tensor): Batch of images of shape (N, 3, H, W).
Returns:
ndarray: Batch of normalized images of shape (N, H, W, 3).
"""
# Shift the image from [-1, 1] range to [0, 1] range.
min_val = float(images.min())
max_val = float(images.max())
images.clamp_(min=min_val, max=max_val)
images.add_(-min_val).div_(max_val - min_val + 1e-5)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
images = images.mul_(255).add_(0.5).clamp_(0, 255).permute(0, 2, 3, 1).to(
'cpu', torch.uint8).numpy()
return images
|
bf3737fc9ac64f3bf8454f2626806f0bffb57361
| 82,151
|
def getGrayDiff(img, currentPoint, tmpPoint):
    """Return the absolute gray-level difference between two image points.

    Parameters
    ----------
    img : numpy array
        Array containing the image data.
    currentPoint : object with .x/.y
        Position of one seed point.
    tmpPoint : object with .x/.y
        Position of one of the 8 neighbors of the seed point.
    """
    # Cast to int before subtracting to avoid unsigned-wraparound on uint8 data.
    current_gray = int(img[currentPoint.x, currentPoint.y])
    neighbor_gray = int(img[tmpPoint.x, tmpPoint.y])
    return abs(current_gray - neighbor_gray)
|
9191501fe16775067521092366b3a39f75ec0254
| 82,153
|
def gen_explicit_map_nn_maxpool2d(params_pt, args_pt):
    """
    Generate explicit_map for nn.MaxPool2d.

    Args:
        params_pt (dict): Params for APIPt.
        args_pt (dict): Args for APIPt.

    Returns:
        dict, map between frames ("pad_mode" and "stride" entries).
    """
    # Prefer the explicit argument; fall back to the parameter default.
    padding = args_pt['padding'] if 'padding' in args_pt else params_pt['padding']
    # Zero padding maps to 'valid', anything else to 'same'.
    pad_mode = "'valid'" if padding.strip() in ("0", "(0,0)", "(0, 0)") else "'same'"
    # PyTorch defaults stride to kernel_size when omitted.
    stride = args_pt['stride'] if 'stride' in args_pt else args_pt['kernel_size']
    return {"pad_mode": pad_mode,
            "stride": stride}
|
23e9ccc1f415aa73de8a123a15466537eb002d29
| 82,155
|
def wrap(text, line_length):
    """Wrap a string to a specified line length.
    Args:
        text: The string to wrap.
        line_length: The line length in characters.
    Returns:
        A wrapped string ('' for empty or whitespace-only input).
    Raises:
        ValueError: If line_length is not positive, or shorter than the
            longest word.
    """
    if line_length < 1:
        raise ValueError("line_length {} is not positive".format(line_length))
    words = text.split()
    # Guard the empty case: max() below raises on an empty sequence.
    if not words:
        return ''
    if max(map(len, words)) > line_length:
        raise ValueError("line_length must be at least as long as the longest word")
    lines_of_words = []
    # Start "full" so the first word always opens a new line.
    current_line_length = line_length
    for word in words:
        if current_line_length + len(word) > line_length:
            lines_of_words.append([])  # new line
            current_line_length = 0
        lines_of_words[-1].append(word)
        current_line_length += len(word) + len(' ')
    lines = [' '.join(line_of_words) for line_of_words in lines_of_words]
    result = '\n'.join(lines)
    assert all(len(line) <= line_length for line in result.splitlines())
    return result
|
170e9fff91d4ee79e342efee1ee5c8dabbab6070
| 82,156
|
def _imag_1d_func(x, func):
"""Return imag part of a 1d function."""
return func(x).imag
|
bc5d4eb810f56ca0e9131980f85c34794d91076a
| 82,157
|
def Dist(p1, p2):
    """
    Euclidean distance between two 2-D points.
    """
    x1, y1 = p1
    x2, y2 = p2
    dx = x1 - x2
    dy = y1 - y2
    return (dx * dx + dy * dy) ** 0.5
|
974f397515cf1ce37e925cb60a435a557b94bfaf
| 82,159
|
def cross(a, b):
    """Cross product of two 3-vectors, or the scalar cross of two 2-vectors."""
    assert len(a) == len(b), 'Vector dimensions should be equal'
    if len(a) == 2:
        # 2-D case: the scalar z-component.
        return a[0] * b[1] - a[1] * b[0]
    if len(a) == 3:
        i = a[1] * b[2] - a[2] * b[1]
        j = a[2] * b[0] - a[0] * b[2]
        k = a[0] * b[1] - a[1] * b[0]
        return (i, j, k)
    raise ValueError('Vectors must be 2D or 3D')
|
c3ae97b472560f7704310fff8b163b0572e68859
| 82,165
|
import itertools
def nth(iterable, n):
    """Returns the nth item from iterable (sequence or plain iterator)."""
    try:
        # Fast path: direct indexing for sequences.
        return iterable[n]
    except TypeError:
        # Not subscriptable — advance an iterator instead.
        tail = itertools.islice(iterable, n, None)
        try:
            return next(tail)
        except StopIteration:
            raise IndexError('index out of range')
|
6ca5f1aa0f78607a9f0d31b43fe9ec5cd8a30623
| 82,167
|
def rgb_xyz(rgb):
    """
    Convert an sRGB tuple (0-255 per channel) to the CIE XYZ color space.
    XYZ values use the D65 illuminant with a 2° observer angle.
    https://en.wikipedia.org/wiki/SRGB
    https://en.wikipedia.org/wiki/CIE_1931_color_space
    https://en.wikipedia.org/wiki/Illuminant_D65
    """
    red = rgb[0] / 255.0
    green = rgb[1] / 255.0
    blue = rgb[2] / 255.0
    # Linear sRGB -> XYZ matrix (D65), scaled by 100.
    x = (red * 0.4124564 + green * 0.3575761 + blue * 0.1804375) * 100.0
    y = (red * 0.2126729 + green * 0.7151522 + blue * 0.0721750) * 100.0
    z = (red * 0.0193339 + green * 0.1191920 + blue * 0.9503041) * 100.0
    return x, y, z
|
7f7146a7a91c1574c96f13539e51aaf373919451
| 82,168
|
def identity_transform(memory, image_key):
    """Fetch the image stored at ``image_key`` in ``memory`` unchanged.

    Args:
        memory (dict): memory dict.
        image_key (str): image key.
    Returns:
        np.array: the image.
    """
    image = memory[image_key]
    return image
|
149203758798e5cee189179326e12faaf2eeaf20
| 82,170
|
def strip_byte_order_mark(text):
    """Return text with a leading byte order mark (BOM) removed, if any."""
    try:
        # Round-trip through utf-8-sig, which eats a leading BOM on decode.
        decoded = text.encode('utf-8').decode('utf-8-sig')
    except UnicodeError:
        return text
    return decoded
|
9f7734cd9b07312ab35fff4473bf8acdb6991718
| 82,179
|
def get_blockname_from_block_id(lookup_dict: dict, block_id: str) -> str:
    """
    Look up a datablock name, tolerating case differences.

    PyCIFRW lower-cases datablock names, while user CIFs may reference them
    with arbitrary case, so try the id as given and then its lower-cased form.

    :param lookup_dict: dictionary mapping block_id to datablock
    :param block_id: the block id to match
    :return: the matching datablock name
    """
    if block_id in lookup_dict:
        return lookup_dict[block_id]
    # Fall back to the lower-cased id; raises KeyError if neither exists.
    return lookup_dict[block_id.lower()]
|
f7489588181c9a51f8ece8649fda26a1e6b8afa2
| 82,180
|
def sub_print(stream_id, data, state, log):
    """!@brief Default stream data callback: log the data, pass state through.
    @param stream_id data stream id
    @param data data to be printed
    @param state opaque state object, returned unchanged
    @param log logging object
    """
    message = f"[{stream_id}]: {data}"
    log.info(message)
    return state
|
61865e31f04a1f52561b72c1dae78f1d2bf8437f
| 82,181
|
def flatten(categories):
    """Flatten a nested category tree into a list of {name, parent} records.

    Child dicts are annotated with a 'parent' key in place as they are visited.
    >>> flatten({'name':'test'})
    [{'name': 'test', 'parent': None}]
    >>> flatten({'name': 'Category 8', 'children': [{'name': 'Category 22'}, {'name': 'Category 23'}]})
    [{'name': 'Category 8', 'parent': None}, {'name': 'Category 22', 'parent': 'Category 8'}, {'name': 'Category 23', 'parent': 'Category 8'}]
    >>> flatten({'name': 'c1', 'children': [{'name': 'c2', 'children': [{'name': 'c3'}]}]})
    [{'name': 'c1', 'parent': None}, {'name': 'c2', 'parent': 'c1'}, {'name': 'c3', 'parent': 'c2'}]
    >>> flatten({})
    Traceback (most recent call last):
    ...
    ValueError: name field is required.
    """
    result = []
    pending = [categories]
    # Depth-first preorder: pop a node, emit it, push its children so the
    # first child is processed next.
    while pending:
        node = pending.pop()
        name = node.get('name')
        if not name:
            raise ValueError("name field is required.")
        result.append({'name': name, 'parent': node.get('parent', None)})
        for child in node.get('children', [])[::-1]:
            child['parent'] = name
            pending.append(child)
    return result
|
018e068af6203736d7f70623dd2d24be12e2ea06
| 82,189
|
def lca(node1, node2):
    """Return the least common ancestor of {node1, node2}.

    Repeatedly climbs the node with the smaller uid toward its parent until
    both sides meet (assumes uids increase toward the root).
    """
    # Keep `hi` as the node with the larger uid, `lo` the smaller.
    if node1.uid >= node2.uid:
        hi, lo = node1, node2
    else:
        hi, lo = node2, node1
    while hi.uid != lo.uid:
        parent = lo.parent
        assert parent is not None
        lo = parent
        if lo.uid > hi.uid:
            hi, lo = lo, hi
    return hi
|
f9b210be7366ebbc700b0efbf3eee892f30dcc8e
| 82,192
|
import bisect
def find_ge(a, x):
    """Return the index of the leftmost item in sorted list `a` that is >= x.

    Raises ValueError when every item is smaller than x.
    """
    pos = bisect.bisect_left(a, x)
    if pos == len(a):
        raise ValueError
    return pos
|
36c87a92d3beaa6ef54935be3ea830cac4a530c8
| 82,194
|
def sort_python_measurement_files(folder):
    """Group a folder of measurement data files by device and measurement type.

    This doesn't strictly preserve measurement order, but groups each device's
    files by measurement type in experiment order.

    Parameters
    ----------
    folder : pathlib.Path
        Folder of data to sort.

    Returns
    -------
    sorted_files : list
        Grouped/sorted list of file paths.
    """
    # Unique device names: file name up to the first dot.
    device_stems = {path.parts[-1].split(".")[0] for path in folder.iterdir()}
    # Measurement-type extensions in experiment order.
    patterns = [".vt.tsv", ".div*.tsv", ".liv*.tsv", ".mppt.tsv", ".it.tsv"]
    ordered = []
    for stem in device_stems:
        for pattern in patterns:
            ordered.extend(sorted(folder.glob(f"{stem}{pattern}")))
    return ordered
|
3c6ecabfd198d033a784c16b4b751e325d77fc35
| 82,196
|
def identify_technique(target, obstype, slit, grating, wavmode, roi):
    """Classify a frame as Imaging or Spectroscopy from its header keywords.

    Args:
        target (str): Target name (keyword `OBJECT`); useful in automated
            acquisition modes such as AEON.
        obstype (str): Observation type (`OBSTYPE`).
        slit (str): Value of the `SLIT` keyword.
        grating (str): Value of the `GRATING` keyword.
        wavmode (str): Value of the `WAVMODE` keyword.
        roi (str): Value of the `ROI` keyword.

    Returns:
        str: 'Spectroscopy', 'Imaging', or 'Unknown'.
    """
    # Any spectroscopic indicator wins, checked first.
    looks_spectroscopic = (
        'Spectroscopic' in roi
        or obstype in ('ARC', 'SPECTRUM', 'COMP')
        or slit not in ('NO_MASK', '<NO MASK>')
        or grating not in ('NO_GRATING', '<NO GRATING>')
        or '_SP_' in target)
    if looks_spectroscopic:
        return 'Spectroscopy'
    looks_imaging = (
        'Imaging' in roi
        or obstype == 'EXPOSE'
        or wavmode == 'IMAGING'
        or '_IM_' in target)
    if looks_imaging:
        return 'Imaging'
    return 'Unknown'
|
07e825692cc1bee9580383651ac76ef6bb5c3f92
| 82,198
|
def c200_to_v200(c200=1., rs=1.0, H0=73):
    """Convert a concentration c200 to a circular velocity V200.

    Args:
        c200: dimensionless concentration.
        rs: scale radius in Mpc.
        H0: Hubble constant in km/s/Mpc.
    Returns:
        float: V200 in km/s.
    """
    v200 = 10. * c200 * rs * H0
    return v200
|
87b7d5f78f1b14cb3f83f34a1c5d18ffd912308a
| 82,202
|
def gen_podcast_episode_id(podcast_episode_obj):
    """
    Build the Elasticsearch document id for a PodcastEpisode.

    Args:
        podcast_episode_obj (PodcastEpisode): The PodcastEpisode object
    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"podcast_ep_{podcast_episode_obj.id}"
|
cd6a76e61bd761d93545af88a248dd097e6689ca
| 82,205
|
import pathlib
def get_full_img_path(img_root_path, csv_path):
    """Resolve an image path relative to the directory containing the CSV."""
    csv_dir = pathlib.Path(csv_path).parent
    return str(csv_dir / img_root_path)
|
582c86466cfd886099b34736c40d440195434396
| 82,206
|
def find_identifier(qualifiers):
    """Pick an identifier from a dictionary of feature qualifiers.

    Fields are tried in decreasing preference: protein_id, locus_tag, ID,
    Gene — covering most CDS features that lack a protein ID.

    Args:
        qualifiers (dict): Feature qualifiers parsed with genome2json.
    Returns:
        The first identifier found, otherwise None.
    """
    for field in ("protein_id", "locus_tag", "ID", "Gene"):
        if field in qualifiers:
            return qualifiers[field]
    return None
|
d2f7cdaaa1b7907f7846100704d05d9558d5c123
| 82,210
|
def find_user(user_name, source):
    """
    Case-insensitively search `source.users` for a user by nickname.

    Returns the first matching user, or None when nobody matches.
    """
    target = user_name.lower()
    return next(
        (user for user in source.users if user.nickname.lower() == target),
        None)
|
f08dee7d5ea3a0667ce4aafefb3ab99cf73fb459
| 82,219
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.