content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import List
from typing import Tuple
def edge_path_to_node_path(path: List[Tuple]) -> List[str]:
    """
    Convert a path of edges to a simple list of nodes (without direction information).

    :param path: the path of edges to convert; each edge is a
        (source, destination, ...) tuple
    :return: a path of nodes
    """
    if len(path) == 0:
        return []
    # First node is the source of the first edge; every edge then contributes
    # its destination.
    # BUG FIX: the original iterated over node_path[1:] (empty at that point)
    # instead of path, so every node after the first was silently dropped.
    node_path = [path[0][0]]
    node_path += [edge[1] for edge in path]
    return node_path
|
079a7352c7084329d19616070bcc51f82f029a5b
| 87,218
|
def hex_to_rgb(value='FFFFFF'):
    """Convert a hex color string to an (R, G, B) tuple.

    Args:
        value (str, optional): Hex color code, with or without a leading '#'.
            Defaults to 'FFFFFF'.

    Returns:
        tuple: The color as a tuple of ints.
    """
    digits = value.lstrip('#')
    step = len(digits) // 3
    channels = []
    for start in range(0, len(digits), step):
        channels.append(int(digits[start:start + step], 16))
    return tuple(channels)
|
15fc7fa15a567675a32399490002b998b9044b9a
| 87,227
|
def filter_invalid_coords(
    df, latitude_col, longitude_col, inclusive=False, inplace=False
):
    """Remove rows whose coordinates fall outside the valid lat/lon ranges.

    Latitude must lie in [-90, 90] (or (-90, 90) when ``inclusive`` is
    False) and longitude in [-180, 180] (or (-180, 180)).

    Parameters
    ----------
    df : pd.DataFrame
        The DataFrame to filter.
    latitude_col : str
        Name of the column holding latitude values.
    longitude_col : str
        Name of the column holding longitude values.
    inclusive : bool, default=False
        Whether the bounds themselves count as valid. Note that inclusive
        bounds may not work with certain GIS software and projections.
    inplace : bool, default=False
        If True, filter ``df`` in place and return None; otherwise return
        a filtered copy.

    Returns
    -------
    pd.DataFrame or None
        DataFrame with invalid rows removed, or None when ``inplace=True``.
    """
    lat = df[latitude_col]
    lon = df[longitude_col]
    if inclusive:
        valid = lat.ge(-90) & lat.le(90) & lon.ge(-180) & lon.le(180)
    else:
        valid = lat.gt(-90) & lat.lt(90) & lon.gt(-180) & lon.lt(180)
    if inplace:
        # Blank out invalid rows, then drop them.
        df.mask(~valid, inplace=True)
        df.dropna(inplace=True)
        return None
    return df.copy()[valid]
|
23e3c7b84e3f8e724f67793aed32ed5b7e4f0037
| 87,232
|
def get_diamond_c_from_original_coords(x, y, a, b, width, height, padding=22, radius=22):
    """Compute the ``c`` coefficient of the line ``ax + by + c = 0`` in diamond space.

    Diamond-space coordinates are obtained from original ones via
        x_d = (x + padding - wc) / norm
        y_d = (y + padding - hc) / norm
    where (wc, hc) is the padded image centre and norm its half extent minus
    the padding.
    """
    centre_w = (width + padding * 2 - 1) / 2
    centre_h = (height + padding * 2 - 1) / 2
    norm = max(centre_w, centre_h) - padding
    shifted_x = x + padding - centre_w
    shifted_y = y + padding - centre_h
    return -(a * shifted_x) / norm - (b * shifted_y) / norm
|
e6144a4969642f16ee92953edfb74ed902854f25
| 87,234
|
def get_backend_op_pair(test):
    """Return the (backend, operation) pair targeted by the given test name.

    Raises:
        LookupError: if no known test-suite name occurs in ``test``.
    """
    suite_to_backend = {
        'check_vmla_vmla': 'vmla',
        'check_dylib-llvm-aot_dylib': 'dylib-llvm-aot',
        'check_vulkan-spirv_vulkan': 'vulkan-spirv'
    }
    for suite, backend in suite_to_backend.items():
        if suite not in test:
            continue
        # Test names look like ...SUITE_OP.mlir; the op sits between the
        # suite name (plus the '_' separator) and the '.mlir' extension.
        op_start = test.index(suite) + len(suite) + 1
        return backend, test[op_start:-len('.mlir')]
    raise LookupError(f'Can not find a backend to match {test}')
|
b872f434f89c0d9d971a964405da8b152a1e173a
| 87,238
|
def _weight_function(G, weight):
"""Returns a function that returns the weight of an edge.
The returned function is specifically suitable for input to
functions :func:`_dijkstra` and :func:`_bellman_ford_relaxation`.
Parameters
----------
G : NetworkX graph.
weight : string or function
If it is callable, `weight` itself is returned. If it is a string,
it is assumed to be the name of the edge attribute that represents
the weight of an edge. In that case, a function is returned that
gets the edge weight according to the specified edge attribute.
Returns
-------
function
This function returns a callable that accepts exactly three inputs:
a node, an node adjacent to the first one, and the edge attribute
dictionary for the eedge joining those nodes. That function returns
a number representing the weight of an edge.
If `G` is a multigraph, and `weight` is not callable, the
minimum edge weight over all parallel edges is returned. If any edge
does not have an attribute with key `weight`, it is assumed to
have weight one.
"""
if callable(weight):
return weight
# If the weight keyword argument is not callable, we assume it is a
# string representing the edge attribute containing the weight of
# the edge.
if G.is_multigraph():
return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values())
return lambda u, v, data: data.get(weight, 1)
|
77b3b7884ba855241abcdd4a2353ac8c0c1b77b4
| 87,240
|
def convert_metadata_pairs_to_array(data):
    """
    Given a dictionary of metadata pairs, convert it to key-value pairs
    in the format the Nylas API expects: "events?metadata_pair=<key>:<value>"
    """
    if not data:
        # Preserve falsy inputs (None, {}) unchanged.
        return data
    return [key + ":" + value for key, value in data.items()]
|
2375005a57321d73b87a05e0fe95e750aace12c7
| 87,247
|
def create_log_file_name(randomString):
    """ Build the name of a log file from the given random suffix """
    return f"augustify_log_{randomString}"
|
f8c916ef0c112144aafa4df893bd44a8cc1d1da7
| 87,252
|
def where(iterable):
    """Get the positions where conditions are not False.

    Parameters
    ----------
    iterable : collections.Iterable[any]
        The conditions to be checked.

    Returns
    -------
    list[int]
        Positions where the conditions are truthy.
    """
    positions = []
    for index, condition in enumerate(iterable):
        if condition:
            positions.append(index)
    return positions
|
58a1369743aa2782519e3b3ccbdc85e7abe95772
| 87,254
|
def cosalpha(a, b, c):
    """
    Calculate the cosine of an angle in a triangle via the law of cosines.

    :param a: length of the side opposing the angle
    :param b: length of one side adjacent to the angle
    :param c: length of the other side adjacent to the angle
    :return: cosine of the angle
    """
    numerator = b ** 2 + c ** 2 - a ** 2
    denominator = 2 * b * c
    return numerator / denominator
|
31b1922fa8ca20e2451d0558849f717ab288b984
| 87,260
|
def get_movies_in_a_year(movies, year):
    """
    Query helper to get all the MOVIES that released that year.

    :param movies: Dictionary (name of the movie --> Movie Node for the movie)
    :param year: The year to look for MOVIES in.
    :return: List of MOVIES that released in the given year.
    """
    released = []
    for name, node in movies.items():
        if node.year_released == year:
            released.append(name)
    return released
|
148c3eb9a21931f6ce02f4351c4fdf10d914369e
| 87,262
|
def validate_lustreconfiguration_deploymenttype(lustreconfiguration_deploymenttype):
    """
    Validate DeploymentType for LustreConfiguration
    Property: LustreConfiguration.DeploymentType
    """
    valid_types = (
        "PERSISTENT_1",
        "SCRATCH_1",
        "SCRATCH_2",
    )
    if lustreconfiguration_deploymenttype in valid_types:
        return lustreconfiguration_deploymenttype
    raise ValueError(
        "LustreConfiguration DeploymentType must be one of: %s"
        % ", ".join(valid_types)
    )
|
831413e8731ea4e5c9d199a32d970542e14abc49
| 87,263
|
def mean_estimator(data_variance, n, ddof=1):
    """
    Estimate the variance of the mean from a data variance term (e.g. an
    eigenvalue), i.e. the precision of the mean itself.

    Note: this is not used in the actual calculation of PCA planar
    fitting errors; it is present for testing purposes.
    """
    denominator = n - ddof
    return data_variance / denominator
|
9763db959076fbe1b6582b09be32abc1b254f08b
| 87,269
|
import zipfile
def extract_file_from_zip(bytes_io, expected_file):
    """Extract a single member from an in-memory zip archive.

    :param bytes_io: file-like object (e.g. io.BytesIO) containing the archive
    :param expected_file: archive member name to read
    :return: the member's contents as bytes
    """
    # Use a context manager so the ZipFile is closed deterministically
    # instead of leaking until garbage collection.
    with zipfile.ZipFile(bytes_io) as archive:
        return archive.read(expected_file)
|
86ddf274dd96970083f047d0cdc9b29f566020a1
| 87,273
|
def _is_interactive_opt(bk_opt):
"""
Heuristics to detect if a bokeh option is about interactivity, like
'selection_alpha'.
>>> is_interactive_opt('height')
False
>>> is_interactive_opt('annular_muted_alpha')
True
"""
interactive_flags = [
'hover',
'muted',
'nonselection',
'selection',
]
return any(part in interactive_flags for part in bk_opt.split('_'))
|
2602ec81adf631f584c436a6f07c79978cc379d3
| 87,280
|
import base64
def b2s(bs: bytes) -> str:
    """Encode raw bytes as a base64 ASCII string."""
    encoded = base64.b64encode(bs)
    return encoded.decode('utf-8')
|
98d73442c390e8aacbaba8229009730e49d0a88f
| 87,291
|
def mock_event(player: dict) -> dict:
    """
    Fixture to create an AWS Lambda event dict
    :param player: Input character; see above
    :return: Mock event dict
    """
    body = {
        "Player": player,
        "playerId": "player_hash",
        "action": "attack",
        "enhanced": False,
    }
    return {"body": body}
|
152e72e4d2e43512da4d929f8e90f085c1476475
| 87,292
|
def get_xy_column_type_precision(column_elt):
    """
    Get the Xylinq type precision of a column.
    @param IN column_elt Column XML element
    @return The type precision as a (potentially empty) list
    """
    precision_list = []
    level = 1
    while True:
        # Precision attributes are numbered type_precision_1, _2, ...
        precision = column_elt.get("type_precision_%d" % level)
        if precision is None:
            break
        precision_list.append(precision)
        level += 1
    return precision_list
|
b5d2e81264b01b77568a354c536b7510d4a7c570
| 87,302
|
import random
def weighted_choice(weights):
    """
    Choose an index at random, with probability proportional to its weight.

    :param weights: a list of weights
    :type weights: list(int)
    :return: a number between 0 and len(weights) - 1
    :rtype: int
    """
    cumulative = []
    running_total = 0
    for weight in weights:
        running_total += weight
        cumulative.append(running_total)
    # Draw a point in [0, total) and return the first bucket containing it.
    point = random.random() * running_total
    for index, bound in enumerate(cumulative):
        if point < bound:
            return index
|
c332751d57b1e2f62d77fcc7aa0bcec2798f8c67
| 87,306
|
def find_exml(val, attrib=False):
    """Test that the XML value exists; return its value, else None."""
    if val is None:
        return None
    # Attributes are already plain strings; elements carry a text payload.
    return val if attrib else val.text
|
31e29937d57134b420a7097c7c576a9983fcdd42
| 87,307
|
def hex_string_to_int(value: str) -> int:
    """Parse a base-16 string (e.g. 'ff' or '0x10') into an int."""
    hex_base = 16
    return int(value, hex_base)
|
4eeedbce1fe8bc7533aee1233c7a97c3583bcb6a
| 87,308
|
def _use_time_window_post_cue(x, fs=250, t1_factor=1.5, t2_factor=6):
"""
Prepares the input data to only use the post-cue range.
Parameters:
- x: np.ndarray, size = [s, C, T], where T should be 1750
- fs: integer, sampling rate
- t1_factor: float, window will start at t1_factor * fs
- t2_factor: float, window will end at t2_factor * fs
Returns np.ndarray, size = [s, C, T'], where T' should be 1125 with default values
"""
assert t1_factor < t2_factor
t1 = int(t1_factor * fs)
t2 = int(t2_factor * fs)
return x[:, :, t1:t2]
|
1f726b6ca9bbe33d5f720c5aadf248bd7df19f0b
| 87,310
|
import torch
def sigmoid_t(x, b=0, t=1):
    """
    Temperature-scaled sigmoid used as a soft quantization function.

    Args:
        x: input tensor
        b: the bias
        t: the temperature
    Returns:
        y = sigmoid(t * (x - b))
    """
    exponent = -1 * t * (x - b)
    # Clamp the exponent to avoid overflow in exp for extreme inputs.
    exponent = torch.clamp(exponent, min=-10.0, max=10.0)
    return 1.0 / (1.0 + torch.exp(exponent))
|
019cf4747daf25070550066eba916149779555a9
| 87,311
|
def is_say_ingredients(text: str) -> bool:
    """
    A utility method to determine if the user said the intent 'say_ingredients'.
    """
    exact_match_phrases = ("ingredient", "ingredients")
    sample_phrases = (
        'say ingredient', 'tell me the ingredient', 'what are the ingredient',
        'tell the ingredient', 'say the ingredient', 'say ingredient',
        'tell me ingredient', 'tell ingredient',
    )
    if text in exact_match_phrases:
        return True
    return any(phrase in text for phrase in sample_phrases)
|
3de66c29ae2c7a35a5b3f71242bd958f1084e0de
| 87,314
|
import re
def camelcase_split(s):
    """Split a CamelCase string into its component words."""
    pattern = r'[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))'
    return re.findall(pattern, s)
|
dfb06975a80be544174918521a2f25debdd56f2c
| 87,315
|
def connection_string_to_dictionary(str):
    """
    Parse a "key1=value1;key2=value2" connection string into a dict.

    :param str: the connection string (parameter name shadows the builtin;
        kept unchanged for backward compatibility with keyword callers)
    :return: dictionary mapping each key to its value
    """
    cn = {}
    for pair in str.split(";"):
        # Split on the FIRST '=' only. The original rstrip("=") stripped
        # ALL trailing '=' but re-appended only one, corrupting base64
        # values ending in "==", and plain split("=") crashed on values
        # containing '=' anywhere else.
        (key, value) = pair.split("=", 1)
        cn[key] = value
    return cn
|
9cba0c4c600321b4c8d01e02d78d34bd5c60d14e
| 87,317
|
def is_pow2(value: int) -> bool:
    """ Return True when ``value`` is a positive power of two. """
    if value <= 0:
        return False
    # A power of two has exactly one bit set.
    return value & (value - 1) == 0
|
1d30e08ef450a965272cd94d7599bcc39d71992c
| 87,320
|
import torch
def box_wh_to_xy(x):
    """
    Convert boxes from (center x, center y, w, h) to
    (x1, y1, x2, y2) corner format.
    """
    cx, cy, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return torch.stack(corners, dim=-1)
|
ebbd5bcba65539fae6611bd79a3730d98a22725d
| 87,322
|
def get_subdir(index):
    """
    Return the sub-directory given the index dictionary, resolved as:
    1. the 'subdir' value, when that key exists
    2. 'noarch' when 'arch' is missing or None
    3. otherwise '<platform>-<bits>', where 'x86' maps to '32'
       and 'x86_64' to '64'
    """
    if 'subdir' in index:
        return index['subdir']
    arch = index.get('arch')
    if arch is None:
        return 'noarch'
    bits = {'x86': '32', 'x86_64': '64'}.get(arch, arch)
    return '%s-%s' % (index.get('platform'), bits)
|
e517a87ba928d9efd37617a6d84393888e9c07ca
| 87,323
|
def count_digits(n: int) -> int:
    """Counts the digits of a number in base 10.

    Args:
        n: An integer; a possible minus sign is not counted as a digit.

    Returns:
        The number of digits in the base-10 representation of ``abs(n)``.
    """
    # abs() generalizes the original (documented for non-negative input
    # only) to negatives, whose '-' sign would otherwise be counted.
    return len(str(abs(n)))
|
1e867e4421a119f5c31a17e50ade417dddd73053
| 87,325
|
def is_arabic_only(word):
    """
    Checks if word only contains arabic letters
    """
    horof = ("ضصثقفغعهخحجدشسيبلاتنمكطذئءؤرىةوزظّ")
    return all(letter in horof for letter in word)
|
3d1223fa563d72b7ac5c3f7360e90ff3c815a0c3
| 87,326
|
def fs_get_file_hash(filelist):
    """Map each figshare file id to its MD5 hash.

    Parameters
    ----------
    filelist : list of dict
        HTTP request response from fs_get_file_list

    Returns
    -------
    response : dict
        keys are file ids (as strings) and values are 'md5:'-prefixed hashes
    """
    hashes = {}
    for entry in filelist:
        hashes[str(entry["id"])] = "md5:" + entry["supplied_md5"]
    return hashes
|
1244c6ccd8fe33a4df3df2391abef7766ba9af43
| 87,327
|
def replace_bibtex_cite_name(bibtex, current_name, new_name):
    """Replace the first occurrence of a cite name in a bibtex string.

    :param bibtex: string of bibtex to do the replacing on
    :param current_name: current cite name in the bibtex
    :param new_name: name to replace it with
    """
    first_occurrence_only = 1
    return bibtex.replace(current_name, new_name, first_occurrence_only)
|
6ebae691836429c560762d43fe34bb2a2d1b7696
| 87,329
|
def get_ip_to_total_traffic_size(stream):
    """
    Return a dictionary relating IP addresses to their total traffic size,
    i.e. the summed length of every packet in ``stream`` in which the
    address appears as a source or a destination.
    """
    totals = {}
    for packet in stream:
        size = packet.length
        # Credit the packet's size to both endpoints (twice to the same
        # address if source and destination coincide, as before).
        for address in (packet.src_addr, packet.dst_addr):
            totals[address] = totals.get(address, 0) + size
    return totals
|
95485cd794f702df0b2fc68e16f0c756bcb9b794
| 87,335
|
def duns_screener(duns):
    """
    Normalize a DUNS number string into DUNS+4 format.

    Common DUNS errors handled:
    * 9 digits: append '0000'
    * 8 digits (leading zero removed): restore leading zero, append '0000'
    * anything else: return 'error'
    """
    length = len(duns)
    if length == 9:
        return duns + '0000'
    if length == 8:
        return '0' + duns + '0000'
    return 'error'
|
bed8c6bff392a31d511cf962cec7c30e32b86d8b
| 87,346
|
def is_subset(subsampling, reference):
    """Return whether indices specified by ``subsampling`` are subset of the reference.

    Args:
        subsampling ([int] or None): Sample indices (None means all).
        reference ([int] or None): Reference set (None means all).

    Returns:
        bool: Whether all indices are contained in the reference set.
    """
    if reference is None:
        # A None reference means "everything": any selection is contained.
        return True
    if subsampling is None:
        # A full selection cannot fit inside a finite reference.
        return False
    return set(subsampling) <= set(reference)
|
66e924370b0698bb0f1c2b73d058cf99d4543259
| 87,352
|
import uuid
def make_uuid(rng):
    """
    Generate a random (version 4) UUID string using the given random generator.
    """
    random_bytes = bytes(rng.getrandbits(8) for _ in range(16))
    return str(uuid.UUID(bytes=random_bytes, version=4))
|
d44a2cbf30f02caa9f4bac54c93f29cbd6646fb9
| 87,353
|
import hashlib
def md5(file_path):
    """Calculate the uppercase hex MD5 digest of a file.

    :param file_path: full path to the file.
    """
    # Read via a context manager so the file handle is closed promptly;
    # the original left the handle open until garbage collection.
    with open(file_path, 'rb') as fid:
        return hashlib.md5(fid.read()).hexdigest().upper()
|
594276bfe719a97f36d91b3c1094a601618fe419
| 87,356
|
from typing import Dict
from typing import Tuple
def bbox_to_tf_style(bbox: Dict, img_width: int, img_height: int) -> Tuple:
    """
    Convert a Sighthound bounding box to tensorflow box style.

    Tensorflow boxes are (y_min, x_min, y_max, x_max) tuples of floats in
    [0.0, 1.0], relative to the image width and height; values here are
    rounded to 5 decimal places.
    """
    decimals = 5
    left = bbox["x"]
    top = bbox["y"]
    right = left + bbox["width"]
    bottom = top + bbox["height"]
    x_min = round(left / img_width, decimals)
    x_max = round(right / img_width, decimals)
    y_min = round(top / img_height, decimals)
    y_max = round(bottom / img_height, decimals)
    return (y_min, x_min, y_max, x_max)
|
f316d456743c0e881725fea9a2efd8eb6df7b636
| 87,357
|
def isnum(num):
    """
    Return True if the argument can be converted into a float.

    :param num: value to test (string, number, or any other object)
    :return: Boolean value
    """
    try:
        float(num)
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects such as None or lists,
        # which the original let propagate instead of reporting False.
        return False
    return True
|
a85c85359566cab92c744e4c088d70d92cffca67
| 87,364
|
def is_disc_aligned(disc, time_counter, align_offset):
    """Check if disc is aligned."""
    total_rotation = disc['cur_position'] + time_counter + align_offset
    return total_rotation % disc['positions'] == 0
|
3dded7fbd062920faa0c80edb3e384543e1e81fa
| 87,368
|
def sign(n):
    """ Return the sign of a number (1 for zero, as in the original contract). """
    if not n:
        return 1
    return n / abs(n)
|
f9087a679372d13614189a9cff9ef99214821cd5
| 87,371
|
def two_class_palette(x):
    """
    Returns the color palette for two class labels.
    0: blue
    1: red
    """
    if set(x) != {0, 1}:
        raise ValueError('class labels not 0-1')
    return ['red' if label else 'blue' for label in x]
|
8b0c577ce734846d8acd4c86f37a85d4f650538c
| 87,372
|
def fixed_value_thresholder(threshold):
    """Build a predicate marking scores at or above the fixed ``threshold``."""
    def at_or_above(scores):
        return scores >= threshold
    return at_or_above
|
31ce3cdb1242fce461799d878676972ad0112619
| 87,373
|
def rename_model(connection):
    """SQL test statements for the RenameModelTests suite.

    Args:
        connection (django.db.backends.base.BaseDatabaseWrapper):
            The connection being tested.

    Returns:
        dict:
        The dictionary of SQL mappings.
    """
    rename_sql = 'ALTER TABLE "tests_testmodel" RENAME TO "tests_destmodel";'
    return {
        'RenameModel': [rename_sql],
        'RenameModelSameTable': [],
        'RenameModelForeignKeys': [rename_sql],
        'RenameModelForeignKeysSameTable': [],
        'RenameModelManyToManyField': [rename_sql],
        'RenameModelManyToManyFieldSameTable': [],
    }
|
c7c911ba8c81dcbfd7116b79b1e16b1c333a2e63
| 87,377
|
def get_filename(problem):
    """Returns filename in the form `001.py`"""
    return f'{problem:03d}.py'
|
9097b559eef6579cf41951f5ff89556cae307206
| 87,381
|
from pathlib import Path
import pkg_resources
def _create_model_folder(url):
    """ Create the folder for the new model.

    The new folder is created in the `models_data` folder, i.e., at the same
    location as the other models (which can be installed independently from
    this one).

    Structure of the `models_data` folder.
    ```
    models_data
    |- path_model (not mandatory)
    |- snippet_model (independent from this)
    `- snippet_model_author (created with this method)
    ```

    Parameters
    ----------
    url: str
        The url of the repository

    Returns
    -------
    `pathlib.Path`
        The path of the model folder

    Raises
    ------
    FileExistsError
        If the model folder already exists
    """
    # The model is named after the repository author (second-to-last URL part).
    author = url.split('/')[-2]
    model_name = 'snippet_model_%s' % author
    # Locate credentialdigger's models_data folder, shared by all models.
    models_data = Path(pkg_resources.resource_filename('credentialdigger',
                                                       'models_data'))
    # mkdir raises FileExistsError when this model is already installed.
    model_folder = models_data / model_name
    model_folder.mkdir()
    return model_folder
|
b6d8a9bcf4a1aa84a3b34413630fc755382c15ae
| 87,382
|
def remove_dup(dataframe, cam_id_name):
    """
    Drop rows with a duplicated camera id, keeping the first occurrence.
    Note: mutates ``dataframe`` in place and also returns it.

    :param dataframe: a pandas dataframe
    :return: the same dataframe after removing duplicates
    """
    dataframe.drop_duplicates(subset=cam_id_name, keep='first', inplace=True)
    return dataframe
|
38bc48fb5fec1ca443697ecb661ae283b5f8d0a8
| 87,384
|
import random
def get_perspective_params(img, distortion_scale):
    """Sample start/end corner points for RandomPerspective.

    Returns the four undistorted image corners and four randomly perturbed
    corners (top-left, top-right, bottom-right, bottom-left order).
    """
    width, height = img.size
    dx = int(width / 2 * distortion_scale)
    dy = int(height / 2 * distortion_scale)
    # NOTE: the four corners are sampled in the same order as before so that
    # seeded random streams produce identical results.
    tl = (random.randint(0, dx), random.randint(0, dy))
    tr = (random.randint(width - dx - 1, width - 1), random.randint(0, dy))
    br = (random.randint(width - dx - 1, width - 1),
          random.randint(height - dy - 1, height - 1))
    bl = (random.randint(0, dx), random.randint(height - dy - 1, height - 1))
    start_points = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
    return start_points, [tl, tr, br, bl]
|
49528cc8400c6ef7ad4a4cc8b9efd38c64ecf2cb
| 87,390
|
def convert_time(time):
    """Convert time in millisecond to string format MM:SS:MS"""
    minutes, remainder = divmod(time, 60000)
    seconds = remainder // 1000
    milliseconds = time % 1000
    return "%s:%s:%s" % (
        str(minutes).zfill(2), str(seconds).zfill(2), str(milliseconds).zfill(3)
    )
|
94cdf74d037f7976419c36493963ab3a6aea9576
| 87,391
|
import re
def compile_matcher(regex):
    """Compile ``regex`` into a predicate over strings.

    An empty regex matches everything; a leading "!" negates the match
    (so a bare "!" matches nothing).
    """
    if not regex:
        return lambda x: True
    if regex == '!':
        return lambda x: False
    negate = regex.startswith('!')
    pattern = re.compile(regex[1:] if negate else regex)
    if negate:
        return lambda x: pattern.search(x) is None
    return lambda x: pattern.search(x) is not None
|
6632de458c4e95f3009d9a01233d144c6b7d5752
| 87,394
|
import webbrowser
def get_info_from_website (barcode):
    """Open the Open Food Facts page for a scanned barcode in a new browser tab.

    Uses webbrowser.open to launch the page in a new window; see
    https://docs.python.org/3/library/webbrowser.html

    Parameters
    ----------
    barcode: the barcode for which the nutrition info should be obtained

    Returns
    -------
    The value returned by webbrowser.open for the nutrition-info page.
    """
    url = f"https://world.openfoodfacts.org/product/{barcode}"
    return webbrowser.open(url, new=1)
|
e8c86fef90f2140a862ef3b27dc0268a6d268de9
| 87,396
|
def _IsValidComposerUpgrade(cur_version, candidate_version):
"""Validates that only MINOR and PATCH-level version increments are attempted.
(For Composer upgrades)
Checks that major-level remains the same, minor-level ramains same or higher,
and patch-level is same or higher (if it's the only change)
Args:
cur_version: current 'a.b.c' Composer version
candidate_version: candidate 'a.b.d' Composer version
Returns:
boolean value whether Composer candidate is valid
"""
curr_parts = list(map(int, cur_version.split('.', 3)))
cand_parts = list(map(int, candidate_version.split('.', 3)))
if (curr_parts[0] == cand_parts[0] and
(curr_parts[1] < cand_parts[1] or
(curr_parts[1] <= cand_parts[1] and curr_parts[2] <= cand_parts[2]))):
return True
return False
|
0683c90e22df65eb0e27acd50ee21d08c1a366d9
| 87,405
|
def get_actual_preds_dose(dose, df_test, df_model_preds, df_targets):
    """Get the actual MOA target labels and predictions for each Treatment dose"""
    # Rows of df_test at this dose select the matching rows of the
    # prediction and target frames (they share the same index).
    dose_index = df_test.index[df_test['dose'] == dose]
    dose_preds = df_model_preds.loc[dose_index].reset_index(drop=True)
    dose_targets = df_targets.loc[dose_index].reset_index(drop=True)
    return dose_targets, dose_preds
|
4cbd52932031db3898a5ad3de00f70043190636d
| 87,408
|
def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    """Polynomial learning-rate decay policy used in nnUNet."""
    decay = (1 - epoch / max_epochs) ** exponent
    return initial_lr * decay
|
abdf1b887ee6d24327b7129d7ed06b01a6cbe693
| 87,411
|
def mps_to_kmph(mps):
    """
    Convert a speed from metres per second to kilometres per hour.

    :param mps: the speed in m/sec
    :return: the speed in km/hr
    """
    # 3600 s/h divided by 1000 m/km.
    kmph_per_mps = 3.6
    return kmph_per_mps * mps
|
6a5932fa5117594894da1529d8c75e4c122baab6
| 87,413
|
def get_input_type(args):
    """Find the input handler and its options from CLI arguments.

    :param `argparse.Namespace` args: `argparse.Namespace` instance.
    :return: tuple of input type and corresponding options
    :rtype: tuple
    """
    if args.interactive:
        return ('interactive', [])
    if args.file:
        return ('file', [args.file])
    if args.request:
        return ('inline', [args.request])
    if args.stdin:
        return ('stdin', [])
    return (None, [])
|
3479add51e9479fd0131436074501180a1bafba9
| 87,416
|
def isintegral_str(txt: str) -> bool:
    """Return true iff argument is an integer string."""
    if isinstance(txt, str):
        try:
            int(txt)
            return True
        except ValueError:
            pass
    return False
|
11ba307d2e276e370b29aee9dfa00f37d865125b
| 87,425
|
def getOffset(ann):
    """
    Returns the offset of the supplied Matplotlib annotation object,
    in pixels from its data point location on its subplot.
    """
    position = ann.get_position()
    return position
|
510ceace6a9a0c5745506569ed88efd4dc914ec5
| 87,427
|
def ends(iter):
    """Return the two ends of an iterable"""
    items = list(iter)
    first, last = items[0], items[-1]
    return first, last
|
d8fa5e37b830cd7395a35f04fded74608c9f0acb
| 87,431
|
from typing import List
def validar_codigos(n: int) -> List[int]:
    """Read ``n`` codes from the user, rejecting duplicates.

    Re-prompts the same position whenever a duplicate code is entered.

    :param n: number of codes to read
    :return: list of unique codes, in entry order
    :rtype: List[int]
    """
    codigos = []
    while len(codigos) < n:
        posicion = len(codigos) + 1
        codigo = int(input(f"Código {posicion}: "))
        if codigo in codigos:
            print("Error, código duplicado")
        else:
            codigos.append(codigo)
    return codigos
|
ffce90bf582434b06d41ff5f5902080fe9f39d0a
| 87,435
|
def _get_axis_idx(header, axis_name):
"""Return the axis number of the given axis."""
axes = ['right ascension', 'declination', 'stokes', 'frequency']
assert axis_name.lower() in axes, "Unknown `axis_name`."
for ax in range(1, len(axes)+1):
key = 'ctype{:d}'.format(ax)
if header[key].lower() == axis_name.lower():
return ax
raise ValueError("Cannot find requested axis in the image.")
|
6f4a524530aaa8d6c1215bf4fa4cb2026ec87c4f
| 87,437
|
def is_printable(char: str) -> bool:
    """Determine whether a character is printable for our purposes.

    We mainly follow Python's definition of printable (everything Unicode
    does not class as a separator or "other" character), but additionally
    treat U+F8FF — the private-use codepoint for the Apple logo — as
    printable.
    """
    apple_logo = "\uf8ff"
    return char == apple_logo or char.isprintable()
|
031f924505339888026b2d84dbd55790f67c232f
| 87,439
|
def get_threat_detail(threats):
    """
    Build a summary dict for each threat in the response.

    :param threats: list of threats from response
    :return: list of detailed elements of threats
    :rtype: list
    """
    details = []
    for threat in threats:
        details.append({
            'Title': threat.get('title', ''),
            'Category': threat.get('category', ''),
            'Severity': threat.get('severity', ''),
            'Description': threat.get('description', ''),
            'Cve': threat.get('cves', []),
            'Source': threat.get('source', ''),
            'Published': threat.get('published', ''),
            'Updated': threat.get('updated', ''),
            'ThreatLastTrendingOn': threat.get('threatLastTrendingOn', ''),
            'Trending': threat.get('trending', '')
        })
    return details
|
7ce4516f34c2dd72b0211a6e4754f0926456ebcc
| 87,441
|
def check_rect(rect):
    """
    Check valid Rectangle
    """
    x, y, w, h = rect
    if x < 0 or y < 0:
        return False
    return w > 0 and h > 0
|
91620370d2a58f72532e3bdd34bf8c7666511337
| 87,443
|
def config_from_object(bilby_object):
    """Extract the configuration file stored within a `bilby.core.result.Result`
    object (or alike).

    Parameters
    ----------
    bilby_object: bilby.core.result.Result (or alike)
        object you wish to extract the configuration file from

    Returns
    -------
    dict
        ``{"config": <command line args>}`` when available, otherwise ``{}``.
    """
    meta = bilby_object.meta_data
    if meta is not None and "command_line_args" in meta.keys():
        return {"config": meta["command_line_args"]}
    return {}
|
387618da3d0261dd612faf0230ddd531bd468d0d
| 87,444
|
import ast
def create_return_statement(var_name=None):
    """
    Create a return statement node

    :param var_name: Name of variable to be returned
    :return: Instance of `ast.Return`
    """
    if not var_name:
        # Bare `return` with no value.
        return ast.Return()
    return ast.Return(value=ast.Name(id=var_name))
|
a05eba5ea0756a073333df61abaa2f4ba109bb9c
| 87,445
|
def bytes_to_num(bval):
    """
    Convert a four byte big-endian sequence to an integer.

    :param bytes bval: A four byte sequence to turn into an integer.
    """
    # BUG FIX: in Python 3, indexing bytes already yields ints, so the
    # original ord(bval[i] << k) raised TypeError on every call; shift the
    # raw byte values directly.
    num = 0
    num += bval[0] << 24
    num += bval[1] << 16
    num += bval[2] << 8
    num += bval[3]
    return num
|
e5ade4caa4602b2a9cef3d2c0997995813eb0d2c
| 87,446
|
def _trunc_id(session_id):
"""Returns truncated session id which is suitable for logging."""
if session_id is not None:
return session_id[-5:]
|
041ceae167aad1bda000ae2de9fb7cb2ff23cc9f
| 87,451
|
def get_wheels_speed(encoderValues, oldEncoderValues, delta_t):
    """Compute left/right wheel angular speeds from two encoder readings.

    Encoder values give the wheel angular position in radians; speed is the
    finite difference over ``delta_t`` seconds.
    """
    left_delta = encoderValues[0] - oldEncoderValues[0]
    right_delta = encoderValues[1] - oldEncoderValues[1]
    return left_delta / delta_t, right_delta / delta_t
|
7f677321d02d1b29971f246a711d6faf25abb922
| 87,453
|
from typing import Tuple
def manhattan_heuristic(start_point: Tuple[int, int], end_point: Tuple[int, int]) -> float:
    """Return the Manhattan distance between the two points
    (sum of the absolute differences of their coordinates).
    """
    return sum(abs(s - e) for s, e in zip(start_point, end_point))
|
096bb840bceb81e398807416157ef15d4987b20a
| 87,461
|
def _train_batch(loader, model, optimizer, loss_fun):
"""Train a batch and return the loss"""
running_loss = 0
total = 0
for batch_idx, (target, feat) in enumerate(loader):
pred = model(feat)
optimizer.zero_grad()
loss = loss_fun(pred.flatten(), target)
loss.backward()
optimizer.step()
running_loss += loss.item()
total = batch_idx
return running_loss / (total + 1)
|
56116d4f040e29df1608ba7c8ef54d0edd3ebfb5
| 87,462
|
def mFWQ(mFL, qL):
    """
    mFWQ(mFL, qL):
    (mole or mass) Fraction Weighted Quantity

    Parameters:
        mFL, list of mole or mass fractions, sum(mFL) = 1
        qL, list of quantities corresponding to items in mFL
    Returns:
        weighted average of items in qL
    """
    return sum(fraction * quantity for fraction, quantity in zip(mFL, qL))
|
7af160a3f283cbe8e489060754e5d960e5b1d634
| 87,463
|
def _fits_section_header(section_name):
"""
Blank fits header cards for a section header. As in drizzle, one blank line,
a line with the section name as a comment, then one more blank.
"""
return [('', '', ''), ('', '/ ' + section_name, ''), ('', '', '')]
|
4d7ce0175e40706505bed15e2fc1f5d4f2d27dac
| 87,464
|
def check_file_contents(filename, expected_text):
    """Checks that the given file contains the expected text.

    Fix over the previous version: comparison used ``zip``, which silently
    stopped at the shorter of the two inputs, so a truncated (or extended)
    file passed the check. ``zip_longest`` now reports those lines too,
    padding the missing side with ``None``.

    Parameters
    ----------
    filename : str
        Name of file to check
    expected_text : str
        Expected contents of file

    Returns
    -------
    list
        :obj:`list` of unexpected lines
    """
    from itertools import zip_longest

    # Read the file
    with open(filename, 'r') as fid:
        lines = fid.readlines()
    # Each expected line is compared with a trailing newline, as readlines()
    # produces (so a file missing its final newline is flagged, as before).
    expected_lines = [line + '\n' for line in expected_text.split('\n')]
    failures = []
    for line_num, (actual_line, expected_line) in enumerate(
            zip_longest(lines, expected_lines)):
        if actual_line != expected_line:
            failures.append('Unexpected file contents (line {}): ACTUAL: {} -- EXPECTED: {}'.format(line_num, actual_line, expected_line))
    return failures
|
dafaa429be49462284b233eaa43aca9cf00aa2c5
| 87,467
|
import re
def stmt_type(statement):
    """Extract type of statement, e.g. SELECT, INSERT, UPDATE, DELETE, ..."""
    words = re.findall(r'\w+', statement)
    return words[0].upper()
|
06443f294fd7fc1727d9f738764491f4ece23aac
| 87,468
|
def parse_speaker_segments(results):
    """
    From the Amazon Transcribe results JSON response, parse a list of all
    segments with their time-frame and speaker label. The per-segment
    'items' key is not parsed.
    Helper function for ``chunk_up_transcript()``
    :param results: Amazon Transcribe results JSON
    :return: List of segments with their time-frames and speaker labels
    """
    return [
        {
            "start_time": float(label["start_time"]),
            "end_time": float(label["end_time"]),
            "speaker": label["speaker_label"],
        }
        for label in results['speaker_labels']['segments']
    ]
|
7a51a9ac076ef34a2b6162cdaff9c9974b07f580
| 87,470
|
import hashlib
def string_to_num_id(string_id: str) -> int:
    """
    Convert string id into a numeric one.
    :param string_id: string id to convert
    :return: numeric id (at most 10 decimal digits)
    """
    # Hash the string with MD5 and read the digest as one big hex integer.
    hex_digest = hashlib.md5(string_id.encode("utf-8")).hexdigest()
    full_num_id = int(hex_digest, 16)
    # Keep only the first 10 decimal digits of the full numeric hash.
    return int(str(full_num_id)[:10])
|
e05a0ef7c9cfb8480a255d9eafbf320f02668d2b
| 87,472
|
def indent(string, prefix=' ' * 4):
    """
    Prepend *prefix* (four spaces by default) to *string*.

    >>> indent('foo')
    '    foo'
    """
    return '{}{}'.format(prefix, string)
|
e81d08254a6d8f2fbb75f47c0e7688ff72c1698f
| 87,474
|
def _get_text(path):
"""Return a string with the textual contents of a file at PATH."""
fp = open(path, 'rb')
try:
return fp.read()
finally:
fp.close()
|
c8ec126801e5175baead8ce9d50b8f237cf9de75
| 87,475
|
def get_file_name(filepath, extension=True):
    """Returns the name of the file of a given filepath or url.
    Args:
        filepath: String of full file path
        extension: Boolean determining whether or not the file type will be
            returned as part of the file name
    Returns:
        String: The filename
    """
    # Everything after the last slash (the whole string if there is none).
    name = filepath.rpartition("/")[2]
    # Without extension, keep only the part before the first dot.
    return name if extension else name.partition(".")[0]
|
54bd34f5c4859ffecdeb04e9094a17982bb80790
| 87,478
|
def lower_but_first(tokens):
    """Lower-case every token in *tokens* except the first one."""
    return [token if position == 0 else token.lower()
            for position, token in enumerate(tokens)]
|
339b6576adcf2980ee1b1be95d5af6f884ae5129
| 87,482
|
def word_for_ampm(hour, ampm):
    """Return 'morning', 'afternoon', or 'evening' for the given hour and
    AM/PM setting.
    """
    if ampm == 'am':
        return 'morning'
    # Not AM: before 6 o'clock counts as afternoon, 6 or later as evening.
    return 'afternoon' if hour < 6 else 'evening'
|
3b5c0a7e3fdbc5ab5d8fa7fe3a8cc95fdc04d5b7
| 87,485
|
from pathlib import Path
from typing import List
import itertools
def find_typeshed_search_paths(typeshed_root: Path) -> List[Path]:
    """
    Given the root of typeshed, find all subdirectories in it that can be used
    as search paths for Pyre.
    """
    stubs_root = typeshed_root / "stubs"
    stub_packages = sorted(stubs_root.iterdir()) if stubs_root.is_dir() else []
    candidates = itertools.chain([typeshed_root / "stdlib"], stub_packages)
    # Keep only candidates that actually exist as directories.
    return [candidate for candidate in candidates if candidate.is_dir()]
|
753561d92b905e92d24d38436b58104be2357525
| 87,494
|
import difflib
def get_diff(new_source, original_source, file_path):
    """
    Return a unified diff between two source strings.

    :param new_source: refactored source text
    :param original_source: original source text
    :param file_path: path shown in both diff headers
    :return: string with diff
    :rtype str
    """
    diff_lines = difflib.unified_diff(
        original_source.splitlines(),
        new_source.splitlines(),
        file_path,
        file_path,
        "(original)",
        "(refactored)",
        lineterm="",
    )
    return "\n".join(diff_lines)
|
b431837b1e7b9ed50a8932de91d9bbe2a0973c6d
| 87,495
|
def get_upstream(version_str):
    """Given a version string that could potentially contain both an upstream
    revision and a debian revision, return a tuple of both. If there is no
    debian revision, return 0 as the second tuple element."""
    upstream, sep, debian = version_str.rpartition('-')
    if not sep:
        # No hyphen means no debian revision, which is also valid.
        return version_str, '0'
    return upstream, debian
|
2ef54d267261bd77fc0c5ac2bbfa526c1552df4d
| 87,496
|
import math
def factorize(val_to_fac):
    """Return the prime factorization of *val_to_fac* as a space-separated
    string of ascending factors with a trailing space, e.g.
    ``factorize(12) == "2 2 3 "``.

    Fixes over the previous version: the trial-divisor bound is recomputed
    against the *remaining* value (the old ``range`` was built once from a
    stale value and stopped at n/2, so e.g. factorize(7) returned ""),
    integer division keeps factors exact, and a leftover prime factor larger
    than the square root is appended instead of being silently dropped.
    """
    answer = ""
    # Divide out all factors of two first.
    while val_to_fac % 2 == 0:
        answer += "2 "
        val_to_fac //= 2
    # Trial-divide by odd numbers while divisor**2 <= remaining value.
    divisor = 3
    while divisor * divisor <= val_to_fac:
        while val_to_fac % divisor == 0:
            answer += str(divisor) + " "
            val_to_fac //= divisor
        divisor += 2
    # Whatever remains above 1 is itself prime.
    if val_to_fac > 1:
        answer += str(val_to_fac) + " "
    return answer
|
eecfb4dbde95febec05a122c2c156c713598ba08
| 87,500
|
def _filter_internal(iterable, return_internal):
"""Filter the internal elements of an iterable."""
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
|
da6863a31f75e4a1ad1584430c031816544b875b
| 87,502
|
def prod(iterable, start=1):
    """Multiplies ``start`` and the items of an ``iterable``.
    :param iterable: An iterable over elements on which multiplication may be
        applied. They are processed from left to right.
    :param start: An element on which multiplication may be applied, defaults
        to ``1``.
    :return: The result of multiplication.
    """
    result = start
    for factor in iterable:
        result *= factor
    return result
|
4b5bfb0bf64f03b9697193dd83faf81e00bef25a
| 87,506
|
def batch_predict(tf_ds, batch_size, prediction_func):
    """Returns list of tuples (article_id, prediction, label).
    Predicts tensorflow dataset in batches.
    """
    evaluation_data = []
    batches = tf_ds.batch(batch_size).as_numpy_iterator()
    for article_ids, inputs, labels in batches:
        predictions = prediction_func(inputs)
        evaluation_data.extend(zip(article_ids, predictions, labels))
    return evaluation_data
|
449e912b927bad105090dce321973877bb533573
| 87,508
|
import re
def git2pep440(git_ver):
    """Transforms git-describe output into PEP440 compatible format.

    Examples (NOTE: git describe emits the commit as ``-g<sha>``; the regex
    below requires that ``g`` prefix for the commit group to match):
        v1.0.1.rc5-24-gabcdef => 1.0.1.rc5+24.abcdef
        v3.0-90-ga89abc => 3.0+90.a89abc
    """
    def _git2pep440(match):
        (major, minor, _3, patch, _5, rcdevversion, rcdevtype, _8, distance,
         _10, commit) = match.groups()
        version_string = f"{major}.{minor}"
        if _3:  # optional patch component (".N")
            version_string += f".{patch}"
        if _5:  # optional rc/dev pre-release component (".rcN" / ".devN")
            version_string += f".{rcdevversion}"
        if _8:  # commits since the tag -> PEP 440 local version segment
            version_string += f"+{distance}.{commit}"
        return version_string
    # Fix: raw string — the previous non-raw pattern relied on invalid escape
    # sequences like "\.", which raise DeprecationWarnings on modern Pythons.
    pattern = (r'v?([0-9]+)\.([0-9]+)(\.([0-9]+))?(\.((rc|dev)[0-9]+))?'
               r'(-([0-9]+))?(-g([0-9a-fA-F]+))?')
    return re.sub(pattern, _git2pep440, git_ver)
|
86df44f2c1f0c666f2da1b17b689ceeb39ad8a2c
| 87,512
|
def split_into_parts(text: str, num_parts: int) -> list[str]:
    """
    Split *text* into *num_parts* contiguous chunks of near-equal length.

    The first ``len(text) % num_parts`` chunks are one character longer than
    the rest, so chunk lengths differ by at most one.

    Parameters
    ------------
    text: str
        The text to be split up.
    num_parts: int
        How many equally sized parts to split the text into.
    Returns
    ------------
    str[]
        An array of strings (parts).
    """
    base_len, remainder = divmod(len(text), num_parts)
    parts = []
    start = 0
    for index in range(num_parts):
        end = start + base_len + (1 if index < remainder else 0)
        parts.append(text[start:end])
        start = end
    return parts
|
89e74374385c7a22dde19dd948aa2206ec123efd
| 87,515
|
import inspect
def _is_method(obj, method):
"""Returns True if a given method is obj's method.
You can not simply test a given method like:
return inspect.ismethod(method)
This is because functools.wraps converts the method to a function
in log_method_call function.
"""
return inspect.ismethod(getattr(obj, method.__name__, None))
|
10d5060894ce223d40bc3c4f0e5b1c39d4b0e369
| 87,518
|
import math
def partition_items(count, bin_size):
    """
    Given the total number of items, determine the number of items that
    can be added to each bin with a limit on the bin size.
    So if you want to partition 11 items into groups of 3, you'll want
    three of three and one of two.
    >>> partition_items(11, 3)
    [3, 3, 3, 2]
    But if you only have ten items, you'll have two groups of three and
    two of two.
    >>> partition_items(10, 3)
    [3, 3, 2, 2]
    """
    num_bins = -(-count // bin_size)  # ceiling division
    if num_bins == 0:
        return []
    # Round-robin distribution: the first `extra` bins get one more item.
    base, extra = divmod(count, num_bins)
    return [base + 1] * extra + [base] * (num_bins - extra)
|
f335b412273d3c6dea4d27c4dd538a65efcd8cd2
| 87,525
|
import re
def cleanup(text: str) -> str:
    """
    Remove punctuation and normalize a string.

    Fixes over the previous version: a single ``"  " -> " "`` replace pass
    left runs of three or more spaces only partially collapsed, and the
    trailing-whitespace check stripped at most one space. All whitespace
    runs are now collapsed to single spaces and leading/trailing whitespace
    is fully stripped via ``split``/``join``.

    Parameters
    ----------
    text : string
        The string needed to be cleaned up
    Returns
    -------
    string
        Cleaned up version of text with no punctuation, single spaced
        separation, and all lowercase
    """
    text = text.replace("’", "")   # remove fancy apostrophe with no space
    text = text.replace("\'", "")  # remove normal apostrophe with no space
    text = text.replace("_", " ")  # remove underscore with space (regex doesn't catch it)
    text = re.sub(r'[^\w\s]', ' ', text)  # replace all other punctuation with a space
    # Collapse every whitespace run to one space and strip the ends.
    text = " ".join(text.split())
    return text.lower()
|
f102b885396eaf65bbcd075a3d45cf1e5d76926d
| 87,528
|
import pathlib
def get_module_dot_notation(module_path: pathlib.Path) -> str:
    """
    Given a module path, return the dot notation import string starting at astro.

    Fix: the previous version called ``module_path.relative_to(base_dir)``
    twice, discarding the first result — the duplicate (dead) call is removed.

    :param module_path: Path to the module of interest
    :return: String containing the absolute dot notation path to the module
    :raises ValueError: if *module_path* is not inside the ``astro`` base dir
    """
    # We assume that this function is only being used for Astro submodules
    # This can be generalised in future if needed
    base_dir = pathlib.Path(__file__).parent.parent.parent  # `astro` directory
    return ".".join(module_path.relative_to(base_dir).with_suffix("").parts)
|
2a7861dfa54ce3019c6922168cb34f86c80cd239
| 87,530
|
def make_play(play, board):
    """Write *play*'s word onto *board* in place and return the board.

    *play* is ``(score, (i, j), (di, dj), word)``: a starting column/row and
    a per-letter step direction; the score component is ignored here.
    """
    _score, (col, row), (dcol, drow), word = play
    for offset, letter in enumerate(word):
        board[row + offset * drow][col + offset * dcol] = letter
    return board
|
fbc36b89dfebd526e02c3d790d0ac787e6228728
| 87,532
|
from typing import List
def cross(row: str, col: str) -> List[str]:
    """Return the list of all concatenations of a letter r in string row
    with a letter c in string col, in row-major order.

    Args:
        row (str): String of concatenated Characters
        col (str): String of concatenated Numbers
    Returns:
        List[str]: List of all possible cross concatenations.
    """
    result = []
    for prefix in row:
        for suffix in col:
            result.append(prefix + suffix)
    return result
|
5fb4b95bf76401efc30496050c981d4d88660875
| 87,538
|
def add_gid(df):
    """ Add a 'gid' column to a game DataFrame with a unique game id built
    from the date and the sorted pair of team ids. Mutates *df* in place. """
    def _gid(row):
        low_tid, high_tid = sorted((row.WTeamID, row.LTeamID))
        return '{0}_{1}_{2}'.format(row.Date, low_tid, high_tid)
    df['gid'] = df.apply(_gid, axis=1)
|
d9eb65fc57e3135cb727cc66cfe61c1e4d3345bf
| 87,550
|
def sort_tag_counts(tag_counts):
    """
    Sort tag counts in place and return the list.
    Sort rules: frequency descending first, ties broken by tag ascending
    (equivalent to a stable tag-sort followed by a count-sort).
    tag_counts: list of tuples. example: [('python', 2), ('abc', 1)]
    return: sorted tag_counts.
    """
    tag_counts.sort(key=lambda pair: (-pair[1], pair[0]))
    return tag_counts
|
c8d942ec8fb4ddaddc4a6b58fb2b4111778abb71
| 87,551
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.