content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import math
def get_increment(count):
    """Return a suitable base-10 increment for *count*.

    E.g. counts in [10, 99] yield 1, in [100, 999] yield 10, etc.
    """
    power = int(math.log10(count)) or 1  # treat the 0 decade as 1
    return int(math.pow(10, power - 1))
def is_image_file(filename):
    """Check if a file is an image by its extension.

    Generalized to be fully case-insensitive ('.BMP', '.Png', ... all match),
    which is a backward-compatible superset of the old explicit list.

    :param filename: file name string
    :return: bool
    """
    return filename.lower().endswith(('.bmp', '.png', '.jpg', '.jpeg'))
def coordinates_set(width, height):
    """Build the set of all (x, y) coordinate tuples of a width x height grid.

    :param width: grid width
    :param height: grid height
    :return: set of (x_axis, y_axis) tuples
    """
    return {(x, y) for x in range(width) for y in range(height)}
def _wrap_into_list(x):
"""
Wrap the input into a list if it is not already a list.
"""
if x is None:
return []
elif not isinstance(x, (list, tuple)):
return [x]
else:
return list(x) | 161bae982083bf3c4b53fdaee89459a49a727ea8 | 107,551 |
def is_numpy_file(filepath: str) -> bool:
    """Return True when the file path has the ``.npy`` extension.

    Parameters
    ----------
    filepath (str)
        File path.

    Returns
    -------
    bool
        True iff *filepath* ends with ``.npy``.
    """
    return filepath[-4:] == ".npy"
def eval_step(params, batch, metric_fn):
    """Perform a single evaluation step.

    Thin wrapper used to parallelize the evaluation computation across
    multiple devices.

    Args:
        params: current model state.
        batch: current batch of evaluation examples.
        metric_fn: maps (params, batch) to output model metrics.

    Returns:
        Model metrics for the given inputs.
    """
    return metric_fn(params, batch)
def rl_testrule(x):
    """Identity rule used by tests; returns *x* unchanged."""
    return x
def setup_query(person_complete_name: str):
    """
    Return the SPARQL query for obtaining gender, birthdate and nationality (if available) of the given person from
    DBpedia
    :param person_complete_name: person whose metadata are of interest
    :return: SPARQL query string with the person's name interpolated
    """
    # NOTE(review): the name is interpolated straight into the query text; a
    # name containing '"' would break the query (SPARQL injection) — confirm
    # inputs are trusted or escape quotes upstream.
    query_template = """
    SELECT *
    WHERE {{
        ?p foaf:name "{}"@en;
        foaf:gender ?gender;
        dbo:birthDate ?birthdate.
        optional {{ ?p dbp:nationality ?nationality_dbp }}
        optional {{ ?p dbo:nationality ?nationality_dbo }}
    }}
    """.format(person_complete_name)
    return query_template | 97e19b92e783d7a25cc02e6677131173d16d9273 | 107,566 |
from typing import Tuple
def generate_update_query(table: str, columns: Tuple[str, ...], key_column: str) -> str:
    """Build an UPDATE statement with named placeholders.

    e.g.: UPDATE ae_data SET Time = :Time, Channel = :Channel WHERE SetID == :SetID

    Args:
        table: Table name
        columns: Tuple of column names
            (must be of type tuple to be hashable for caching)
        key_column: Column name for WHERE clause

    Returns:
        Query string with named placeholders

    Raises:
        ValueError: if *key_column* is not one of *columns*.
    """
    remaining = list(columns)
    try:
        remaining.remove(key_column)
    except ValueError:
        raise ValueError(f"Argument key_column '{key_column}' must be a key of row_dict") from None
    assignments = ", ".join(f"{col} = :{col}" for col in remaining)
    return f"UPDATE {table} SET {assignments} WHERE {key_column} == :{key_column}"
def description_length(user):
    """Return the length (in words) of a user's profile description.

    :param user: the user object (dict) from a JSON tweet
    :return: number of words in the description; 0 when the description is
        missing, None, or empty (previously a missing key raised KeyError)
    """
    description = user.get('description')  # .get avoids KeyError on sparse user objects
    if description:
        return len(description.split())
    return 0
import random
def randint(min_value, max_value):
    """Return a random integer in the inclusive range [min_value, max_value].

    Both bounds are coerced with int() before sampling.

    Arguments:
        min_value {int} -- min value
        max_value {int} -- max value

    Returns:
        int -- random integer in [min_value, max_value]
    """
    lo, hi = int(min_value), int(max_value)
    return random.randint(lo, hi)
def get_model_url_name(model_nfo, page, with_namespace=False):
    """Return the (lower-cased) URL name for a given Tree admin page type."""
    prefix = 'admin:' if with_namespace else ''
    model_part = '%s_%s' % model_nfo  # model_nfo is an (app, model) pair
    return f'{prefix}{model_part}_{page}'.lower()
from typing import MutableMapping
def flatten(d: MutableMapping, parent='', separator='_'):
    """Recursively flatten the nested mapping *d* into a single-level dict.

    Keys of nested mappings are joined with *separator*; *parent* is the key
    prefix accumulated so far. Values are assumed to be either nested
    mappings (same structure) or plain values.
    """
    flat = {}
    for key, value in d.items():
        full_key = f"{parent}{separator}{key}" if parent else key
        if isinstance(value, MutableMapping):
            flat.update(flatten(value, full_key, separator=separator))
        else:
            flat[full_key] = value
    return flat
def check_integer_list_constraints(l, **kwargs):
    """
    EXAMPLES::
        sage: from sage.combinat.misc import check_integer_list_constraints
        sage: cilc = check_integer_list_constraints
        sage: l = [[2,1,3],[1,2],[3,3],[4,1,1]]
        sage: cilc(l, min_part=2)
        [[3, 3]]
        sage: cilc(l, max_part=2)
        [[1, 2]]
        sage: cilc(l, length=2)
        [[1, 2], [3, 3]]
        sage: cilc(l, max_length=2)
        [[1, 2], [3, 3]]
        sage: cilc(l, min_length=3)
        [[2, 1, 3], [4, 1, 1]]
        sage: cilc(l, max_slope=0)
        [[3, 3], [4, 1, 1]]
        sage: cilc(l, min_slope=1)
        [[1, 2]]
        sage: cilc(l, outer=[2,2])
        [[1, 2]]
        sage: cilc(l, inner=[2,2])
        [[3, 3]]
    ::
        sage: cilc([1,2,3], length=3, singleton=True)
        [1, 2, 3]
        sage: cilc([1,2,3], length=2, singleton=True) is None
        True
    """
    # 'singleton' mode: l is a single list rather than a list of lists; the
    # result is then the list itself (or None) instead of a filtered list.
    if 'singleton' in kwargs and kwargs['singleton']:
        singleton = True
        result = [l]
        n = sum(l)
        del kwargs['singleton']  # must not reach the filter loop below
    else:
        singleton = False
        if l:
            n = sum(l[0])  # used below as an "infinity" stand-in for outer
            result = l
        else:
            return []
    min_part = kwargs.get('min_part', None)
    max_part = kwargs.get('max_part', None)
    min_length = kwargs.get('min_length', None)
    max_length = kwargs.get('max_length', None)
    min_slope = kwargs.get('min_slope', None)
    max_slope = kwargs.get('max_slope', None)
    length = kwargs.get('length', None)
    inner = kwargs.get('inner', None)
    outer = kwargs.get('outer', None)
    # Preprocess the constraints
    if outer is not None:
        max_length = len(outer)
        for i in range(max_length):
            if outer[i] == "inf":
                # Replace the "inf" marker with a bound no part can exceed.
                outer[i] = n+1
    if inner is not None:
        min_length = len(inner)
    if length is not None:
        # An exact length constraint subsumes both bounds.
        max_length = length
        min_length = length
    # One predicate per supported constraint; the sentinel extra element in
    # the slope filters makes min()/max() well-defined for length-1 lists.
    filters = {}
    filters['length'] = lambda x: len(x) == length
    filters['min_part'] = lambda x: min(x) >= min_part
    filters['max_part'] = lambda x: max(x) <= max_part
    filters['min_length'] = lambda x: len(x) >= min_length
    filters['max_length'] = lambda x: len(x) <= max_length
    filters['min_slope'] = lambda x: min([x[i+1]-x[i] for i in range(len(x)-1)]+[min_slope+1]) >= min_slope
    filters['max_slope'] = lambda x: max([x[i+1]-x[i] for i in range(len(x)-1)]+[max_slope-1]) <= max_slope
    filters['outer'] = lambda x: len(outer) >= len(x) and min([outer[i]-x[i] for i in range(len(x))]) >= 0
    filters['inner'] = lambda x: len(x) >= len(inner) and max([inner[i]-x[i] for i in range(len(inner))]) <= 0
    # Apply exactly the filters named in kwargs (an unknown keyword raises
    # KeyError here — presumably intentional; confirm before relying on it).
    for key in kwargs:
        result = [x for x in result if filters[key](x)]
    if singleton:
        try:
            return result[0]
        except IndexError:
            return None
    else:
        return result | cfdaac712dcc4d65044fd8887c1a52bc4d8f8232 | 107,584 |
def set_coordinate_indexing(coords, indexing="xy"):
    """Sets Coordinates Indexing Scheme
    This converts coordinate layout from row-major to column major indexing.
    Parameters
    ----------
    coords : :class:`numpy:numpy.ndarray`
        Array of shape (..., M, N, 2) containing xy-coordinates.
    indexing : str
        'xy' or 'ij', indexing scheme in which to convert data and coordinates.
    Returns
    -------
    coords : :class:`numpy:numpy.ndarray`
        Array of shape (..., N, M, 2) containing xy-coordinates.
    """
    # A coordinate grid must have at least 3 dims and a trailing axis of 2 (x, y).
    is_grid = hasattr(coords, "shape") and coords.ndim >= 3 and coords.shape[-1] == 2
    if not is_grid:
        raise ValueError(
            f"wradlib: wrong coordinate shape {coords.shape}, "
            f"(..., M, N, 2) expected."
        )
    if indexing not in ["xy", "ij"]:
        raise ValueError(f"wradlib: unknown indexing value {indexing}.")
    # Heuristic: if y is constant between the first two points along axis -2,
    # the grid is currently laid out row-major ('xy') — inferred from the
    # first two grid points only; assumes a regular grid (TODO confirm).
    rowcol = coords[0, 0, 1] == coords[0, 1, 1]
    # Convert only when the current layout differs from the requested one.
    convert = (rowcol and indexing == "ij") or (not rowcol and indexing == "xy")
    if convert:
        # Swap the two grid axes (M, N) while leaving any leading batch axes
        # and the trailing coordinate axis in place.
        coords_shape = tuple(range(coords.ndim - 3)) + (-2, -3, -1)
        coords = coords.transpose(coords_shape)
    return coords | 6131abf2671bc4438039409b7c56cbe36f0fb9ff | 107,585 |
def _dump_point(obj, fmt):
"""
Dump a GeoJSON-like Point object to WKT.
:param dict obj:
A GeoJSON-like `dict` representing a Point.
:param str fmt:
Format string which indicates the number of digits to display after the
decimal point when formatting coordinates.
:returns:
WKT representation of the input GeoJSON Point ``obj``.
"""
coords = obj['coordinates']
pt = 'POINT (%s)' % ' '.join(fmt % c for c in coords)
return pt | dd8de87829b2d90ffb814890fa2866813cfc1c0c | 107,587 |
def choose_browser_by_precedence(cli_browsers=None, suite_browsers=None,
                                 settings_default_browser=None):
    """Pick which browser(s) to use, by order of precedence:

    1. browsers defined by CLI
    2. browsers defined inside a suite
    3. the 'default_driver' setting
    4. chrome
    """
    if cli_browsers:
        return cli_browsers
    if suite_browsers:
        return suite_browsers
    if settings_default_browser:
        return [settings_default_browser]
    return ['chrome']  # the default default
def _check_synsets(ref_synset, other_synset):
    """Check whether other_synset is a leading part (prefix) of ref_synset.

    Note that even if other_synset is a subset, still be careful when comparing them.
    E.g., ref: ['apple', 'orange', 'melon'], other: ['apple', 'orange'] is OK
    ref: ['apple', 'orange', 'melon'], other: ['orange', 'melon'] is not!
    """
    if ref_synset == other_synset:
        return True
    # Only a strictly shorter *leading* slice of ref_synset counts as a match.
    if len(other_synset) < len(ref_synset):
        if ref_synset[:len(other_synset)] == other_synset:
            return True
    return False | c6287e0735d32bba63b3fbd903eadfaefbbaa513 | 107,599 |
def format_doc(*args, **kwargs):
    """
    Decorator factory: formats the decorated object's own docstring with the
    given positional/keyword arguments.

    Modeled after astropy.utils.decorators.format_doc.
    """
    def set_docstring(obj):
        # The object's own __doc__ is the template (None means: no docstring).
        template = obj.__doc__
        # Blank it first so the '{__doc__}' placeholder, filled below from the
        # now-empty __doc__, can never awkwardly self-insert the original.
        obj.__doc__ = None
        kwargs['__doc__'] = obj.__doc__ or ''
        obj.__doc__ = template.format(*args, **kwargs)
        return obj
    return set_docstring
def extract_scalars(multiplexer, run, tag):
    """Extract tabular data from the scalars at a given run and tag.

    The result is a list of (wall_time, step, value) 3-tuples, where value is
    the first float stored in each event's tensor proto.
    """
    rows = []
    for event in multiplexer.Tensors(run, tag):
        rows.append((event.wall_time, event.step, event.tensor_proto.float_val[0]))
    return rows
def findfirst(pred, seq):
    """Return the first element of *seq* that matches *pred*, or None."""
    return next((item for item in seq if pred(item)), None)
import itertools
def uniquify_enum_cases(lst):
    """Prunes duplicate enum cases from the list.
    Arguments:
    - lst: List whose elements are to be uniqued. Assumes each element is a
      (symbol, value) pair and elements already sorted according to value.
    Returns:
    - A list with all duplicates removed. The elements are sorted according
      to value and, for each value, uniqued according to symbol,
    - A map from each deduplicated case symbol to the kept (unique) symbol.
    """
    cases = lst
    uniqued_cases = []
    duplicated_cases = {}
    # First sort according to the value
    cases.sort(key=lambda x: x[1])
    # Then group them according to the value
    for _, groups in itertools.groupby(cases, key=lambda x: x[1]):
        # For each value, sort according to the enumerant symbol.
        sorted_group = sorted(groups, key=lambda x: x[0])
        # Keep the "smallest" case, which is typically the symbol without extension
        # suffix. But we have special cases that we want to fix.
        case = sorted_group[0]
        for i in range(1, len(sorted_group)):
            duplicated_cases[sorted_group[i][0]] = case[0]
        if case[0] == 'HlslSemanticGOOGLE':
            # Special case: prefer the *other* variant over the GOOGLE one,
            # and re-point the mapping accordingly.
            assert len(sorted_group) == 2, 'unexpected new variant for HlslSemantic'
            case = sorted_group[1]
            duplicated_cases[sorted_group[0][0]] = case[0]
        uniqued_cases.append(case)
    return uniqued_cases, duplicated_cases | cb114bc530def16bb09ec3bcf253079916e63d14 | 107,612 |
def list_tester(user_list, session_list, watch_lists):
    """
    :param user_list: The user list if they supplied one
    :param session_list: The session attributes or persistent attributes
    :param watch_lists: tuple of watch list aliases
    :return: True when no list was supplied at all, or when the chosen list
        name is a watch-list alias; False otherwise.
        NOTE(review): the original docstring said the opposite ("True if
        custom list") — the code has always returned True for watch-list
        aliases; confirm with callers which is intended.
    """
    # Ensure the session has a 'list' slot (side effect kept for callers).
    try:
        session_list['list']
    except KeyError:
        session_list['list'] = None
    if user_list is None and session_list['list'] is None:
        return True
    if not user_list and not session_list['list']:
        return True
    if user_list is None:
        # Bug fix: previously crashed with AttributeError when only the
        # session carried a list name; fall back to the session's list.
        return session_list['list'].lower() in watch_lists
    return user_list.lower() in watch_lists
def issequence(obj) -> bool:
    """
    Check if an object is a sequence / an iterable.

    .. note::
        Unlike `isinstance(obj, collections.Sequence)`, this also accepts
        types such as `set` and `dict`.

    :param obj: any object
    :return: flag if the object is a sequence / an iterable
    """
    cls = type(obj)
    return hasattr(cls, '__iter__') and hasattr(cls, '__len__')
def isFloat(a):
    """
    Return True if *a* can be converted to float, False otherwise.

    Also returns False (instead of raising TypeError) for non-numeric,
    non-string inputs such as None or lists.
    """
    try:
        float(a)
    except (ValueError, TypeError):  # TypeError: e.g. float(None)
        return False
    return True
def get_rasterize_layer_params(src_vector, res=5):
    """Get params for rasterize_layer when you don't have a grid system.

    Parameters
    ----------
    src_vector: Geopandas.GeoDataFrame
        The vector data to be rasterized (only ``total_bounds`` is used).
    res: resolution
        The resolution (in meters) of the grid.

    Returns
    -------
    rows: int
        Target rasterized image's rows.
    cols: int
        Target rasterized image's cols.
    geo_transform: tuple
        Target rasterized image's affine geo-transform.
    """
    x_min, y_min, x_max, y_max = src_vector.total_bounds
    n_cols = int((x_max - x_min) / res)
    n_rows = int((y_max - y_min) / res)
    transform = (x_min, res, 0, y_max, 0, -res)
    return n_rows, n_cols, transform
import uuid
def _get_sfn_execution_name(reservation):
"""
Generate a human-readable execution named composed of the passenger's
first and last name follwed by a UUID
"""
name = "{}-{}-{}".format(
reservation['last_name'].lower().replace(' ', '-'),
reservation['first_name'].lower(),
uuid.uuid4()
)
return name | 7648ec1de3a713576174b0558e88729ecf4cfdbd | 107,630 |
def _nonpar_core(event_list, dead_time_end, mask):
    """Numba-compiled core of the non-paralyzable dead time calculation.
    Parameters
    ----------
    event_list : array of floats
        Event times of arrival
    dead_time_end : array of floats
        End of the dead time of each event
    mask : array of bools
        Final mask of good events. Initially, all entries must be ``True``
    Return
    ------
    mask : array of bools
        Final mask of good events
    """
    # An event arriving before the previous (surviving) event's dead time has
    # ended is rejected; its own dead-time window is replaced by the previous
    # one, so the dead time does not extend ("non-paralyzable").
    for i in range(1, len(event_list)):
        if (event_list[i] < dead_time_end[i - 1]):
            dead_time_end[i] = dead_time_end[i - 1]
            mask[i] = False
    return mask | 89fb91766962d8b98973690a054c3a2fc870aeb0 | 107,632 |
def length_vector_sqrd_numba(a):
    """Return the squared length of the vector.

    Parameters
    ----------
    a : array
        XYZ components of the vector.

    Returns
    -------
    float
        The squared length of the XYZ components.
    """
    x, y, z = a[0], a[1], a[2]
    return x * x + y * y + z * z
def get_f1kg_texts(files):
    """Read the specified F1KG text files (no parsing needed, unlike the Perseus files)."""
    texts = []
    for path in files:
        with open(path, "r") as handle:
            texts.append(handle.read())
    print(f"Number of texts read: {len(texts)}")
    return texts
from typing import Union
from pathlib import Path
def read_file(file_path: Union[str, Path]):
    """Read a text file and return its content as a list of lines
    (each line keeps its trailing newline, as produced by ``readlines``).
    """
    with open(file_path) as f:
        return f.readlines() | 021a14219e1d4f971c995b6d9cf8252f4cdbfef2 | 107,639 |
def PyTmStoHMSX(secs):
    """
    Convert seconds to hours-minutes-seconds-fraction.

    Parameters
    ----------
    secs: float, time in seconds

    Returns
    -------
    list, [hours as int, minutes as int, seconds as int, fractional seconds as float]
    """
    hours, remainder = divmod(secs, 3600)
    minutes, remainder = divmod(remainder, 60)
    whole_seconds, fraction = divmod(remainder, 1)
    return [int(hours), int(minutes), int(whole_seconds), fraction]
def use_markers(model, w_marker=None, c_marker='c'):
    """
    Temporarily modifies the _compute_* methods of the model to insert placeholder
    values instead of coefficients in the QUBO. Note that the original methods are still
    called, because they can have side-effects (example: computing new xplet properties).
    Usage:
        with use_markers(model) as altered_model:
            Q = altered_model.to_qubo()
    :param model: an implementation of :py:class:`hepqpr.qallse.QallseBase`
    :param w_marker: the placeholder used for linear weights. Set it to None to use the original weight.
    :param c_marker: the placeholder used for conflict strengths. Set it to None to use the original weight. Default to 'c'.
    :return: a context manager yielding the altered model
    """
    # Bug fix: this was a bare generator function, so `with use_markers(...)`
    # raised (no __enter__/__exit__). Wrap it in contextlib.contextmanager and
    # restore the patched methods in a finally block so an exception inside
    # the with-body cannot leave the model permanently altered.
    from contextlib import contextmanager

    @contextmanager
    def _marked():
        old_cw, old_cc = None, None
        if w_marker is not None:
            old_cw = model._compute_weight
            def new_cw(*args, **kwargs):
                old_cw(*args, **kwargs)  # keep side effects of the original
                return w_marker
            model._compute_weight = new_cw
        if c_marker is not None:
            old_cc = model._compute_conflict_strength
            def new_cc(*args, **kwargs):
                old_cc(*args, **kwargs)  # keep side effects of the original
                return c_marker
            model._compute_conflict_strength = new_cc
        try:
            yield model
        finally:
            if old_cw is not None: model._compute_weight = old_cw
            if old_cc is not None: model._compute_conflict_strength = old_cc

    return _marked()
import math
def weeks_to_sample(time_steps):
    """
    Calculate the number of weeks a GMM model must sample to cover the
    simulation period.

    Args:
        time_steps: (list): time steps in `datetime.datetime` format (ascending)

    Returns:
        num_weeks: (int): # weeks to sample
    """
    start, end = time_steps[0], time_steps[-1]
    # Total duration of the simulation period in hours (168 h per week).
    hours = (end - start).total_seconds() / 3600
    weeks = math.ceil(hours / 168)
    # If the first time stamp falls later in the week than the last one,
    # an extra (partial) week must be sampled.
    if (start.weekday(), start.time()) > (end.weekday(), end.time()):
        weeks += 1
    return weeks
def rgb_to_color(r, g, b):
    """Convert R, G, B values to a '0xRRGGBB' hex string."""
    masked = [int(c) & 0xFF for c in (r, g, b)]
    packed = (masked[0] << 16) | (masked[1] << 8) | masked[2]
    return '0x{0:06X}'.format(packed)
def constraints_violated(constraints):
    """
    Evaluate a mapping of named constraints.

    :param constraints: mapping name -> constraint (exposing .violation and .violated)
    :return [0]: True if there are any constraints that are violated, False otherwise
    :return [1]: maximum violation if one or more constraints exist, else None
    :return [2]: name of the maximally violated constraint if any exist, else None
    """
    if not constraints:
        return False, None, None
    names = list(constraints.keys())
    violations = [c.violation for c in constraints.values()]
    any_violated = any(c.violated for c in constraints.values())
    worst = max(violations)
    return any_violated, worst, names[violations.index(worst)]
def exclude_keys(d, keys):
    """
    Return a copy of dict 'd' without the keys specified in 'keys'.
    """
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3 (this file uses f-strings/type hints, i.e. Python 3).
    return {k: v for k, v in d.items() if k not in keys}
def rosenbrock(args):
    """Rosenbrock function (coefficient-10 variant).

    Global minimum: f(1,...,1) = 0.0
    Search domain: -inf <= xi <= inf, 1 <= i <= n
    """
    return sum(
        10.0 * (args[i] ** 2 - args[i + 1]) ** 2 + (1 - args[i]) ** 2
        for i in range(len(args) - 1)
    )
def make_ends(List1: list) -> list:
    """Return a new list containing the first and last elements of *List1*.

    Returns an empty list for empty input (previously raised IndexError).

    >>> make_ends([1, 2, 3, 4, 5, 6])
    [1, 6]
    >>> make_ends([7])
    [7, 7]
    """
    if not List1:
        return []
    return [List1[0], List1[-1]]
def get_synset_by_wnid(wnid, graph):
    """Return the node of *graph* whose WordNet id equals *wnid*, or None."""
    return next((node for node in graph if node.wn_id == wnid), None)
def get_device_profile_group_requester_id(dp_group_id):
    """Return the value to use in objects.RequestGroup.requester_id.

    The requester_id matches device profile groups from Cyborg to the
    request groups in the request spec.

    :param dp_group_id: index of the request group in the device profile.
    """
    return f"device_profile_{dp_group_id}"
def str2hex(number):
    """
    Parse a hex-based string number into an int.

    :param string number: hex number string to convert (e.g. '1A' or '0x1A')
    :return int: integer value of the hex number
    """
    return int(number, base=16)
def is_mole(c: str) -> bool:
    """Return True when the character *c* is a mole ('o')."""
    return "o" == c
def format_timedelta(seconds: int) -> str:
    """Format a duration given in seconds as '<h>h <m>m' for display."""
    hours = int(seconds / 3600)
    minutes = int(seconds % 3600 / 60)
    return "{}h {}m".format(hours, minutes)
def rreplace(edit, old, new, count=-1):
    """
    Replace occurrences of *old* with *new* in *edit*, starting from the right.

    Python lacks a reverse str.replace, so this uses rsplit + join
    (allegedly the fastest approach).
    """
    pieces = edit.rsplit(old, count)
    return new.join(pieces)
import math
def _prediction(player_elo, opponent_elo):
"""Standard elo prediction probability.
Based on the USCF rating algorithm. Predicts the probability of the player
winning over the opponent.
:param player_elo: float
:param opponent_elo: float
:return: float -- Probability of win.
"""
exponent = -1 * (player_elo - opponent_elo) / 400.0
return 1.0 / (1.0 + math.pow(10, exponent)) | 9227da40d34ed87882dc0f864628752ee7fb2eb5 | 107,705 |
import re
def validate_domain(domain):
    """Validate a domain name.

    Accepts dot-separated alphanumeric labels (hyphens allowed inside a
    label) with an alphabetic TLD of at least two characters; a single
    trailing dot is tolerated.

    :return: True if *domain* is valid, else False (previously returned a
        re.Match object or None, which was truthy/falsy but surprising).
    """
    if len(domain) > 255 or len(domain) == 0:
        return False
    if domain[-1] == '.':
        domain = domain[:-1]
    allowed = re.compile(
        r'\A([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}\Z', re.IGNORECASE
    )
    return allowed.match(domain) is not None
import string
import random
def randomstring(size=8, chars=string.ascii_lowercase + string.digits):
    """
    Generate a random string of *size* characters.

    :param size: number of characters. Default value = 8
    :param chars: alphabet to draw from. Default: lowercase letters and digits
    :return: random string of *size* characters
    """
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def get_next_page_url(contents):
    """ Get link to next page
    Args:
        contents (BeautifulSoup): page contents to search for next page
    Returns link to next page (str), or None (implicitly) when the page has
    no 'pager-next' list item.
    """
    # 'pager-next' is the <li> wrapping the next-page anchor in the pager.
    next_link = contents.find('li', {'class': 'pager-next'})
    if next_link:
        return next_link.find('a')['href'] | 124612c623c624fcf9eb2ea8093d385edf945835 | 107,713 |
def flatset(iterables):
    """Return a set of the items in a single-level flattening of *iterables*.

    >>> flatset([[1, 2], [2, 3]])
    {1, 2, 3}
    """
    return set(item for iterable in iterables for item in iterable) | 949b5f99748d1c43e296f51b83b1f5224dcc3e28 | 107,715 |
def doubler(number):
    """Return twice the value that is passed as argument."""
    return 2 * number
def aio_rpc_request(method):
    """
    Method decorator announcing the method to be exposed as a request handler.

    This decorator assumes that the first parameter (after ``self``) takes a
    BSONRpc/JSONRpc object reference as an argument, so that the method has
    access to make RPC callbacks on the peer node (requests and
    notifications) during its execution. From the second parameter onward the
    parameters are exposed as-is to the peer node.
    """
    setattr(method, '_request_handler', True)
    return method
def GET_DATA(tag: str) -> dict:
    """GET_DATA: build the APDU dict for the GET DATA command with *tag*."""
    header = '80CA' + tag
    return {'header': header, 'Le': '00'}
def check_query_type(type):
    """Check whether *type* names an implemented query type.

    Implemented query types:
        AllLabels:  query all labels of a selected instance
                    (binary / multi-class / multi-label classification, regression)
        PartLabels: query part of the labels of an instance
                    (multi-label classification only)
        Features:   query part of the features of an instance
                    (missing-features setting)
    Recognized but not implemented:
        Relations:  query relations between two objects
        Examples:   query examples given constraints

    Parameters
    ----------
    type: str
        query type.

    Returns
    -------
    result: bool
        True if *type* is one of 'AllLabels', 'PartLabels', 'Features'.
    """
    assert (isinstance(type, str))
    return type in ('AllLabels', 'PartLabels', 'Features')
def get_box_center(obj):
    """
    Get the center coordinate of a detection box, given its top-left
    x/y and width/height.

    :param obj: dict with 'x', 'y', 'width', 'height'
    :return: tuple (center x, center y)
    """
    center_x = obj['x'] + obj['width'] / 2
    center_y = obj['y'] + obj['height'] / 2
    return center_x, center_y
def is_multiclass(labels):
    """Return True for a multiclass task: a label array that is 2-D after
    squeeze and whose rows do not all sum to 1; False otherwise."""
    if labels.squeeze().ndim != 2:
        return False
    return any(labels.sum(axis=1) != 1)
def sessionID(ws):
    """Return the 'session' URL parameter of the current request, or ''."""
    request = ws.environ.get('werkzeug.request')
    return request.args.get('session', "")
def is_submodule(line):
    """Return True if the line is a valid submodule statement.

    *line* is expected to be a pre-tokenized statement (a sequence of token
    strings), not raw text — presumably of the shape
    ``['submodule', '(', name, ')'...]``; confirm against the tokenizer.
    """
    if len(line) < 2:
        # XXX: This is just to prevent index error in the next test
        # Not entirely sure what this ought to be, come back to it...
        return False
    # Must start with the 'submodule' keyword followed by '('.
    if (line[0], line[1]) != ('submodule', '('):
        return False
    # Not a great test, but enough to get things going.
    # 5-token form: 'submodule' '(' name ')' ... — token 3 must close the paren.
    if len(line) == 5 and line[3] != ')':
        return False
    # 7-token form: expects ':' at index 4 and ')' at index 6.
    if len(line) == 7 and (line[4], line[6]) != (':', ')'):
        return False
    return True | ed90a8b5f11d9995217c54d43df0238ee699662f | 107,750 |
import pickle
def read_ds(path):
    """Load and return the pickled data structure stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
import re
def normalize(xname):
    """
    Normalize an input string for use as a tdex_id: strip punctuation,
    replace spaces/stars with underscores, lower-case and trim.
    """
    # Punctuation to delete outright.
    cleaned = re.sub(r'[\'`\-\?!%&\*@\(\)#:,\.\/\\;\+=\[\]\{\}\$\<\>]', '', xname)
    # Separators to turn into underscores.
    return re.sub(r'[ ★☆]', '_', cleaned).lower().strip()
from typing import Tuple
def rich_rgb_str(rgb: Tuple[int, int, int]) -> str:
    """Render an RGB(A) tuple as 'rgb(r,g,b)', dropping any alpha value."""
    r, g, b = rgb[:3]
    return f"rgb({r},{g},{b})"
def _decaying_weights(n, r):
"""Computes weights that decay geometrically at rate r."""
weights = [r**(n - i - 1) for i in range(n)]
return weights | f5ad92ea699ac31b37af777aea9f2382613186bd | 107,760 |
def apply_scale_factor(data, scale_factor):
    """Apply a scale factor to remote sensing data.

    Parameters
    ----------
    data : numpy array
        Array containing the unscaled data.
    scale_factor : int or float
        Factor to multiply the unscaled data by.

    Returns
    -------
    numpy array
        Array containing the scaled data.
    """
    return data * scale_factor
import math
def get_angle(x1, y1, x2, y2):
    """
    Calculate the angle between the x-axis and the line through the two
    given coordinates. Always between 90 and -90 degrees.

    :param x1: x coordinate at point 1
    :param y1: y coordinate at point 1
    :param x2: x coordinate at point 2
    :param y2: y coordinate at point 2
    :return: (float) the related acute angle in degrees
    """
    delta_x = x2 - x1
    delta_y = y2 - y1
    if delta_x == 0:
        # Bug fix: a vertical line previously raised ZeroDivisionError;
        # return +/-90 depending on the direction of delta_y.
        return math.copysign(90.0, delta_y)
    return math.degrees(math.atan(delta_y / delta_x))
import string
def delete_punctuation(text: str) -> str:
    """Lower-case *text* and replace each punctuation character with a space.

    NOTE: despite the name, punctuation is replaced by spaces, not removed.
    """
    table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    return text.lower().translate(table)
def asbool(value):
    """Return True if value reads as any of "t", "true", "y", etc (case-insensitive)."""
    truthy = {"t", "true", "y", "yes", "on", "1"}
    return str(value).strip().lower() in truthy
def set_column_hidden_attribute(column_limit, columns):
    """Set the hidden attribute on columns beyond *column_limit* (in place).

    Args:
        column_limit (int): The number of columns that can be displayed.
        columns (list of Column): A list of columns.

    Returns:
        The list of columns (bug fix: the mutating path previously
        returned None while the short-list path returned the list).
    """
    if len(columns) <= column_limit:
        return columns
    for position, column in enumerate(columns, start=1):
        if position > column_limit:
            column.hidden = True
    return columns
import torch
def duplicate(x, n):
    """Duplicate a tensor *n* times by concatenation along dimension 1.

    Arguments:
        x {torch.Tensor} -- the tensor to duplicate
        n {int} -- number of times to duplicate the tensor

    Returns:
        torch.Tensor -- the tensor containing the duplicated input
    """
    return torch.cat([x] * n, dim=1)
def parse_u24le(data):
    """Parse a 24-bit little-endian unsigned integer from *data*."""
    low, mid, high = data[0], data[1], data[2]
    return low + (mid << 8) + (high << 16)
async def public_test():
    """A test public GET endpoint returning a simple message."""
    return {"message": "anyone can see this"}
import functools
import operator
from typing import Counter
def get_counter(list_of_symptoms: list) -> Counter:
    """Count, for each word, the number of symptom sets containing it.

    Args:
        list_of_symptoms (list): A list of sets of symptoms -> [{...},{...},{..},..]

    Returns:
        Counter: word -> number of symptom sets it occurs in.
    """
    # Seed reduce with an empty Counter so empty input no longer raises
    # TypeError; set(...) ensures each set contributes at most 1 per word.
    return functools.reduce(
        operator.add,
        (
            Counter(set(symptoms)) for symptoms in list_of_symptoms
        ),
        Counter(),
    )
import torch
def get_torch_dtype(numeric_precision):
    """Map a numeric-precision string to the corresponding torch dtype."""
    return {
        'float64': torch.float64,
        'float32': torch.float32,
        'float16': torch.float16,
        'bfloat16': torch.bfloat16,
    }[numeric_precision]
import pickle
def load_pickle(file_path):
    """Given a file path, load the pickle there and return the Python object."""
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
def getVisibility(name):
    """
    Returns the visibility of the given name by convention
    """
    # Slice comparisons are equivalent to startswith and never raise,
    # even for names shorter than the prefix.
    if name[:2] == "__":
        return "private"
    if name[:1] == "_":
        return "internal"
    return "public"
def format_date_parameters(params):
    """
    Formats date parameters.

    Converts each (possibly zero-padded) numeric string to an int.
    ``int()`` already ignores leading zeros, so no stripping is needed;
    the previous ``lstrip('0')`` approach crashed on the value ``'0'``
    because ``int('')`` raises ValueError.

    :param params: raw (unprocessed) date parameter strings
    :return: formatted date parameters as a list of ints
    """
    return [int(p) for p in params]
def count(qs):
    """Count a queryset, or list of items.

    Tries the queryset-style ``.count()`` method first and falls back to
    ``len()``. The fallback triggers on:
    - AttributeError: the object has no ``count`` attribute (sets, dicts);
    - TypeError: ``count`` exists but needs an argument (list/tuple/str
      ``.count(value)``).
    The previous bare ``except:`` also swallowed unrelated errors raised
    inside a real ``.count()`` (and even KeyboardInterrupt).
    """
    try:
        return qs.count()
    except (AttributeError, TypeError):
        return len(qs)
def hello(friend_name):
    """
    Return hello message for a friend

    :param: friend_name: the person's name
    :return: String containing the message
    """
    return 'Hello, {}!'.format(friend_name)
import re
def split_kwargs(kwargs_dict, prefix='shadow_'):
    """
    Splits dictionary into two new dicts by checking the keys for a given prefix. Those key-value pairs
    with the prefix will be added to a new dictionary with prefix removed from the keys.

    Keys are now matched and stripped literally. The previous regex-based
    version broke when ``prefix`` contained regex metacharacters, and its
    ``re.sub`` removed *every* occurrence of the prefix in a key instead of
    only the leading one (e.g. 'shadow_shadow_x' became 'x', not 'shadow_x').

    :param kwargs_dict: original dict to be checked and split. As dict
    :param prefix: prefix to search keys for. As string
    :return: copy of original dict with prefixed keys removed, new dict based on prefixed keys
    """
    output_dict = {}
    shadow_dict = {}
    for key, value in kwargs_dict.items():
        if key.startswith(prefix):
            # Strip only the leading prefix.
            shadow_dict[key[len(prefix):]] = value
        else:
            output_dict[key] = value
    return output_dict, shadow_dict
import torch
def is_float_or_torch_tensor(x):
    """
    Return whether input x is a float or a torch.Tensor.
    """
    # isinstance accepts a tuple of types, collapsing the two checks.
    return isinstance(x, (float, torch.Tensor))
def format_timestamp(t):
    """Build several formatted representations of a timestamp.

    (The previous docstring was a copy-paste from an unrelated function.)

    Parameters
    ----------
    t : time.struct_time
        A timestamp generated by time.localtime()

    Returns
    -------
    timestamp : dict
        'original'    : the struct_time as given
        'list'        : [year, month, day, hour, minute, second] as ints
        'list-padded' : the same fields as zero-padded strings
        'filename'    : filesystem-safe form 'YYYY_MM_DD_HH-MM-SS'
        'display'     : human-readable form 'MM/DD/YYYY HH:MM:SS'
    """
    fields = [t[0], t[1], t[2], t[3], t[4], t[5]]
    # Build the padded variant as a separate list: the old code mutated
    # `fields` in place, which made 'list' and 'list-padded' the *same*
    # object holding a mix of ints and padded strings.
    padded = [str(f).zfill(2) for f in fields]
    year, month, day, hour, minute, second = padded
    return {
        'original': t,
        'list': fields,
        'list-padded': padded,
        'filename': f'{year}_{month}_{day}_{hour}-{minute}-{second}',
        'display': f'{month}/{day}/{year} {hour}:{minute}:{second}',
    }
def increase_sockopt(socket_, level, option, value):
    """Increase a socket option to ``value``.

    If the currently set value for that ``option`` equals or exceeds ``value``,
    this will do nothing.

    Args:
        level (int): The protocol level where the option should be set.
        option (int): The option to set for the protocol level.
        value (int): The value to set for the socket option.

    Returns:
        bool: Indicating if the value was set.
    """
    # Early exit when the option is already large enough.
    if socket_.getsockopt(level, option) >= value:
        return False
    socket_.setsockopt(level, option, value)
    return True
def CreateSizesExternalDiagnostic(sizes_guid):
    """Creates a histogram external sizes diagnostic."""
    return {
        'type': 'GenericSet',
        'guid': str(sizes_guid),
        'values': ['sizes'],
    }
def get_path(data, path):
    """
    Fetch a value in a nested dict/list using a path of keys/indexes
    If it fails at any point in the path, None is returned
    example: get_path({'x': [1, {'y': 'result'}]}, ['x', 1, 'y'])

    Bug fix: each step now indexes into the value reached so far
    (``current``) instead of re-indexing the top-level ``data``, which made
    any path deeper than one level fail (including the docstring example).
    The except clause is narrowed to the indexing errors that can occur.
    """
    current = data
    for p in path:
        try:
            current = current[p]
        except (KeyError, IndexError, TypeError):
            return None
    return current
def is_power(a, b):
    """Return True if ``a`` is a power of ``b``.

    A number, a, is a power of b if it is divisible by b and a/b is a
    power of b.

    Fixes over the previous version:
    - exact integer division (``//``) replaces float division, which lost
      precision and returned wrong answers for large integers;
    - ``b == 1`` is handled explicitly (recursing on it never terminated);
    - non-positive ``a`` returns False immediately (``a == 0`` previously
      recursed forever).
    """
    if b <= 0:
        return False
    if b == 1:
        # Only 1 itself is a power of 1; recursing would loop forever.
        return a == 1
    if a <= 0:
        return False
    if a == b:
        return True
    if a % b == 0:
        # a % b == 0 guarantees the floor division below is exact.
        return is_power(a // b, b)
    return False
def link_titles(soup):
    """Return list of titles of links to other pages."""
    if soup is None:
        return []
    anchors = soup.find_all('a')
    # Only internal wiki links (href beginning with '/wiki') count.
    return [a.get('title') for a in anchors
            if (a.get('href') or '').startswith('/wiki')]
def opengl_to_bullet_frame(vec):
    """
    Converts a rotation or a translation from the opengl ref frame to the
    bullet ref frame (Y-UP vs Z-UP)

    Parameters:
        vec - A vector xyz, list of 3 elements
    """
    x, y, z = vec[0], vec[1], vec[2]
    # Swap the Y/Z axes and negate the new Z component.
    return [x, z, -y]
def chunk(arr, size=3):
    """return an iterable of arr values in chunks of `size`"""
    offsets = range(0, len(arr), size)
    return (arr[offset:offset + size] for offset in offsets)
def variant_display_name(variant):
    """Construct a display name for a variant."""
    parts = (
        variant["Product SKU"],
        variant["Product Name"],
        variant["Question|Answer"],
    )
    return "{}: {}: {}".format(*parts).strip()
def get_collection_api_ids(user):
    """Gets all api_ids from user.collection"""
    # A falsy user (e.g. None) yields an empty list.
    if not user:
        return []
    return [game.api_id for game in user.collection]
def _escape_percent_sign(string):
"""Return a string within which all percent signs are escaped"""
return string.replace('%', "%%") | eb2239395f32325a78d4fa32258b553c58b2486a | 107,837 |
import torch
def boost_activations(x, duty_cycles, boost_strength):
    """
    Boost unit activities by how rarely each unit has fired.

    Boosting as documented in :meth:`kwinners` would compute
        x * torch.exp((target_density - duty_cycles) * boost_strength)
    but here we compute
        x * torch.exp(-boost_strength * duty_cycles)
    which equals the former times a positive constant, so the ranked order
    of units is unchanged.

    :param x:
        Current activity of each unit.
    :param duty_cycles:
        The averaged duty cycle of each unit.
    :param boost_strength:
        A boost strength of 0.0 has no effect on x.
    :return:
        A tensor representing the boosted activity
    """
    detached = x.detach()
    if boost_strength > 0.0:
        detached = detached * torch.exp(-boost_strength * duty_cycles)
    return detached
import yaml
def process_yaml_from_text(yaml_text):
    """Process yaml from given string data.

    Args:
        yaml_text: str, input for yaml processing.

    Returns:
        object, yaml data as dict.
    """
    # SafeLoader avoids executing arbitrary Python tags from the input.
    return yaml.load(yaml_text, Loader=yaml.SafeLoader)
import difflib
def diff(actual, expected):
    """
    normalize whitespace in actual and expected and return unified diff
    """
    actual_lines = actual.splitlines()
    expected_lines = expected.splitlines()
    return '\n'.join(difflib.unified_diff(actual_lines, expected_lines))
from typing import List
def mediate_heat(heat_vector: List[List[int]]) -> List[int]:
    """
    Calculate the mean heat vector between the provided heat vectors.

    :arg heat_vector: a list of heat vectors of the same size
    :return: the mean heat vector
    """
    count = len(heat_vector)
    # Assume that all heat vectors are of the same size as the first one.
    width = len(heat_vector[0])
    return [
        int(sum(vector[position] for vector in heat_vector) / count)
        for position in range(width)
    ]
def compute_mean_terr(res, N, d, Σ, g, ε0):
    """Compute mean tracking error from a set of quantum measurements.

    Each measurement key is a bitstring where bit i marks whether stock i
    is selected; only states with exactly ``d`` selected stocks contribute
    to the shot-weighted average.

    Parameters
    ----------
    res: dict
        Measurements from a quantum circuit in Qiskit format
        (bitstring -> shot count).
    N: int
        Number of available stocks.
    d: int
        Number of chosen stocks.
    Σ: 2d array of floats.
        Stocks correlation matrix.
    g: 1d array of floats.
        Stocks correlation with index.
    ε0: float.
        Correlation of index with itself.
    Return
    ------
    terr: float.
        Mean tracking error.
    """
    terr = 0.0
    shots = 0  # total shot count over all measured states
    for k, v in res.items():
        # Interpret the measured bitstring as an integer bitmask.
        state = int(k, 2)
        shots += v
        # Number of stocks in this state.
        ns = 0
        # Compute tracking error of this state.
        terrs = 0.0
        for i in range(N):
            ns += (state>>i)&1
            # Diagonal / linear contribution of stock i, if selected.
            terrs += (Σ[i, i] - 2*g[i])*((state>>i)&1)
            for j in range(i+1, N):
                # Off-diagonal pair (i, j) counted once, hence the factor 2.
                terrs += 2*Σ[i, j]*((state>>i)&1)*((state>>j)&1)
        # Only states selecting exactly d stocks are valid portfolios;
        # invalid states still contribute to `shots` (weighting as 0 error).
        if ns == d:
            terr += terrs*v
    terr /= shots
    terr += ε0
    return terr
def get_path_locations(sg, path_order_grid):
    """Given a solved grid and path order variables, return the path as points."""
    model = sg.solver.model()
    visited = []
    for point, order_var in path_order_grid.items():
        order = model.eval(order_var).as_long()
        # Negative order means the point is not on the path.
        if order >= 0:
            visited.append((order, point))
    return [point for _, point in sorted(visited)]
import re
def listify(string):
    """
    :param string:
    :return: list of space separated words of string input
    """
    # Maximal runs of word characters are exactly the tokens the original
    # sub-then-split produced.
    return re.findall(r"\w+", string)
def parse_request(url):
    """Returns a pair (route, payload), where
    route is the path to API and payload is the query string stored in a dictionary (if exists).

    Splitting now uses maxsplit=1 so that a '?' inside the query string or
    an '=' inside a value no longer trips the error branch.

    Parameters
    ----------
    url : str
        the URL to be parsed.
    Returns
    -------
    tuple
        a tuple of type:
            (route, payload), if a query string is provided.
            (route, None) if there is no query string.
            (None, None) if provided url is not valid.
    """
    try:
        if '?' in url:
            # maxsplit=1: only the first '?' separates route from query.
            route, data = url.split('?', 1)
            payload = {}
            for pair in data.split('&'):
                # maxsplit=1: '=' characters inside values stay intact.
                key, value = pair.split('=', 1)
                payload[key] = value
            return route, payload
        else:
            return url, None
    except ValueError:
        # A key without any '=' at all still cannot be parsed.
        print('Invalid route!')
        return None, None
import struct
def read_dns_name_bytes(byte_array, start, label_store):
    """
    reads a byte array from start until the end of a label and returns
    the 'human' representation. Handles DNS decompression of labels.

    Ported to Python 3: ``array.tostring()`` was removed in Python 3.9,
    and joining ``bytes`` labels with the str ``'.'`` raised TypeError.
    Labels are now decoded with latin-1 (a lossless 1:1 byte mapping) so
    joins and ``label_store`` keys are plain strings.

    :param byte_array: The packet data in an array('B') byte array
    :param start: The offset to start reading this label
    :param label_store: A per packet store of previously seen labels with
        their offsets in the packet data
    :return: tuple of:
        (the current position in the packet data after the label has
         been decoded,
         the human readable (dot notation) query or resource name)
    """
    read_on = True
    buff_indx = start
    labels = list()
    return_parts = list()
    while read_on:
        b1 = byte_array[buff_indx]
        if 1 <= b1 <= 63:
            # This is the first time we have seen this label OR this packet
            # is not using compression.
            raw = byte_array[buff_indx + 1: buff_indx + 1 + b1].tobytes()
            # latin-1 maps each byte to the code point of the same value,
            # so no label byte sequence can fail to decode.
            c_label = raw.decode('latin-1')
            labels.append((buff_indx, c_label, ))
            return_parts.append(c_label)
            buff_indx = buff_indx + b1 + 1
        elif b1 == 0:
            # DNS name is done
            read_on = False
            buff_indx += 1
        else:
            # Top two bits set: a 14-bit compression pointer follows.
            location = struct.unpack("!H",
                                     byte_array[buff_indx: buff_indx + 2])[0]
            buff_indx += 2
            # Strip off the top two bits
            location = location & 0x3fff
            if location in label_store:
                return_parts.append(label_store[location])
                labels.append((location, label_store[location], ))
            else:
                raise ValueError("read_dns_name_bytes encountered unexpected "
                                 "compressed data in byte_array. Array bytes "
                                 "are: {0}".format(byte_array[buff_indx - 2:]))
            # Compressed labels come at the end. We break now
            read_on = False
    # Record every suffix of the freshly-read name (and its offset) so
    # later pointers into this packet can be resolved.
    for index in range(len(labels)):
        cur_label = '.'.join(x[1] for x in labels[index:])
        if cur_label not in label_store:
            label_store[cur_label] = labels[index][0]
            label_store[labels[index][0]] = cur_label
        else:
            # Suppose this packet is not using compression? But this
            # is not the first time we have seen this label so move on.
            pass
    return buff_indx, '.'.join(return_parts)
def build_repository(pharses):
    """
    Build the bible repository as a dict from identifier to context
    Jhn 3:10 --> text in John chapter 3 pharse 10
    """
    def _entry(pharse):
        # Identifier is "<book> <locator>"; the remainder is the verse text.
        book, _, remainder = pharse.partition(' ')
        locator, _, context = remainder.partition(' ')
        return '{} {}'.format(book, locator), context

    return dict(_entry(p) for p in pharses)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.