| content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def intersect(line1, line2):
    """
    Returns True if the two given line segments intersect each other, and False otherwise.
    :param line1: 2-tuple of tuple (x, y)
    :param line2: 2-tuple of tuple (x, y)
    :return: boolean
    """
    a1 = line1[1][1] - line1[0][1]
    b1 = line1[0][0] - line1[1][0]
    c1 = a1 * line1[0][0] + b1 * line1[0][1]
    a2 = line2[1][1] - line2[0][1]
    b2 = line2[0][0] - line2[1][0]
    c2 = a2 * line2[0][0] + b2 * line2[0][1]
    tmp = (a1 * b2 - a2 * b1)
    if tmp == 0:
        return False
    sx = (c1 * b2 - c2 * b1) / tmp
    if (sx > line1[0][0] and sx > line1[1][0]) or (sx > line2[0][0] and sx > line2[1][0]) or \
            (sx < line1[0][0] and sx < line1[1][0]) or (sx < line2[0][0] and sx < line2[1][0]):
        return False
    sy = (a1 * c2 - a2 * c1) / tmp
    if (sy > line1[0][1] and sy > line1[1][1]) or (sy > line2[0][1] and sy > line2[1][1]) or \
            (sy < line1[0][1] and sy < line1[1][1]) or (sy < line2[0][1] and sy < line2[1][1]):
        return False
    return True
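A quick illustrative usage sketch (expected results worked out by hand from the code above), covering a crossing pair and a parallel pair of segments:
crossing = intersect(((0, 0), (4, 4)), ((0, 4), (4, 0)))  # True: the segments cross at (2, 2)
parallel = intersect(((0, 0), (4, 0)), ((0, 1), (4, 1)))  # False: parallel segments, the denominator tmp is 0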
|
69df3d550b40b4c692d4d66fedeeb21c422aef8d
| 62,401
|
import six
def longest_common_prefix(str1, str2):
"""Returns the longest common prefix length of two strings."""
limit = min(len(str1), len(str2))
for i in six.moves.range(limit):
if str1[i] != str2[i]:
return i
return limit
|
5748102df6fba2053c0ef5c4de429eaa46b21d91
| 62,404
|
def list_devices(sess, filter='/job:worker'):
"""List all device names for a session, filtering by the
filter string, by default '/job:worker'"""
return [d.name for d in sess.list_devices()
if filter in d.name]
|
33ea9cbf880d2862c7a3fd1dc3f99bed8445a0e5
| 62,406
|
def _urls(package, version, mirrors):
"""Computes the urls from which an archive of the provided PyPI package and
version should be downloaded.
Args:
package: PyPI package name.
version: version for which the archive should be downloaded.
mirrors: dictionary of mirrors, see mirrors.bzl in this directory for
an example.
"""
return [
x.format(
p = package[:1],
package = package,
version = version,
)
for x in mirrors.get("pypi")
]
|
90ec27eaec83985b997e17b517b6e6fd792c11a6
| 62,411
|
def slf(x):
"""
slf(x)
slf = lambda x: x
"""
return x
|
22458f280035e04eaac169c775c4c6a36832fbb3
| 62,412
|
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
A list of two integers of the form [height, width]. `height` and `width` are
set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField(
"identity_resizer") or image_resizer_config.HasField(
"conditional_shape_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
|
87eb97bb3609098f1921671629450fa24529d72e
| 62,415
|
def optional(run, deco):
"""This is a decorator which applies another decorator only if the
condition is true."""
if run:
return deco
else:
def do_nothing(func):
return func
return do_nothing
|
5a8674d3c45bd51f7f2270e4a22aa876f08b2173
| 62,416
|
def normalize(img, maxImg, minImg):
"""
Scale an image from 0 to 1
Parameters:
img (ee.Image): image to be rescaled
maxImg (ee.Image): image storing the maximum value of the image
minImg (ee.Image): image storing the minimum value of the image
Returns:
ee.Image: the input image rescaled to the range [0, 1]
"""
return img.subtract(minImg).divide(maxImg.subtract(minImg))
|
58fe106a69fd3f29edbd54c13740f3968d855a2f
| 62,417
|
def current_directory(_deps, _maybedeps):
"""
Current working directory
"""
return '.'
|
243545e71a84029982ef0882d580ba2d6f94a3c9
| 62,419
|
import re
def is_np(url):
    """Return True if url is a "no participate" (np.) link."""
    return bool(re.match(r"https?://np\.", url))
|
53750d698402f65e6eb96564815c02f39d89ed63
| 62,424
|
def really_submitted(form):
""" WTForms can be really finnicky when it comes to checking if a form
has actually been submitted, so this method runs validate_on_submit()
on the given form and checks if its "submitted" field has any data. Useful
for pages that have two forms on them.
:arg Form form: A form to check for submission.
:returns boolean: True if submitted, false otherwise.
"""
if form.submitted.data == "true":
return form.validate_on_submit()
return False
|
79f644ed440ed854a7ee3bf9c3a7f36c7a322928
| 62,425
|
import textwrap
def dedent(text, baseline_index=None):
"""
Safely clean all whitespace at the left of a paragraph.
Args:
text (str): The text to dedent.
baseline_index (int or None, optional): Which row to use as a 'base'
for the indentation. Lines will be dedented to this level but
no further. If None, indent so as to completely deindent the
least indented text.
Returns:
text (str): Dedented string.
Notes:
This is useful for preserving triple-quoted string indentation
while still shifting it all to be next to the left edge of the
display.
"""
if not text:
return ""
if baseline_index is None:
return textwrap.dedent(text)
else:
lines = text.split('\n')
baseline = lines[baseline_index]
spaceremove = len(baseline) - len(baseline.lstrip(' '))
return "\n".join(line[min(spaceremove, len(line) - len(line.lstrip(' '))):]
for line in lines)
|
32ae22e1cf4d0cca7d6a7315dfef5be840393e68
| 62,428
|
import math
def back_easein(pos):
"""
Easing function for animations: Back Ease In
"""
return pos * pos * pos - pos * math.sin(pos * math.pi)
|
dddcc52a49d9afe46a61e6622a16fb433204287e
| 62,433
|
import inspect
def is_exception_class(obj) -> bool:
"""Checks whether the given obj is an exception class."""
return inspect.isclass(obj) and issubclass(obj, BaseException)
|
0c9b9260ea123635c8279b778162647660c3dbec
| 62,441
|
def _Flatten(list_of_list):
"""Creates set of all items in the sublists."""
flattened = set()
for item in list_of_list:
flattened.update(item)
return flattened
|
7893271622bf6e77e0828b264fe443efd6172f38
| 62,445
|
def find_pressure_from_boiling_point(boiling: int) -> int:
"""Finds the pressure from a provided boiling point"""
pressure = 5 * boiling - 400
return pressure
|
229c3d7c93ed8785c6beeca19f16e4370360ba18
| 62,447
|
def split_header(diff):
"""Splits a diff in two: the header and the chunks."""
header = []
chunks = diff.splitlines(True)
while chunks:
if chunks[0].startswith('--- '):
break
header.append(chunks.pop(0))
else:
# Some diff may not have a ---/+++ set like a git rename with no change or
# a permissions change.
pass
if chunks:
assert chunks[0].startswith('--- '), 'Inconsistent header'
return ''.join(header), ''.join(chunks)
|
76b6065535828fc13e8ef44b2dfd310a29eb6ca2
| 62,449
|
def fetch_sensor_names(pattern, client, log):
"""Fetch matching sensor names for a specified pattern.
Args:
pattern (str): pattern to match.
client (obj): KATPortalClient object.
log: logger
Returns:
sensor_names (dict): sensor names matching supplied pattern.
None if no sensor results obtainable.
"""
log.info("Checking for sensor pattern: {}".format(pattern))
try:
sensor_names = yield client.sensor_names(pattern)
#if not sensor_names:
# log.warning("No matching sensors found for {}".format(pattern))
# return(None)
#else:
log.info("Match for telstate endpoint sensor: {}".format(sensor_names))
# return(sensor_names)
except Exception as e:
log.error(e)
return(None)
|
aee136381e50d27aaab94eaf4caa99ecfcc36173
| 62,451
|
def pct_change(before, after):
    """
    Computes the change from 'before' to 'after' as a percentage of 'before'.
    """
    pct = ((after - before)/before)*100
    return pct
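For example (hand-checked values):
pct_change(50, 75)   # 50.0  -> a 50% increase relative to 'before'
pct_change(80, 60)   # -25.0 -> a 25% decrease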
|
023e1767ccd9e8b93324ee40b5b531af80a19678
| 62,452
|
def set_axis_limits(ax, xlimits=None, ylimits=None):
"""Sets the x- and y-boundaries of the axis (if provided)
:param ax: axis object
:param xlimits: a 2-tuple (lower_bound, upper_bound)
:param ylimits: a 2-tuple (lower_bound, upper_bound)
:returns: ax
"""
if xlimits is not None:
ax.set_xlim(*xlimits)
if ylimits is not None:
ax.set_ylim(*ylimits)
return ax
|
b7e012830267894cc964ee3b7e79794d77f27c6c
| 62,455
|
from pathlib import Path
def unlink_if_exists_and_not_using_as_cached(file_name: Path, use_cached_video_files: bool) -> bool:
"""Conditionally unlink video file, unless we want to use cached videos."""
generate_video = True
if file_name.exists():
if use_cached_video_files:
generate_video = False
else:
file_name.unlink() # deletes a previous version
return generate_video
|
92fcee5285a72674c40b77985f59f2371abec089
| 62,459
|
def square_to_quad(xy, base_pts):
"""Transforms a square in [0,1]^2 to a (possibly skew) quadrilateral
defined by base_pts.
Args:
xy: Array, shape [..., 2]. The 2d coordinates of the point.
base_pts: Array, shape [4, ...]. The coordinates of the quadrilateral.
Should be in counterclockwise order to maintain orientation.
Returns:
Coordinates in whatever space base_pts was defined in.
>>> square_to_quad(_TEST_XY[:, np.newaxis], _TEST_SKEW_Q)
array([[ 0.70710678, 0. , 0.70710678],
[ 0. , 0.70710678, 0.70710678],
[-0.70710678, 0. , 0.70710678],
[ 0. , -1. , 0. ],
[ 0.35355339, 0.35355339, 0.70710678],
[ 0. , -0.0732233 , 0.53033009],
[-0.35355339, -0.5 , 0.35355339],
[ 0.49497475, 0.21213203, 0.70710678],
[ 0.14142136, -0.24393398, 0.45961941],
[-0.21213203, -0.7 , 0.21213203]])
"""
a, b, c, d = base_pts[0], base_pts[1], base_pts[2], base_pts[3]
x, y = xy[..., 0], xy[..., 1]
return a + (b-a)*x + (d-a)*y + (a-b+c-d)*x*y
|
1fd54e5deb3c1f44e041f8b6ed729d9e098ee496
| 62,461
|
def decode(byte_data):
"""
Decode the byte data to a string if not None.
:param byte_data: the data to decode
"""
if byte_data is None:
return None
return byte_data.decode()
|
c773802dd490ca32de7542366c49487e16a411e0
| 62,466
|
from typing import List
def get_qubits_key(list_of_qubits: List[int]) -> str:
""" from subset of qubit indices get the string that labels this subset
using convention 'q5q6q12...' etc.
:param list_of_qubits: labels of qubits
:return: string label for qubits
NOTE: this function is "dual" to get_qubit_indices_from_string.
"""
return 'q' + 'q'.join([str(s) for s in list_of_qubits])
|
76452941e6e91caf7527350ecde1c4a3c18168a9
| 62,472
|
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
|
4d668a08633019930f99bcfb89cf35c5a68ad2df
| 62,475
|
def CreateTestCreativeWrapper(client, server, version, label_id):
"""Create a test creative wrapper.
Args:
client: DfpClient used for service creation.
server: str the API server.
version: str the API version.
label_id: str the id of the label the creative wrapper applies to.
Returns:
The ID of the test creative wrapper.
"""
creative_wrapper_service = client.GetService('CreativeWrapperService', server,
version)
# Create creative wrapper object.
creative_wrapper = {
'labelId': label_id,
'ordering': 'INNER',
'header': {'htmlSnippet': '<b>My creative wrapper header</b>'}
}
# Add creative wrapper.
creative_wrappers = creative_wrapper_service.CreateCreativeWrappers(
[creative_wrapper])
return creative_wrappers[0]['id']
|
67766fe90c6f41b725d01284ae637d3fd7d6da14
| 62,479
|
def mapintensity(map, lat, lon, amp):
"""
Calculates a grid of intensities, multiplied by the amplitude given.
"""
grid = map.intensity(lat=lat.flatten(), lon=lon.flatten()).eval()
grid *= amp
grid = grid.reshape(lat.shape)
return grid
|
be2e50992b8d3f2339e94c99567c3f33db9d9baf
| 62,481
|
def get_object_string(swift, container, object_name):
"""Get the object contents as a string """
data = swift.get_object(container, object_name)[1]
try:
return data.decode('utf-8')
except AttributeError:
return data
|
17bad8d491ce75dbda2cc78cc12e3f474535669f
| 62,484
|
def proper_squeeze(tensor):
"""
Squeezes a tensor to 1 rather than 0 dimensions
:param tensor: torch.tensor with only 1 non-singleton dimension
:return: 1 dimensional tensor
"""
tensor = tensor.squeeze()
if len(tensor.shape) == 0:
tensor = tensor.unsqueeze(0)
return tensor
|
229015f26fd7273b7a6c50fc350ec1f1b4edd5d5
| 62,498
|
import math
def calcRMS(items,avg):
""" returns RootMeanSquare of items in a list """
# sqrt(sum(x^2))
# Not statistics RMS... "physics" RMS, i.e. standard deviation: sqrt(sum((x-avg)**2)/N)
# return math.sqrt(reduce(lambda x: (x - avg)**2, items) / len(items))
return math.sqrt(sum([(x-avg)**2 for x in items])/len(items))
|
600a9d4a25e73026c0cd0e47245edcfac1a79794
| 62,500
|
def bytes_to_hex_str(bytes_hex: bytes, prefix="") -> str:
"""Convert bytes to hex string."""
return prefix + bytes_hex.hex()
|
2ef7e57cbe0e96465543b966def31f5ce36bcbbe
| 62,513
|
def get_square_coors(indices: tuple) -> list:
"""
Finds the coordinates of the other elements in the 3x3 square that the
given cell is located in
:param indices: row and column indices
:return: list of tuples representing coordinates
"""
coors = []
row, col = indices
# Find the values of the start row and column coordinates
if row < 3:
start_row = 0
elif row < 6:
start_row = 3
else:
start_row = 6
if col < 3:
start_col = 0
elif col < 6:
start_col = 3
else:
start_col = 6
# Append the list of coordinates in the square
for i in range(3):
for j in range(3):
coors.append((start_row + i, start_col + j))
# Remove the initial coordinates
coors.remove(indices)
# Return the final list of coordinates
return coors
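An illustrative call (output worked out by hand): the other eight cells of the 3x3 block containing row 4, column 7.
get_square_coors((4, 7))
# [(3, 6), (3, 7), (3, 8), (4, 6), (4, 8), (5, 6), (5, 7), (5, 8)]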
|
899d21b31a9a07c2a28c820b44652c1c7b503c88
| 62,519
|
def _worker(mp, tile):
"""Multiprocessing worker processing a tile."""
return tile, mp.execute(tile)
|
8251e09a5ea3f35626fc8910011af5659d470725
| 62,520
|
import base64
import secrets
def csp_nonce() -> str:
"""
Generate a Base64 random string of 24 characters for the cryptographic nonce.
Returns:
str: Random string of 24 Base64 characters.
"""
return base64.b64encode(secrets.token_bytes(18)).decode("utf-8")
|
173d46925c13380a04506c12ff106ce97ca7a566
| 62,522
|
def _filter_locals(filter_: str, locals_) -> bool:
"""
Evaluate a condition string.
:param filter_: Python code to evaluate.
:param locals_: Locals to supply to the code.
:return: True if the condition is true, False otherwise.
"""
return eval(filter_, {}, locals_)
|
37270f371315ed48d4de80ae51a6655f82159864
| 62,528
|
from typing import List
def get_setup_requirements() -> List[str]:
"""
Returns setup requirements (common to all packages for now).
:return: setup requirements
"""
return ['setuptools', 'wheel']
|
2013d9d3e97177a04651234ebebc9cf668a240af
| 62,529
|
def SignExtend(val, nbits):
""" Returns a sign-extended value for the input value.
val - value to be sign-extended
nbits - number precision: 8 - byte, 16 - word etc.
"""
sign_bit = 1 << (nbits - 1)
return (val & (sign_bit - 1)) - (val & sign_bit)
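A few hand-worked values illustrating the two's-complement interpretation:
SignExtend(0x7F, 8)     # 127
SignExtend(0xFF, 8)     # -1
SignExtend(0x8000, 16)  # -32768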
|
24a004d3fee8db1282b75f9f3333cc00bc2439fe
| 62,530
|
def median(x):
"""Finds the median value of data set x"""
x_sorted = sorted(x)
mid = len(x) // 2
if len(x) % 2 == 1:
return x_sorted[mid]
else:
l = mid - 1
h = mid
return (x_sorted[l] + x_sorted[h]) / 2
|
87395bcbc1bc4800318a61cf7c4d3dbdc925486a
| 62,531
|
from pathlib import Path
def test_gcp(func):
"""A very simple test to see if we're on Pangeo GCP environment
Check existence of jovyan homedir and worker-template to determine if on
Kubernetes based cluster. Not very robust, but ok for now.
Raises:
NotImplementedError: Causes ASV to skip this test with assumption we're
not on Pangeo GCP environment
"""
pod_conf = Path('/home/jovyan/worker-template.yaml')
def func_wrapper(*args, **kwargs):
if not pod_conf.is_file():
if func.__name__ == 'setup':
raise NotImplementedError("Not on GCP Pangeo environment... skipping")
else:
return
else:
func(*args, **kwargs)
return func_wrapper
|
2523420869b61a01c0234930ad705de19104e471
| 62,534
|
import re
def _prenormalize_text(text):
"""Makes the text lowercase and removes all characters that are not digits, alphas, or spaces"""
# _'s represent spaces so convert those to spaces too
return re.sub(r"[^a-z0-9 ]", "", text.strip().lower().replace('_', ' '))
|
16801a7f3d13e343b190b448441d265656ba7081
| 62,537
|
import csv
def load_csv_records(fileName):
"""Given a CSV filename, returns records (= list of tuples)
Automatically transforms '2' to integer 2 and '1.9' to float 1.9
Output format is list tuples to be consistent with the result of a database query"""
def cast_if_possible(x):
"""Function that takes a string and returns int or float if possible, otherwise just string
Useful when reading from database where output is string by default before casting"""
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return x
records = []
with open(fileName, 'rt') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
row2 = [cast_if_possible(x) for x in row]
records.append(tuple(row2))
return records
|
b2ab1b0057c21ba6dbc9159a138c1c13c7b3d89c
| 62,540
|
import re
def check_password_requirements(new_password: str):
"""
Check if password meets requirements
Requirements are:
- a length of 8 or more characters, of which at least
- one uppercase letter,
- one lowercase letter
- and one digit
:param new_password: Password string to check against requirements
:return: True if password meets requirements
:return: False if the password does not meet the requirements
"""
if len(new_password) >= 8 and re.search("[a-z]", new_password) and re.search("[A-Z]", new_password) \
and re.search("[0-9]", new_password):
return True
return False
|
6671b8cb7ba3a944ba7075fc57e2b1e60f2564f4
| 62,541
|
import math
def UCB1(v, N, n):
"""
Parameters:
v: int -> value of the terminal_state
N: int -> number of times the parent node was visited
n: int -> number of times the node was visited
returns:
float: the UCB1 score used for MCTS node selection
"""
try:
# UCB1 formula
return v/n+2*math.sqrt(2*math.log(N)/n)
except ZeroDivisionError:
# if n is zero return infinity -> this node needs to be expanded
return float("inf")
except ValueError:
# if n is not set return infinity -> this node needs to be expanded
return float("inf")
|
7b16cfa9a123985add8a03029efcca9581de6a0c
| 62,542
|
def flag_is_set(flag, flags):
"""Checks if the flag is set
Returns boolean"""
if (flags & flag) > 0:
return True
return False
|
961e07349407d1c69f527a7ab545c36c05f64a24
| 62,543
|
def aligner_indices_fake_fs(fake_fs):
"""Return fake file system setup with files for aligner indices"""
d = {
"bwa": (".fasta.amb", ".fasta.ann", ".fasta.bwt", ".fasta.pac", ".fasta.sa"),
"star": ("/Genome", "/SA", "/SAindex"),
}
for aligner, suffixes in d.items():
for suffix in suffixes:
fake_fs.fs.create_file("/path/to/{}/index{}".format(aligner, suffix))
return fake_fs
|
a4c8466ccd5e0c07ec8cf6c45292f656bd2f1ea5
| 62,545
|
def spacing(area, shape):
"""
Returns the spacing between grid nodes
Parameters:
* area
``(x1, x2, y1, y2)``: Borders of the grid
* shape
Shape of the regular grid, ie ``(nx, ny)``.
Returns:
* ``[dx, dy]``
Spacing in the x and y directions
Examples:
>>> print(spacing((0, 10, 0, 20), (11, 11)))
[1.0, 2.0]
>>> print(spacing((0, 10, 0, 20), (11, 21)))
[1.0, 1.0]
>>> print(spacing((0, 10, 0, 20), (5, 21)))
[2.5, 1.0]
>>> print(spacing((0, 10, 0, 20), (21, 21)))
[0.5, 1.0]
"""
x1, x2, y1, y2 = area
nx, ny = shape
dx = (x2 - x1)/(nx - 1)
dy = (y2 - y1)/(ny - 1)
return [dx, dy]
|
456a895baf875fb32dc9319602620848176b3ba1
| 62,546
|
import json
def _is_json(str_to_check: str) -> bool:
"""
Checks if str is JSON
:param str_to_check: sting to check
:return: true if str is JSON, otherwise raises ValueError or TypeError
:raises TypeError: if str_to_check is not str type
:raises ValueError: if str_to_check is not valid JSON
"""
json.loads(str_to_check)
return True
|
3bdb6387d8c4e076c2a45de2ac6638de5b249b05
| 62,550
|
def get_locations(soup):
"""Return a list of store location URL parameters."""
# extract option tag with value="matching string"
locs = soup.select('option[value^="/menu/?location="]')
# value iterable from BS4
locations = [value['value'] for value in locs]
# print (locations, names)
return locations
|
efd26f313f22fb3eb67309e5d1b642ce413c9c15
| 62,552
|
def as_int(x, default=0):
"""Convert a thing to an integer.
Args:
x (object): Thing to convert.
default (int, optional): Default value to return in case the conversion fails.
Returns:
int: `x` as an integer.
"""
try:
return int(x)
except ValueError:
return default
|
75eaa8fab3bb776c9578d0f527a5127c419a0b70
| 62,553
|
def first_second_ids(dates):
"""
Returns a dictionary of 'date:unique ID' for each date in 'dates'.
IDs are ordered from oldest to newest, starting at 0.
:param list dates: List of dates
:return: unique dates IDs
:rtype: dict
"""
dset = sorted(set(dates))
return dict([(date_, i) for i, date_ in enumerate(dset)])
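An illustrative call with ISO date strings (any sortable date objects behave the same way):
first_second_ids(['2020-01-02', '2019-12-31', '2020-01-02'])
# {'2019-12-31': 0, '2020-01-02': 1} -> duplicates collapse, oldest date gets ID 0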
|
3742f7a9ae9113303a9febff48faba83dd2da7d3
| 62,554
|
def terraform_current_workspace(terraform):
"""
Get the current terraform workspace
"""
return_code, stdout, stderr = terraform.cmd('workspace show')
return stdout.rstrip()
|
079ab06b2cfb5e030519567fda3916b8cad18d91
| 62,555
|
def factorial(n: int) -> int:
"""Return the factorial of n, an exact integer >= 0
By definition, the factorial is defined as follow:
factorial(n) = n * factorial(n - 1), factorial(0) = factorial(1) = 1
>>> factorial(4)
24
>>> factorial(10)
3628800
>>> factorial(25)
15511210043330985984000000
>>> factorial(-2)
Traceback (most recent call last):
...
ValueError: n must be >= 0
"""
if n < 0:
raise ValueError("n must be >= 0")
if n <= 1:
return 1
return n * factorial(n - 1)
|
809a3a7585f9f83070264f2c09644da0f07b3a68
| 62,557
|
def ZScoreNormalization(X, axes=(0,1), scorer=None):
"""Applies Z-Score Normalization over a multispectral image"""
if not scorer:
u = X.mean(axis=axes)
std = X.std(axis=axes)
scorer = lambda arr: (arr - u) / std
X_norm = scorer(X)
return X_norm, scorer
|
059a153ce7a911bcc689e11c8655e0a9ec55ddc0
| 62,558
|
def contains_word(s, w):
"""
Checks whether a string contains a certain word
"""
return f" {w} " in f" {s} "
|
587b1db49203981814a1fbdcf1d11d0bd88ac63d
| 62,560
|
def clear_dobson_paddy(relevance, accuracy):
"""
Calculate the certainty level using the approach presented in
Clear, Adrian K., Simon Dobson, and Paddy Nixon.
"An approach to dealing with uncertainty in context-aware pervasive systems."
UK/IE IEEE SMC Cybernetic Systems Conference. 2007.
:param relevance: relevance of the result (scale: 0..1)
:param accuracy: accuracy of the sensor data
:return: certainty level
"""
return accuracy + (relevance * accuracy)
|
9611bcf6774314341a64d717d372867625a5de5c
| 62,561
|
def lispify(value, string_to_keyword = False):
    """
    Converts a given value from a Python data type into its Lisp counterpart.
    Returns the following conversions:
    lists/arrays -> lists
    bools -> T or NIL
    ints -> ints
    floats -> floats
    strings -> strings (or chars if string len == 1) or keywords
    dicts -> acons lists (or NIL if key == "error")
    Parameter value: The value which needs to be converted (i.e. Lispified).
    Parameter string_to_keyword: Boolean to signal that strings should be
    converted into keywords.
    """
    if isinstance(value, list):
        return "(" + " ".join(["list"] + [lispify(v) for v in value]) + ")"
    elif isinstance(value, bool):
        return "T" if value else "NIL"
    elif isinstance(value, int) or isinstance(value, float):
        return str(value)
    elif isinstance(value, str):
        if len(value) == 1:
            return f"#\\{value}"
        else:
            return f":{value.lower()}" if string_to_keyword else f"\"{value}\""
    elif isinstance(value, dict):
        acons_list = []
        for k, v in value.items():
            if k == "error":
                return "NIL"
            acons_list += ["'({0} . {1})".format(lispify(k, True), lispify(v))]
        return "(" + " ".join(["list"] + acons_list) + ")"
    elif value is None:
        return "NIL"
    else:
        raise TypeError("lispify function does not know how to handle value of type: " + str(type(value)))
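A few illustrative conversions (outputs derived by hand; dict key order follows Python insertion order):
lispify([1, True, "ok"])              # '(list 1 T "ok")'
lispify({"size": 2, "name": "box"})   # (list '(:size . 2) '(:name . "box"))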
|
d9c9b1071d4d7d4c5ebb7634ed80f197d6f4f3a9
| 62,563
|
def createDataUpload(node, sensorData):
"""
:param
node: the device Address or ID
sensorData: the sensor data is in dictionary type, for example: {"temperature": 30.5, "humidity": 20.0}
:return: a list to write into InfluxDB
"""
myDict = {"measurement": "Multisensor", "tags": {"Node": node}, "fields": sensorData}
return [myDict]
|
6bc9b73d75f26b5302537dd94945480660080845
| 62,564
|
import math
def nextpow2(N):
"""
Return the power of 2 greater than or equal to *N*.
"""
return 2**int(math.ceil(math.log(N, 2)))
|
0be668d61dac0f755b42d0472ac2cf53620ea1e0
| 62,567
|
def factorial(n):
    """
    Returns the factorial of the input integer
    Time complexity: O(n)
    Space complexity: O(n)
    """
    assert n >= 0, f"Input value {n} must be a non-negative integer."
    if n <= 1:
        return 1
    return n * factorial(n - 1)
|
8044cc7267b0087eada82e76caa59a21806a39a3
| 62,568
|
def humanise(number):
    """Converts bytes to human-readable string."""
    if number/2**10 < 1:
        return "{}".format(number)
    elif number/2**20 < 1:
        return "{} KiB".format(round(number/2**10, 2))
    elif number/2**30 < 1:
        return "{} MiB".format(round(number/2**20, 2))
    elif number/2**40 < 1:
        return "{} GiB".format(round(number/2**30, 2))
    elif number/2**50 < 1:
        return "{} TiB".format(round(number/2**40, 2))
    else:
        return "{} PiB".format(round(number/2**50, 2))
|
af282bfe6e093efb90b292a16e37845bb2e19340
| 62,578
|
def _prep_sge_resource(resource):
"""Prepare SGE resource specifications from the command line handling special cases.
"""
resource = resource.strip()
k, v = resource.split("=")
if k in set(["ar"]):
return "#$ -%s %s" % (k, v)
else:
return "#$ -l %s" % resource
|
100c1152f5766a1bc68d289645ef56d152f98c14
| 62,579
|
import re
def is_fun(func, ir):
"""
Check that the intermediate representation (ir) describes
a function with name func.
"""
return type(ir) == list and type(ir[0]) == str and re.match(func + r'\b', ir[0])
|
0700ed77a707c23197dddf6793a892b3c8d958dd
| 62,580
|
def areaRectangulo(base, altura):
"""Function that finds the area of a rectangle given its width and height
Args:
base (float): the value for the width of the rectangle
altura (float): the value for the height of the rectangle
Returns:
float: The area of the rectangle
"""
return base * altura
|
c013f86cac04ee405a5d6cf06184a19ffbf7f254
| 62,583
|
def get_average_accuracy(networks: list):
"""
Get the average accuracy for a list of networks
:param networks: list of dict with network parameters
:return: average accuracy obtained by all networks
"""
total_accuracy = 0
for network in networks:
total_accuracy += network.accuracy
return total_accuracy / len(networks)
|
2839cf7a38be7fc5824e6191d886639911b13078
| 62,588
|
def convert_lines_to_object(lines: list) -> dict:
"""
Convert an array of lines into an object indexed by the line number
Indexing from 0
Parameters
----------
lines : list
The list of lines to convert
Returns
-------
dictionary
A dictionary of the lines, indexed by the line number (starting from 0)
"""
i = 0
pad_to = len(str(len(lines)))
obj = {"count": len(lines)}
for line in lines:
obj.update({"line" + str(i).rjust(pad_to, "0"): line})
i += 1
return obj
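For example (output worked out by hand):
convert_lines_to_object(["alpha", "beta"])
# {'count': 2, 'line0': 'alpha', 'line1': 'beta'}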
|
c10402a03c1105622af14eeb9dd1969e33150bbf
| 62,592
|
import hashlib
def get_sha1(filepath):
"""
calculate the sha1 hash of the content of a given file
Parameters
----------
filepath : path
the file of which to calculate the hash.
Returns
-------
hash: str
the hash of the content of the file.
"""
sha1sum = hashlib.sha1()
with open(filepath, 'rb') as source:
block = source.read(2**16)
while len(block) != 0:
sha1sum.update(block)
block = source.read(2**16)
return sha1sum.hexdigest()
|
d2c23ab329438250d8948305453263a5344a6154
| 62,593
|
import math
def is_pow2(val):
"""Check if input is a power of 2 return a bool result."""
return False if val <= 0 else math.log(val, 2).is_integer()
|
40f47a9135dc2e3abb3fc948a6d74e5a7758f2d6
| 62,594
|
def strand_guess(aligner, q, t, minpct=50):
"""Guess the strand of t (target) that q (query) lies on.
Given a Bio.Align aligner and two Bio.SeqRecords q and t, guess which strand
of t that q lies on. The approach is to align both q and q.reverse_complement()
to t, seeing which scores higher. The score has to be at least minpct of the
maximum possible score, default 50 percent.
ARGUMENTS
aligner: A Bio.Align aligner.
q: Query sequence as a Bio.SeqRecord.
t: Target sequence as a Bio.SeqRecord.
minpct: Score of best alignment between q and t must be at least minpct
percent of the maximum possible score.
RETURNS
> 0: q appears to lie on the forward strand of t.
< 0: q appears to lie on the reverse strand of t.
otherwise: unable to determine which strand of t.
"""
score_max = min(aligner.score(t.seq, t.seq), aligner.score(q.seq, q.seq))
score_f = aligner.score(q.seq, t.seq)
score_r = aligner.score(q.reverse_complement().seq, t.seq)
if score_f > score_r and score_f >= minpct * score_max / 100:
return 1
elif score_r > score_f and score_r >= minpct * score_max / 100:
return -1
else:
return 0
|
392cca8d089e81d456dce7e473899b3fa8282ae4
| 62,596
|
def _dictionary_to_column_paths(dictionary, prefix=tuple()):
"""Convert a dictionary to the column paths within this dictionary
For example, if the argument is
{
1 : {
'a' : True,
'b' : False
},
(10, 'blah') : SomeObject()
}
The result would be the following set of key paths
{
(1, 'a'),
(1, 'b'),
((10, 'blah'),)
}
"""
paths = set()
for key, val in dictionary.items():
if isinstance(val, dict):
paths.update(_dictionary_to_column_paths(val, prefix + (key,)))
else:
paths.add(prefix + (key,))
return paths
|
886b5305891a1f70f33f632f3cb5146bf165e4a6
| 62,598
|
import math
def p02_p01(M, beta, gamma):
"""Stagnation pressure ratio across an olique shock (eq. 4.9)
:param <float> M: Mach # upstream
:param <float> Beta: Shock angle w.r.t initial flow direction (radians)
:param <float> gamma: Specific heat ratio
:return <float> Total pressure ratio p02/p01
"""
m1sb = M * math.sin(beta)
t1 = (gamma + 1.0) / (2.0 * gamma * m1sb ** 2 - (gamma - 1.0))
t2 = (gamma + 1.) * m1sb ** 2 / (2.0 + (gamma - 1.0) * m1sb ** 2)
return t1 ** (1.0 / (gamma - 1.0)) * t2 ** (gamma / (gamma - 1.0))
|
ef983a75e300a439dfa2b68ffc4da0798bce9bf3
| 62,602
|
def get_season_episodes(show, season):
"""
Creates a list of episode objects from show details.
Parameters
----------
show : dict
Return from TV Search Controller
season : int
Season to return
Returns
-------
season_list: list
List of dict objects
"""
# initialize season list
season_list = []
# loop through and form list from episodes
# that match the season param
for episodes in show["episodes"]:
if episodes["season"] == season:
season_list.append(episodes)
# return new list
return season_list
|
cfea277c06aececf95f4b0ba0282b1e96dc93525
| 62,604
|
def startNamespace(moduleConfig):
"""String for the start the header namespace"""
string = [ 'namespace ' + s + '\n{\n' for s in moduleConfig['Namespace'] ]
string = ''.join(string)
return string
|
74102a294af94a36383aa697850fc67f86e67cd5
| 62,606
|
def _webwallet_support(coin, support):
"""Check the "webwallet" support property.
If set, check that at least one of the backends run on trezor.io.
If yes, assume we support the coin in our wallet.
Otherwise it's probably working with a custom backend, which means don't
link to our wallet.
"""
if not support.get("webwallet"):
return False
return any(".trezor.io" in url for url in coin["blockbook"] + coin["bitcore"])
|
b9ed226a8cff055910c3e5883a17aff54ab09843
| 62,612
|
def quote_parameters(positional, named):
"""Quote the given positional and named parameters as a code string."""
positional_list = [ repr(v) for v in positional ]
named_list = [ "%s=%r" % (k, v) for k, v in named.items() ]
return ', '.join(positional_list + named_list)
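For example (output derived by hand from the repr-based formatting above):
quote_parameters([1, 'a'], {'x': 2.5})   # "1, 'a', x=2.5"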
|
8c0b493107cdbd19ea8205eb1a5cc26b1bcc062d
| 62,613
|
def ret_func(par):
"""Return the passed argument."""
return par
|
00366afe4f292955da4d96ac551f7ab7deaad669
| 62,614
|
def _parse_header(line, lines):
"""Parse version, start date and time info from the header from the PWscf
standard output.
Args:
line (str): string with a line from PWscf standard output to be parsed.
lines (str): iterable of strings from PWscf standard output.
Returns:
A dictionary with the PWscf version, start date and time information.
Examples:
For a typical standard output header from PWscf like this:
" Program PWSCF v.6.1 (svn rev. 13591M) starts on 12Jul2017 at 10:17:52 \n"
the parsed dictionary output looks like this:
{'version': 'v.6.1', 'start_date': '12Jul2017', 'start_time': '10:17:52'}
"""
toks = line.split()
def _get_next_tok(toks, prev_tok):
match = [toks[i] for i in range(len(toks)) if toks[i-1] == prev_tok]
return match[0] if match else None
return {
"version": _get_next_tok(toks, 'PWSCF'),
"start_date": _get_next_tok(toks, 'on'),
"start_time": _get_next_tok(toks, 'at')
}
|
38507367f1ca582669b6688aafbb058e096e5510
| 62,618
|
def FFD(s, B):
    """First Fit Decreasing heuristics for the Bin Packing Problem.
    Parameters:
        - s: list with item widths
        - B: bin capacity
    Returns a list of lists with bin compositions.
    """
    remain = [B]  # keep list of empty space per bin
    sol = [[]]    # a list of items (i.e., sizes) on each used bin
    for item in sorted(s, reverse=True):
        for (j, free) in enumerate(remain):
            if free >= item:
                remain[j] -= item
                sol[j].append(item)
                break
        else:  # does not fit in any bin
            sol.append([item])
            remain.append(B - item)
    return sol
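A worked example (hand-traced through the loop above):
FFD([4, 8, 1, 4, 2, 1], 10)   # [[8, 2], [4, 4, 1, 1]] -> two bins of capacity 10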
|
aa41b257dad8e3487fc08257234e8f66441be768
| 62,620
|
def square_of_sum(number):
"""
Return the square of the sum of the first [number] natural numbers
"""
return sum(range(1, number + 1)) ** 2
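For example:
square_of_sum(3)    # 36 -> (1 + 2 + 3) ** 2
square_of_sum(10)   # 3025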
|
5b009b50d09efba576d404ca45236dbfc9a8efd0
| 62,621
|
import socket
def create_async_server_socket(host, port, reuse: bool = False):
"""
:param host:
:param port:
:param reuse: If true, allows shutting down the server and starting it up
right away. Otherwise, we have to wait 1min before starting it up again
https://stackoverflow.com/questions/4465959/python-errno-98-address-already-in-use
In production, you'd want this set to `false`.
:return:
"""
# socket.socket, bind, accept, listen, send, (recv to do), close, shutdown
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if reuse:
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((host, port))
server_socket.listen()
server_socket.setblocking(False)
return server_socket
|
c3548393dd64e549e270503953a843cc205a65d7
| 62,626
|
def rotations(S):
"""
Returns all the rotations of a string
"""
L=list(S)
L2=list()
for i in range(0, len(L)):
L2.append(''.join(L[i:] + L[:i]))
return L2
|
a59c1d1daa9df37581ef670c2744351c61bd83c9
| 62,629
|
def charge(mol):
"""Total charge"""
return sum(a.charge for _, a in mol.atoms_iter())
|
c849fe6c25c0e778c12fc9d04c38421ac7d32651
| 62,631
|
def solution(n):
"""Returns the sum of all fibonacci sequence even elements that are lower
or equals to n.
>>> solution(10)
10
>>> solution(15)
10
>>> solution(2)
2
>>> solution(1)
0
>>> solution(34)
44
"""
i = 1
j = 2
sum = 0
while j <= n:
if j % 2 == 0:
sum += j
i, j = j, i + j
return sum
|
8844ba11fc1bbc048a0cc5593d3d68beb0f21f52
| 62,636
|
def _control_input(devices, control_inputs, idx):
"""Returns the `idx`-th item in control_inputs to be used in ops.control_dependencies.
This is a helper function for building collective ops.
Args:
devices: a list of device strings the collective run on.
control_inputs: a list or None.
idx: the index into `inputs` and `control_inputs`.
Returns:
A one item list of the `idx`-th element of `control_inputs`, or an empty
list if `control_inputs` is None.
"""
if control_inputs is None:
return []
if len(control_inputs) != len(devices):
raise ValueError(
'control_inputs must match the length of the devices, %s != %s' %
(len(control_inputs), len(devices)))
return [control_inputs[idx]]
|
ea0b4cf6ed8d033ecfa3ba3a85cac1aff29fb847
| 62,639
|
import torch
def hf_represent(batch_ids, model, layer=-1):
"""
Encode a batch of sequences of ids using a Hugging Face
Transformer-based model `model`. The model's `forward` method is called
with `output_hidden_states=True`, and we get the hidden states from
`layer`.
Parameters
----------
batch_ids : iterable, shape (n_examples, n_tokens)
Sequences of indices into the model vocabulary.
model : Hugging Face transformer model
layer : int
The layer to return. This will get all the hidden states at
this layer. `layer=0` gives the embedding, and `layer=-1`
gives the final output states.
Returns
-------
Tensor of shape `(n_examples, n_tokens, n_dimensions)`
where `n_dimensions` is the dimensionality of the
Transformer model
"""
with torch.no_grad():
reps = model(batch_ids, output_hidden_states=True)
return reps.hidden_states[layer]
|
3764245a67e207a4ad13a21928a4ca52851dda5a
| 62,640
|
def parse_readable_size_str(size_str):
"""Convert a human-readable str representation to number of bytes.
Only the units "kB", "MB", "GB" are supported. The "B character at the end
of the input `str` may be omitted.
Args:
size_str: (`str`) A human-readable str representing a number of bytes
(e.g., "0", "1023", "1.1kB", "24 MB", "23GB", "100 G".
Returns:
(`int`) The parsed number of bytes.
Raises:
ValueError: on failure to parse the input `size_str`.
"""
size_str = size_str.strip()
if size_str.endswith("B"):
size_str = size_str[:-1]
if size_str.isdigit():
return int(size_str)
elif size_str.endswith("k"):
return int(float(size_str[:-1]) * 1024)
elif size_str.endswith("M"):
return int(float(size_str[:-1]) * 1048576)
elif size_str.endswith("G"):
return int(float(size_str[:-1]) * 1073741824)
else:
raise ValueError("Failed to parsed human-readable byte size str: \"%s\"" %
size_str)
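Example conversions (hand-checked against the branches above):
parse_readable_size_str("1023")    # 1023
parse_readable_size_str("1.1kB")   # 1126 -> int(1.1 * 1024)
parse_readable_size_str("24 MB")   # 25165824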
|
3d62bd98343b6cef0e55b4f3919e4bb9282f34b9
| 62,651
|
def add_one(input):
"""
Returns the list with 1 added to every element of input
Example: add_one([1,2,3,1]) returns [2,3,4,2]
Parameter input: The data to process
Precondition: input an iterable, each element an int
"""
result = []
for x in input:
x = x+1
result.append(x)
return result
|
a6441b119944c33f7f499919d7ef1aba47a2e648
| 62,652
|
def GeneralizedAdvantageEstimator(critic, state_batch, reward, gamma, lamb):
"""
Compute the Generalized Advantage Estimator.
See https://danieltakeshi.github.io/2017/04/02/notes-on-the-generalized-advantage-estimation-paper/
:param critic: Critic network.
:param state_batch: A numpy array of batched input that can be feed into the critic network directly. Note that it must also contain the ending state, i.e. has batch_size + 1 entries.
:param reward: numpy array of shape (batch_size,)
:param gamma: Tuning parameter gamma.
:param lamb: Tuning parameter lambda.
:return: Python List of GAE values in ascending time order (matched with parameter reward)
"""
n = len(state_batch) - 1
assert reward.shape == (n,)
value = critic.predict_on_batch(state_batch).flatten()
delta = reward + gamma * value[1:, ] - value[:-1, ]
# No premature optimization for now...
result = [0]
r = gamma * lamb
for i in range(0, n):
result.append(result[-1] * r + delta[n-1-i])
return result[:0:-1]
|
b5f00c3c31e26f6b2a2c75b54d1ba3edb4f45d52
| 62,653
|
def url_replace(request, field, value):
"""URL Replace.
Allows us to quickly and easily swap parameters in the request
object in the case of doing complex queries like filtering
certain search results, feeds, or metadata. For example:
<a href="?{% url_replace request 'viewing' 'unanswered' %}" class="feedNav-link">Unanswered</a> # noqa
And let's pretend the url currently looks like this:
http://app.dev/feed/?viewing=all&topic=Chronic%20Pain
Clicking on that link would generate a URL like this:
http://app.dev/feed/?viewing=unanswered&topic=Chronic%20Pain
"""
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
|
5828680cd207689b31175ab952f34350ac62de82
| 62,659
|
def _eval_feature_fn(fn, xs, classes):
"""_eval_feature_fn(fn, xs, classes) -> dict of values
Evaluate a feature function on every instance of the training set
and class. fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values
|
11e300b2896197a16a0faa46961d348683aa143f
| 62,666
|
def get_out_layer_name(graph):
"""
Get the output layer's name for the model.
graph.output only returns the output's node index
"""
output_index_list = [x.name for x in graph.output]
return [node.name for node in graph.node if node.output[0] in output_index_list]
|
31637ec7343cce64a87619705a6e8f60ba75d2c7
| 62,667
|
def isDescendant(node1, node2):
"""
Checks if node1 is descendant of node2
:param node1:
:param node2:
:return: True if node1 is descendant of node2, False otherwise
"""
if node2 is None:
return False
if node1 is node2:
return True
return isDescendant(node1, node2.left) if node1.value < node2.value else isDescendant(node1, node2.right)
|
83e90e33b763a0f7706a41abaa71a01d30b036ac
| 62,672
|
def total_seconds(delta):
"""
Adapted from Python 2.7's timedelta.total_seconds() method.
Args:
delta: datetime.timedelta instance.
"""
return (delta.microseconds + (delta.seconds + (delta.days * 24 * 3600)) * \
10 ** 6) / 10 ** 6
|
8637215f13333f0b0d9bc3f84896ccc80e6cabf2
| 62,677
|
def format_value_for_munin(value, zero_allowed=False):
"""
Convert value into a format that will be understood by Munin
@param value: value to write
@param boolean zero_allowed: if True, 0 will be reported as 0, otherwise as unknown ('U')
@return value
"""
return value if value or (zero_allowed and value == 0) else 'U'
|
e1c3fe5fb39ed85dac033cb7d9cc95eee5d84b1d
| 62,689
|
def split_suffix(symbol):
"""
Splits a symbol such as `__gttf2@GCC_3.0` into a triple representing its
function name (__gttf2), version name (GCC_3.0), and version number (300).
The version number acts as a priority. Since earlier versions are more
accessible and are likely to be used more, the lower the number is, the higher
its priority. A symbol that has a '@@' instead of '@' has been designated by
the linker as the default symbol, and is awarded a priority of -1.
"""
if '@' not in symbol:
return None
data = [i for i in filter(lambda s: s, symbol.split('@'))]
_, version = data[-1].split('_')
version = version.replace('.', '')
priority = -1 if '@@' in symbol else int(version + '0' *
(3 - len(version)))
return data[0], data[1], priority
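Illustrative calls (outputs derived by hand):
split_suffix('__gttf2@GCC_3.0')    # ('__gttf2', 'GCC_3.0', 300)
split_suffix('__gttf2@@GCC_3.0')   # ('__gttf2', 'GCC_3.0', -1) -> linker default symbol
split_suffix('printf')             # None -> unversioned symbol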
|
b71e96bb2ff9bf694bec541ca3478c696eaee029
| 62,693
|
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found."""
'''
worksheet: String Algorithms, pt. 2
let p = len(pattern)
let t = len(text)
p * t = checking every letter in text against every letter in pattern
p(p-1) = p(p^2 - 1) = # REVIEW N/A VALUES IN WORKSHEET
Best time complexity -- O(___),
Worst time complexity --
O( p*t - p^2 + p ) =
O( p*t + 1) - p^2 ) =
O( p*t - p^2 ) remove constant since it doesn't have that much of an effect
if p < t, then O(p * t)
'''
if pattern == '':
return list(range(0, len(text)))
index = 0
t_index = 0 # first letter of pattern
matching_indexes_list = []
while index <= len(text) - len(pattern):
if text[index + t_index] == pattern[t_index]:
# if t_index is the last index in pattern
if t_index == len(pattern) - 1:
matching_indexes_list.append(index)
t_index = 0
index += 1
else:
t_index += 1
else:
t_index = 0 # reset
index += 1
return matching_indexes_list
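For example (hand-traced through the scan above):
find_all_indexes("abcabcab", "abc")   # [0, 3]
find_all_indexes("aaa", "aa")         # [0, 1] -> overlapping matches are reported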
|
a10590d54a23854ae5126c56e88debd0fb0dae93
| 62,695
|
from typing import List
def vectorize(
tokenized_text: List[str],
embedding_model):
"""
Args:
tokenized_text: a tokenized list of word tokens
embedding_model: the embedding model implements `.infer_vector()` method
Returns:
np.ndarray: a word embedding vector
"""
return embedding_model.infer_vector(tokenized_text)
|
6da9b0eb87220585a79af5f86d615bad6b747845
| 62,702
|
from typing import Optional
from typing import List
import random
def generate_random_number(
length: int = 6, forbidden_first_digit: Optional[List[int]] = None
) -> str:
"""
Generate random number with the provided length (number of digits) ensuring that two neighboring
digits are always different and with each digit having a value between 0 - 9.
"""
result = ""
while len(result) < length:
random_value = random.randint(0, 9)
if len(result) == 0:
# First digit
if forbidden_first_digit and random_value in forbidden_first_digit:
continue
result += str(random_value)
else:
# Make sure it's different than the previous digit
if random_value == int(result[-1]):
continue
result += str(random_value)
return result
|
effbec2feeebd09c541e183bac1c82fe167f3e47
| 62,708
|
import torch
import pickle
def load_model(model_pkl_path, device=None):
"""Load StyleGAN2 model from pickle checkpoint file."""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
with open(model_pkl_path, 'rb') as f:
G = pickle.load(f)['G_ema'].to(device) # torch.nn.Module
return G
|
c9a053cf4f9297423867c2cd76c06b15daa13a53
| 62,710
|
from typing import Union
import hashlib
def make_etag(args: Union[dict, list]):
"""Make an etag by hashing the representation of the provided `args` dict"""
argbytes = bytes(repr(args), "utf-8")
return hashlib.md5(argbytes).hexdigest()
|
b0ed4c16fd2b69b184cddac6ca92e50c3ba110a3
| 62,711
|
def has_as_dict(obj):
"""Check if object has 'as_dict' method."""
return hasattr(obj, "as_dict") and callable(getattr(obj, "as_dict", None))
|
e3d79446c71d364dc6bfa46561d1afb6f8965c07
| 62,712
|