content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import Sequence
from typing import Tuple
from typing import List
def pad_string_sequences(seq: Sequence[Sequence[str]]) -> Tuple[List[List[str]], Sequence[int]]:
    """Pad string sequences with '' to a common length.

    Like keras.preprocessing.sequence.pad_sequences but for strings;
    also returns the original length of every sequence.

    :param seq: sequences of strings to pad.
    :return: (padded lists, list of original lengths).
    """
    lengths = [len(item) for item in seq]
    widest = max(lengths)
    padded = [list(item) + [''] * (widest - n) for item, n in zip(seq, lengths)]
    return padded, lengths
def color_post_response_ok(devid, hue, saturation):
    """Return color change response json.

    hue and saturation are truncated to ints before formatting.
    """
    # NOTE(review): interior whitespace of the literal was reconstructed
    # after extraction mangling -- confirm against consumers if the exact
    # formatting of the JSON string matters.
    return '''
{
    "idForPanel": "''' + devid + '''",
    "hue": ''' + str(int(hue)) + ''',
    "saturation": ''' + str(int(saturation)) + '''
}'''
def human2bytes(size, unit, *, precision=2, base=1024):
    """
    Convert size from human to bytes.
    Arguments:
        size (int): number
        unit (str): converts from this unit to bytes
        'KB', 'MB', 'GB', 'TB', 'PB', 'EB' (also 'ZB')
    Keyword arguments (opt):
        precision (int): number of digits after the decimal point
        default is 2
        base (int): 1000 - for decimal base
        1024 - for binary base (it is the default)
    Returns:
        (str) number of bytes formatted with ``precision`` decimals
        (a string, not an int -- see the examples)
    Raises:
        ValueError: if unit is unknown or size is not a number
    Example:
        >>> human2bytes(10, 'GB')
        '10737418240.00'
        >>> human2bytes(10, 'GB', precision=0)
        '10737418240'
        >>> human2bytes(10, 'PB')
        '11258999068426240.00'
    """
    # Multiplier for each supported unit, computed from the chosen base.
    dic_power = {
        "KB": base,
        "MB": base ** 2,
        "GB": base ** 3,
        "TB": base ** 4,
        "PB": base ** 5,
        "EB": base ** 6,
        "ZB": base ** 7,
    }
    if unit not in dic_power:
        raise ValueError(
            "invalid unit. It must be {}".format(", ".join(dic_power.keys()))
        )
    try:
        num_bytes = float(size) * int(dic_power[unit])
    except ValueError:
        raise ValueError("value is not a number")
    return "{0:.{prec}f}".format(num_bytes, prec=precision)
def dget(d, dkey, default=None):
    """Dictionary get: retrieve a field through a nested key.

    Args:
        d (dict, list): Dictionary/list to retrieve the field from.
        dkey (str): Period-separated nested key, e.g. 'key1.3.key2'
            which is equivalent to d['key1'][3]['key2'].
        default (optional): Object returned when the value is not found.
            Defaults to None.
    Returns:
        Any: the resolved field, or ``default`` if not found.
    """
    current = d
    for part in dkey.split('.'):
        # An empty/falsy container cannot hold the next level.
        if not current:
            return default
        if part.isdigit():
            idx = int(part)
            if not (isinstance(current, list) and idx < len(current)):
                return default
            current = current[idx]
        elif isinstance(current, dict) and part in current:
            current = current[part]
        else:
            return default
    return current
def is_obs_valid(obs, max_plate_z):
    """
    Check whether the given observation is valid.

    An observation off the plate (z == 0 or z > max_plate_z) must have
    theta 'N'; one at z == 1 must not; anything else is accepted.
    """
    z = obs.g_pos.value
    theta = obs.theta.value
    if z == 0 or z > max_plate_z:
        return theta == 'N'
    if z == 1:
        return theta != 'N'
    return True
from typing import OrderedDict
def get_table_columns(metadata):
    """ Extract column names and Python types from table metadata.
    Args:
        metadata: Table metadata (SQLAlchemy-style: exposes ``.c`` columns
            whose types provide ``python_type`` -- TODO confirm callers)
    Returns:
        OrderedDict mapping column name -> Python type name (str)
    """
    # NOTE(review): OrderedDict comes from ``typing`` at the top of the
    # file; it is callable, but ``collections.OrderedDict`` is the
    # conventional import for instantiation.
    cols = OrderedDict()
    for col in metadata.c:
        # str(col) is "<table>.<column>"; keep only the column part.
        name = str(col).rpartition(".")[2]
        cols[name] = col.type.python_type.__name__
    return cols
def import_object(name):
    """Return the object located at a dotted import path.

    Raises ValueError when *name* is not a dotted path string.
    """
    if not isinstance(name, str) or '.' not in name:
        raise ValueError('Expected a dot import path string')
    module_path, attr_name = name.rsplit('.', 1)
    module = __import__(module_path, fromlist=[attr_name])
    return getattr(module, attr_name)
def hash_response(r):
    """Build a unique name for a request from its method, uri and body."""
    return f"{r.method}:{r.uri}:{r.body}"
def list_deb(archive, compression, cmd, verbosity, interactive):
    """Build the command line that lists a DEB archive's contents.

    Only ``cmd`` and ``archive`` are used; the remaining parameters are
    part of a uniform handler signature.
    """
    return [cmd, '--contents', '--', archive]
def JD_to_LST(JDs, llong):
    """Return local sidereal times as fractions of a day.

    JDs - iterable of Julian dates.
    llong - local longitude in +/- degrees.
    """
    offset_hours = llong / 15.
    results = []
    for jd in list(JDs):
        days_since_j2000 = jd - 2451545.0
        gmst = 18.697374558 + 24.06570982441908 * days_since_j2000
        gmst = gmst % 24
        # Defensive clamping into [0, 24).
        if gmst < 0:
            gmst += 24.
        elif gmst >= 24.0:
            gmst -= 24.0
        # Shift by longitude and express as a fraction of a day.
        results.append((gmst + offset_hours) / 24.)
    return results
def post_process_pleonastic_pronoun(system_mentions):
    """ Remove pleonastic 'it' and 'you' mentions.

    Heuristics:
        - it: appears in 'it _ _ that' or 'it _ _ _ that'
        - you: appears in 'you know'
    Args:
        system_mentions (list(Mention)): A list of system mentions.
    Returns:
        list(Mention): the filtered mentions, sorted.
    """
    kept = []
    for mention in system_mentions:
        surface = " ".join(mention.attributes["tokens"]).lower()
        if surface == "it":
            ctx2 = mention.get_context(2)
            ctx3 = mention.get_context(3)
            if any(ctx is not None and ctx[-1] == "that"
                   for ctx in (ctx2, ctx3)):
                continue
        if surface == "you" and mention.get_context(1) == ["know"]:
            continue
        kept.append(mention)
    return sorted(kept)
def format_time(t):
    """Return a human-readable description of *t*, given in seconds."""
    whole_minutes = int(t / 60)
    remainder = t % 60
    parts = []
    if whole_minutes > 0:
        suffix = "" if whole_minutes == 1 else "s"
        parts.append("%d minute%s " % (whole_minutes, suffix))
    parts.append("%.3f seconds" % remainder)
    return "".join(parts)
import hashlib
def compress(string: str, max_length: int = 64) -> str:
    """
    Compress a string to a string of restricted length.

    Useful because some filesystems and/or disk-encryption tools restrict
    the maximum length of a filename.

    :param string: string to be compressed
    :param max_length: maximum length of the output, default 64
    :return: truncated sha256 hex digest of the input string
    """
    digest = hashlib.sha256(string.encode('utf-8')).hexdigest()
    return digest[:max_length]
def allergen_get_name_list(allergens):
    """
    Return the list of 'name' values from a list of allergen dicts.
    """
    return [entry["name"] for entry in allergens]
import math
def smallest_multiple(n):
    """ Return the least common multiple of all numbers from 2 to n. """
    lcm = 1
    for k in range(2, n + 1):
        # lcm(a, b) = a * b / gcd(a, b)
        lcm = lcm * (k // math.gcd(lcm, k))
    return lcm
def simtelTelescopeConfigFileName(
    site, telescopeModelName, modelVersion, label, extraLabel
):
    """
    sim_telarray config file name for a telescope.

    Parameters
    ----------
    site: str
        South or North.
    telescopeModelName: str
        LST-1, MST-FlashCam, ...
    modelVersion: str
        Version of the model.
    label: str
        Instance label (omitted when None).
    extraLabel: str
        Extra label in case of multiple telescope config files
        (omitted when None).

    Returns
    -------
    str
        File name.
    """
    parts = ["CTA-{}-{}-{}".format(site, telescopeModelName, modelVersion)]
    for suffix in (label, extraLabel):
        if suffix is not None:
            parts.append("_{}".format(suffix))
    parts.append(".cfg")
    return "".join(parts)
def _mat_sym_dims(symbol):
"""Return codegen Argument dimensions for a MatrixSymbol."""
return ((0, symbol.shape[0] - 1), (0, symbol.shape[1] - 1)) | 771eb3e56481cdde3dcaf7c5013e09351526dcc1 | 97,650 |
def archive_list(soup_obj) -> list:
    """
    Return the URLs of all XKCD comics from the archive page.

    Takes a BeautifulSoup object of the archive page and prefixes every
    anchor href in the archive table with the site's base URL,
    e.g. 'https://www.xkcd.com' + '/124/'.
    """
    container = soup_obj.find('body').find('div', id='middleContainer')
    base_url = 'https://www.xkcd.com'
    return [base_url + anchor['href'] for anchor in container.find_all('a')]
def _is_cuda(*args):
"""Returns True is any of the argument is on a CUDA device, False
otherwise."""
for arg in args:
if arg.is_cuda:
return True
return False | 68ae140afebe0bde8c2d10d2a3c37195475e3138 | 97,660 |
def hole_current_density(mu=0, density=0, d_phi_d_z=0,
                         diffusivity=0, dp_dz=0):
    """
    Return the hole current density.

    Parameters
    ----------
    mu : mobility (default 0)
    density : hole density (default 0)
    d_phi_d_z : potential gradient (default 0)
    diffusivity : diffusion coefficient (default 0)
    dp_dz : hole density gradient (default 0)

    Returns
    -------
    The hole current density:
    j = -(mu * density * d_phi_d_z) - (diffusivity * dp_dz)
    """
    drift = mu * density * d_phi_d_z
    diffusion = diffusivity * dp_dz
    return -drift - diffusion
def should_save_html_according_response_code(code, allowed_list):
    """
    Args:
        code (int): response status code
        allowed_list (list): response status codes allowed to save html
    Returns:
        bool: True when allowed_list is empty (save all responses) or
        when the code is in the allowed list.
    """
    if not allowed_list:
        return True
    return code in allowed_list
def make_hash_table(nbuckets):
    """
    Return an empty hash table with ``nbuckets`` independent empty
    buckets (distinct lists, not aliases of one list).
    """
    return [[] for _ in range(nbuckets)]
def leading_num_key(s):
    """Keys for sorting strings, based on leading multidigit numbers.

    A normal string comparison compares character by character, e.g.,
    "101P" is less than "1P" because "0" < "P".  `leading_num_key`
    generates keys so that `sort` considers the leading multidigit
    integer, e.g., "101P" > "1P" because 101 > 1.

    Parameters
    ----------
    s : string

    Returns
    -------
    keys : tuple
        `keys[0]` is the leading number (0 when there is none),
        `keys[1]` is the rest of the string after the leading digits.
    """
    # Scan past the leading digits.  The original version set the suffix
    # inside the digit loop (sfx = s[i:] while s[i] was still a digit),
    # so it returned the last digit glued to the suffix, e.g.
    # "101P" -> (101, "1P") instead of (101, "P").
    i = 0
    while i < len(s) and s[i].isdigit():
        i += 1
    leading = int(s[:i]) if i > 0 else 0
    return leading, s[i:]
def getIndices(lines):
    """Return a list of (index1, index2, pair probability) tuples.

    Only lines ending with 'ubox' carry index data; all other lines are
    ignored.
    """
    index_list = []
    for line in lines:
        if not line.endswith('ubox'):
            continue
        # Fields are whitespace-separated: up, down, probability, 'ubox'.
        up_index, down_index, probability, _ubox = line.split()
        index_list.append((int(up_index), int(down_index), float(probability)))
    return index_list
def make_modifier_resized(target_size):
    """Make a string designating a resize transformation.

    The final image size may differ slightly from this size as it only
    reflects the size targeted.

    Args:
        target_size: Target size of rescaling in x,y,z.
    Returns:
        String designating the resize transformation.
    """
    x, y, z = target_size
    return f"resized({x},{y},{z})"
from typing import Dict
from typing import Tuple
def get_colormap() -> Dict[str, Tuple[int, int, int]]:
    """
    Get the defined colormap.
    :return: A mapping from the class names to the respective RGB values
        (0-255 per channel).
    """
    classname_to_color = {  # RGB.
        "noise": (0, 0, 0),  # Black.
        "animal": (70, 130, 180),  # Steelblue
        "human.pedestrian.adult": (0, 0, 230),  # Blue
        "human.pedestrian.child": (135, 206, 235),  # Skyblue,
        "human.pedestrian.construction_worker": (100, 149, 237),  # Cornflowerblue
        "human.pedestrian.personal_mobility": (219, 112, 147),  # Palevioletred
        "human.pedestrian.police_officer": (0, 0, 128),  # Navy,
        "human.pedestrian.stroller": (240, 128, 128),  # Lightcoral
        "human.pedestrian.wheelchair": (138, 43, 226),  # Blueviolet
        "movable_object.barrier": (112, 128, 144),  # Slategrey
        "movable_object.debris": (210, 105, 30),  # Chocolate
        "movable_object.pushable_pullable": (105, 105, 105),  # Dimgrey
        "movable_object.trafficcone": (47, 79, 79),  # Darkslategrey
        "static_object.bicycle_rack": (188, 143, 143),  # Rosybrown
        "vehicle.bicycle": (220, 20, 60),  # Crimson
        "vehicle.bus.bendy": (255, 127, 80),  # Coral
        "vehicle.bus.rigid": (255, 69, 0),  # Orangered
        "vehicle.car": (255, 158, 0),  # Orange
        "vehicle.construction": (233, 150, 70),  # Darksalmon
        "vehicle.emergency.ambulance": (255, 83, 0),
        "vehicle.emergency.police": (255, 215, 0),  # Gold
        "vehicle.motorcycle": (255, 61, 99),  # Red
        "vehicle.trailer": (255, 140, 0),  # Darkorange
        "vehicle.truck": (255, 99, 71),  # Tomato
        "flat.driveable_surface": (0, 207, 191),  # nuTonomy green
        "flat.other": (175, 0, 75),
        "flat.sidewalk": (75, 0, 75),
        "flat.terrain": (112, 180, 60),
        "static.manmade": (222, 184, 135),  # Burlywood
        "static.other": (255, 228, 196),  # Bisque
        "static.vegetation": (0, 175, 0),  # Green
        "vehicle.ego": (255, 240, 245)
    }
    return classname_to_color
def get_license_link_for_filename(filename, urls):
    """
    Return the link for `filename` found in the `urls` list of URLs or
    paths. Raise an exception if no link is found or if there is more
    than one link for that file name.
    """
    # The original compared against the literal text "(unknown)" -- a
    # templating placeholder left in place of the filename -- so it could
    # never match real URLs.  Match on "/<filename>" as intended.
    matches = [url for url in urls if url.endswith(f"/{filename}")]
    if not matches:
        raise Exception(f"Missing link to file: {filename}")
    if len(matches) != 1:
        raise Exception(f"Multiple links to file: {filename}: \n" + "\n".join(matches))
    return matches[0]
import hashlib
def calculate_file_hash(file_object):
    """
    Return the sha256 hex digest of an uploaded file.

    Large uploads (multiple chunks) are hashed incrementally; small ones
    are read in one call.
    """
    digest = hashlib.sha256()
    if file_object.multiple_chunks():
        for chunk in file_object.chunks(digest.block_size):
            digest.update(chunk)
    else:
        digest.update(file_object.read())
    return digest.hexdigest()
import struct
def unpack_compact_int(bytestr):
    """Deserialize a Bitcoin CompactSize unsigned integer.

    See
    https://bitcoin.org/en/developer-reference#compactsize-unsigned-integers

    Args:
        bytestr (bytes): bytes beginning with a CompactSize-encoded
            unsigned integer.
    Returns:
        (int, bytes): the decoded integer and the remaining bytes
        (the original docstring claimed a bare int, but the function has
        always returned this tuple).
    """
    prefix = bytestr[0]
    if prefix < 0xfd:
        # Single-byte value.
        return (prefix, bytestr[1:])
    if prefix == 0xfd:
        # 0xfd marker: 16-bit little-endian value follows.
        return (struct.unpack('<H', bytestr[1:3])[0], bytestr[3:])
    if prefix == 0xfe:
        # 0xfe marker: 32-bit little-endian value follows.
        return (struct.unpack('<I', bytestr[1:5])[0], bytestr[5:])
    # A byte can only be 0xff here (the original had an unreachable
    # `return None` branch after this exhaustive test).
    return (struct.unpack('<Q', bytestr[1:9])[0], bytestr[9:])
import json
def get_dict_from_json(fp):
    """Read the file-like object *fp* as JSON and return it as a dict.

    Raises:
        ValueError: when the stream is not valid JSON or cannot be
            converted to a dict.
    """
    try:
        return dict(json.load(fp))
    except Exception as exc:
        # The original used fp.filename, but plain file objects expose
        # .name, so the error handler itself raised AttributeError.
        source = getattr(fp, "filename", getattr(fp, "name", repr(fp)))
        raise ValueError("Could not get a dict from %s" % source) from exc
def distinct_scavenger_hunt_tasks(submissions):
    """Get distinct Scavenger Hunt tasks from a queryset of submissions."""
    task_values = submissions.values('task')
    return task_values.distinct()
def color565(r, g, b):
    """Pack a 24-bit RGB color into 16-bit RGB565 (5-6-5 bits)."""
    red = (r & 0xf8) << 8
    green = (g & 0xfc) << 3
    blue = b >> 3
    return red | green | blue
import networkx as nx
def cluster(linkage):
    """
    Cluster all genomes based on established linkages using networkx.
    :param linkage: Dictionary of dictionaries (adjacency mapping)
    :return: A dictionary mapping a 1-based cluster index to the list of
        genomes in that connected component. Member order within a
        cluster is arbitrary (it passes through set()).
    """
    g = nx.from_dict_of_dicts(linkage)
    clustered = []  # nodes already assigned to an earlier cluster
    clusters = {}
    clust_num = 1
    for n in g.nodes():
        c = [n]
        if n in clustered: continue
        # Depth-first traversal collects every node reachable from n.
        edges = list(nx.dfs_edges(g, n))
        for e in edges:
            n1, n2 = e
            clustered += [n1, n2]
            c += [n1, n2]
        c = list(set(c))
        clusters[clust_num] = c[:]
        clust_num += 1
    return clusters
def format_isni(st):
    """Format an ISNI id by inserting a space after every 4 characters."""
    groups = [st[pos:pos + 4] for pos in range(0, len(st), 4)]
    return ' '.join(groups)
import torch
def weighted_binary_cross_entropy_with_logits(logits, labels, weights):
    """
    Weighted BCE over N samples, computed in a numerically stable form.

    :param logits: (N,) unnormalized scores
    :param labels: (N,) targets in {0, 1}
    :param weights: (N,) per-sample weights in [0, 1] for rareness
    :return: scalar loss (weighted mean; epsilon guards division by zero)
    """
    # max(x, 0) - x*y + log(1 + exp(-|x|)) is the stable BCE-with-logits.
    per_sample = (logits.clamp(min=0) - logits * labels
                  + torch.log(1 + torch.exp(-logits.abs())))
    weighted = weights * per_sample
    return weighted.sum() / (weights.sum() + 1e-12)
def _coords(shape):
"""
Return a list of lists of coordinates of the polygon. The list consists
firstly of the list of exterior coordinates followed by zero or more lists
of any interior coordinates.
"""
assert shape.geom_type == 'Polygon'
coords = [list(shape.exterior.coords)]
for interior in shape.interiors:
coords.append(list(interior.coords))
return coords | dd63ff6700111ad09b64b28baef7876988821b83 | 97,707 |
import base64
def base64_2_bytes(base64string):
    """Decode a base64 string into a bytes object."""
    decoded = base64.b64decode(base64string)
    return decoded
def preprocess(txt):
    """ Strip each line and drop the ones that are empty. """
    stripped = (line.strip() for line in txt.splitlines())
    return [line for line in stripped if line]
def get_pb_from_bin_file(filename, pb_value):
    """Parse the binary file at *filename* into *pb_value* and return it."""
    with open(filename, 'rb') as stream:
        pb_value.ParseFromString(stream.read())
    return pb_value
def _Lentz_Dn(z, N):
    """
    Compute the logarithmic derivative of the Ricatti-Bessel function.
    Args:
        z: function argument
        N: order of Ricatti-Bessel function
    Returns:
        This returns the Ricatti-Bessel function of order N with argument z
        using the continued fraction technique of Lentz, Appl. Opt., 15,
        668-671, (1976).
    """
    zinv = 2.0 / z
    alpha = (N + 0.5) * zinv
    aj = -(N + 1.5) * zinv
    alpha_j1 = aj + 1 / alpha
    alpha_j2 = aj
    ratio = alpha_j1 / alpha_j2
    runratio = alpha * ratio
    # Iterate the continued fraction until successive convergents agree
    # to ~1e-12 in magnitude (Lentz's algorithm).
    while abs(abs(ratio) - 1.0) > 1e-12:
        # Next partial term; the sign of zinv alternates each step.
        aj = zinv - aj
        alpha_j1 = 1.0 / alpha_j1 + aj
        alpha_j2 = 1.0 / alpha_j2 + aj
        ratio = alpha_j1 / alpha_j2
        zinv *= -1
        runratio = ratio * runratio
    # D_N(z) = -N/z + (converged continued fraction).
    return -N / z + runratio
from typing import List
def invert_positions(
    solution: List[int],
    origin: int,
    destiny: int
) -> List[int]:
    """
    Swap the elements at *origin* and *destiny* in place and return the
    list.
    ex:
        solution: [1,2,3], origin: 2, destiny: 3 -> [1,3,2]
    """
    solution[origin], solution[destiny] = solution[destiny], solution[origin]
    return solution
import math
def dcr_of_annulus(
    thickness: float, inner_radius: float, outer_radius: float, rho: float = 1.68e-8
):
    """Calculate the sheet resistance of an annulus.

    Requires outer_radius > inner_radius.
    """
    assert (
        outer_radius > inner_radius
    ), f"Outer radius must be greater than inner radius"
    log_ratio = math.log(outer_radius / inner_radius)
    return (2e3 * math.pi * rho) / (thickness * log_ratio)
import json
def json_serializable(cls):
    """
    Class decorator that adds a 'to_json' method to instances of the
    annotated class. The method serializes the instance's __dict__ via
    json.dumps with an encoder that falls back to __dict__ for nested
    objects.

    Returns the original (mutated) cls.
    """
    class _Encoder(json.JSONEncoder):
        def default(self, o):
            return o.__dict__

    def to_json(self) -> str:
        return json.dumps(self.__dict__, cls=_Encoder)

    cls.to_json = to_json
    return cls
def calcModDuration(duration, freq, ytm):
    """ Calculate the Modified Duration: duration / (1 + ytm/freq). """
    denominator = 1 + (ytm / freq)
    return duration / denominator
def bit_length(input):
    """
    Return the bit length of input.
    EX: 7 (0b111) has length 3
    EX: 8 (0b1000) has length 4
    NOTE: bit_length(0) returns 1 (bin(0) == '0b0'), unlike
    int.bit_length; for negative inputs the '-' sign is counted as a
    digit. Callers relying on those cases should verify the intent.
    """
    # NOTE(review): parameter name shadows the builtin input(); kept to
    # preserve the keyword-argument interface.
    # len('0b...') minus the two characters of the '0b' prefix.
    return len(bin(input)) - 2
import torch
def combine_heads(x):
    """Combine multi-head tensors along the depth dimension.

    Args:
        x: sequence of [batch_size, length, depth / num_heads] tensors,
           one per head.
    Returns:
        Tensor of shape [batch_size, length, depth].
    """
    return torch.cat(x, dim=2)
def clean_list_folder(list_complete: list, list_exclude: list):
    """Remove excluded folder paths and return the remainder sorted.

    Args:
        list_complete (list): All paths to search files (purged in place).
        list_exclude (list): Paths to exclude; an entry containing '*'
            (e.g. 'a/*') excludes every path starting with its root.
    Returns:
        list: sorted list of the remaining paths.
    """
    # Drop exact matches first.
    for excluded in list_exclude:
        if excluded in list_complete:
            list_complete.remove(excluded)
    # Roots of wildcard patterns such as 'path/*'.
    wildcard_roots = [
        excluded.replace("/*", "") for excluded in list_exclude if excluded.count("*")
    ]
    # Rebuild the list instead of calling remove() while iterating it:
    # the original mutated the list mid-iteration, which silently skipped
    # the element following every removal.
    kept = [
        path for path in list_complete
        if not any(path.startswith(root) for root in wildcard_roots)
    ]
    list_complete[:] = kept
    return sorted(kept)
def multiline_test(line):
    """
    test if the current line is a multiline with "=" at the end
    :param line: 'O1 3 -0.01453 1.66590 0.10966 11.00 0.05 ='
    :type line: string
    >>> line = 'C1 1 0.278062 0.552051 0.832431 11.00000 0.02895 0.02285 ='
    >>> multiline_test(line)
    True
    >>> line = 'C1 1 0.278062 0.552051 0.832431 11.00000 0.05 '
    >>> multiline_test(line)
    False
    """
    # Keep everything up to and including the last '=' (empty when the
    # line has no '='), strip trailing whitespace, and test the suffix.
    before, sep, _after = line.rpartition('=')
    return (before + sep).rstrip().endswith('=')
def apply(function, input_tensor, instruments, params={}):
    """ Apply the given function once per instrument.

    :param function: Function to be applied to the tensor.
    :param input_tensor: Tensor passed to each call.
    :param instruments: Iterable providing a collection of instruments.
    :param params: (Optional) dict of parameters forwarded to each call.
    :returns: Dict mapping '<instrument>_spectrogram' to each result.
    """
    outputs = {}
    for instrument in instruments:
        name = f'{instrument}_spectrogram'
        outputs[name] = function(input_tensor, output_name=name, params=params)
    return outputs
def set_config(client, name, value, ignore_errors=None):
    """Set a Creo config option.

    Args:
        client (obj): creopyson Client.
        name (str): Option name.
        value (str): New option value.
        ignore_errors (boolean, optional): Whether to ignore errors that
            might occur when setting the config option. Defaults to False.
    Returns:
        None.
    """
    payload = {"name": name, "value": value, "ignore_errors": False}
    if ignore_errors is not None:
        payload["ignore_errors"] = ignore_errors
    return client._creoson_post("creo", "set_config", payload)
from typing import Any
from typing import Dict
def recursive_parse(b: Any) -> Dict[str, Any]:
    """Recursively convert a BeancountType into a nested dictionary.

    A NamedTuple can be represented as a dict via its builtin _asdict()
    method; this walks any nested NamedTuple children (including lists
    of them) and converts them too.

    Args:
        b: The BeancountType to recursively parse.
    Returns:
        A nested dictionary with all parsed models.
    """
    parsed: Dict[str, Any] = {}
    for field, value in b._asdict().items():
        if hasattr(value, "_asdict"):
            parsed[field] = recursive_parse(value)
        elif isinstance(value, list) and value and hasattr(value[0], "_asdict"):
            parsed[field] = [recursive_parse(item) for item in value]
        else:
            parsed[field] = value
    return parsed
def f_string_2(value):
    """Format value with 2 decimal places."""
    return '{:.2f}'.format(value)
def cow_line_splitter(cow_line):
    """
    Turn a single line from the cow data file into a [cow, weight] list.
    """
    trimmed = cow_line.strip()
    return trimmed.split(",")
import re
def _is_int(string_data):
"""
Verifies if string is a valid int.
Parameters
----------
string_data: str
The string to be evaluated.
Returns
-------
bool: True on success, false otherwise.
"""
return re.match(r"[-+]?\d+$", string_data) is not None | 6cbcb8ac655ad5ba017f576ef99c238572e4d8f9 | 97,754 |
def fabs(x):
    """
    Return the absolute value of x (thin wrapper over the builtin abs).
    """
    return abs(x)
def get_pipeline_info(pipeline):
    """ Print info about a pipeline and return its operations and depth.

    :param pipeline: pipeline to process (its structure is printed as a
        side effect)
    :return obtained_operations: string form of every node
    :return depth: depth of the pipeline as an int
    """
    operations = list(map(str, pipeline.nodes))
    depth = int(pipeline.graph_depth)
    pipeline.print_structure()
    return operations, depth
def merge_lists(x, y):
    """
    Merge ``y`` list items into list ``x`` avoiding duplicate entries,
    including duplicates occurring within ``y`` itself.
    Return the updated ``x``.
    """
    seen = set(x)
    for item in y:
        # The original froze `seen` before filtering, so a value that
        # appeared twice in y (but not in x) was appended twice.
        if item not in seen:
            seen.add(item)
            x.append(item)
    return x
import requests
from bs4 import BeautifulSoup
def load_page(url: str):
    """
    Load a page with BeautifulSoup.
    Args:
        url: The url to load
    Returns:
        BeautifulSoup html (parsed with the stdlib 'html.parser')
    """
    print("Loading page: {}".format(url))
    # NOTE(review): no timeout or status-code check on the request --
    # requests.get may block indefinitely or return an error page that is
    # then parsed as-is; confirm this is acceptable for callers.
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    return soup
def filtered_core_metadata(core):
    """Return a copy of *core* without internal bookkeeping keys,
    to avoid returning them via the REST API."""
    internal_keys = ("_systemd_coredump", "_core_dir")
    return {key: value for key, value in core.items() if key not in internal_keys}
def r_from_rd_m(rd, rw_m=0.1):
    """
    Translate a dimensionless distance into a dimensional one.

    rd - dimensionless distance
    rw_m - well radius, m
    """
    return rd * rw_m
import yaml
def read_yaml_file(filename):
    """Read a YAML file and return its parsed content."""
    with open(filename, 'r') as stream:
        contents = stream.read()
    return yaml.safe_load(contents)
def skip_row(datum, header):
    """Return True when a row has four or fewer truthy header fields,
    i.e. it has not been (sufficiently) filled in for this sheet."""
    filled = sum(1 for key in header if datum.get(key))
    return filled <= 4
def expand_to_list(item, n):
    """Return *item* unchanged if it is already a list of length n
    (asserted), otherwise a list of n references to *item*."""
    if type(item) is not list:
        return [item] * n
    assert len(item) == n
    return item
from functools import reduce
def concat(lists):
    """
    Flatten one level of nesting: concatenate the given lists into a
    single list.
    """
    # Linear-time flatten; the original reduce(lambda x, y: x + y, ...)
    # rebuilt the accumulator on every step, making it quadratic.
    flat = []
    for sub in lists:
        flat += sub
    return flat
from typing import Sequence
from typing import Tuple
def reorder_indices(lst: Sequence[str], target: Sequence[str]) -> Tuple[int, ...]:
    """
    Determine how to bring a list with unique entries to a different
    order. Supports only lists of strings.

    :param lst: input list
    :param target: list in the desired order
    :return: the indices that reorder the input into the target.
    :raises: ``ValueError`` for invalid inputs.
    """
    if set([type(entry) for entry in lst]) != {str}:
        raise ValueError('Only lists of strings are supported')
    if len(set(lst)) < len(lst):
        raise ValueError('Input list elements are not unique.')
    if set(lst) != set(target) or len(lst) != len(target):
        raise ValueError('Contents of input and target do not match.')
    return tuple(lst.index(entry) for entry in target)
from datetime import datetime
def gmt_epoch(mtime: str) -> int:
    """Convert 'YYYY-MM-DD HH:MM:SS' in GMT to a Unix epoch timestamp."""
    parsed = datetime.strptime(mtime, '%Y-%m-%d %H:%M:%S')
    delta = parsed - datetime(1970, 1, 1)
    return int(delta.total_seconds())
import re
def clean_path(path):
    """
    Ensure that a provided S3 path follows the convention enforced by
    rivet.

    Args:
        path (str): The S3 path being cleaned
    Returns:
        str: The full, cleaned S3 path
    Raises:
        ValueError: If 'path' violates rivet's S3 path conventions
    """
    if '//' in path:
        raise ValueError('Double-forward slashes (\'//\') are not permitted '
                         'by rivet. Use \'rivet.read_badpractice_file\' '
                         'if reading such a file is necessary.')
    if '..' in path:
        raise ValueError('Double-dots (\'..\') are not permitted by rivet.')
    # A '.' is only allowed in the final path component (file extension).
    first_dot = path.find('.')
    if first_dot != -1 and first_dot < path.rfind('/'):
        raise ValueError('Period characters (\'.\') are not permitted '
                         ' by rivet except in file extensions.')
    return path
def generate_polygon_from_horizontal_line(line_coords=None, thickness=None):
    """
    Generate coordinate pairs forming a polygon of the given thickness
    around a horizontal line.

    :param line_coords: list of [x, y] pairs of a horizontal line
        (sorted in place by x).
    :param thickness: overall thickness of the resulting polygon.
    :returns: list of [x, y] pairs of the polygon, or False when either
        argument is missing.
    """
    if line_coords is None or thickness is None:
        return False
    half = thickness // 2
    line_coords.sort(key=lambda pair: pair[0])
    polygon = []
    # Top edge, traversed right-to-left; y is clamped at 0.
    for x, y in line_coords[::-1]:
        polygon.append([x, max(y - half, 0)])
    # Bottom edge, traversed left-to-right.
    for x, y in line_coords:
        polygon.append([x, y + half])
    return polygon
def get_minimum_weight(G, S, T):
    """
    Return the minimum weight of an edge crossing the cut (S, T) of a
    networkx graph G. Unweighted edges count as weight 1; the result is
    float('inf') when no edge crosses the cut.

    :param G: A networkx graph
    :param S: A subset of vertices in G
    :param T: A subset of vertices in G
    """
    best = float('inf')
    for u in S:
        for v in T:
            if not G.has_edge(u, v):
                continue
            edge_data = G[u][v]
            weight = edge_data["weight"] if "weight" in edge_data.keys() else 1
            best = min(best, weight)
    return best
def find_parent(root, child):
    """
    Find the parent element of @child, recursively searching through
    @root (depth-first).

    Args:
        root (ET.Element): Root of the xml element tree to search.
        child (ET.Element): Child element whose parent is to be found.
    Returns:
        None or ET.Element: Matching parent if found, else None.
    """
    for node in root:
        if node == child:
            return root
        match = find_parent(root=node, child=child)
        if match is not None:
            return match
    # Not found anywhere beneath this root.
    return None
def bin2x2(arr):
    """Sum ``arr`` over non-overlapping 2x2 blocks.

    Requires that both dimensions of ``arr`` are even.
    """
    half_rows = arr.shape[0] // 2
    half_cols = arr.shape[1] // 2
    return arr.reshape((half_rows, 2, half_cols, 2)).sum(-1).sum(1)
def _tuple_to_str(value: tuple) -> str:
""" returns a tuple as a string without parentheses """
return ','.join(map(str, value)) | fcdda680f043799595d23fac3c916167f6b66d3f | 97,832 |
def make_section_str(str="", width=60, symbol='-'):
    """Generate string for starting/ending sections of log messages.

    Pads ``str`` on both sides with ``symbol`` up to ``width`` characters
    (the result is one character short when the padding is odd).
    """
    padding = max(width - len(str), 0)
    side = symbol * (padding // 2)
    return "{0}{1}{0}".format(side, str)
def reset_exchange_msg(exchange_msg, variable_slot, new_data, scope):
    """Replace the ``scope`` entry of ``exchange_msg[variable_slot]`` with
    ``new_data`` (mutates the message in place) and return the message."""
    slot = exchange_msg[variable_slot]
    slot[scope] = new_data
    return exchange_msg
import math
def Percentile(data, percentile):
    """Find a percentile of a list of values via linear interpolation.

    Parameters:
      data: A sorted list of values.
      percentile: The percentile to look up, from 0.0 to 1.0.

    Returns:
      The interpolated percentile value.

    Raises:
      ValueError: If data is empty.
    """
    if not data:
        # Give the caller an actionable message instead of a bare ValueError().
        raise ValueError('cannot compute a percentile of empty data')
    # Fractional index of the requested percentile.
    k = (len(data) - 1) * percentile
    f = math.floor(k)
    c = math.ceil(k)
    if f == c:
        # k lands exactly on an element; no interpolation needed.
        return data[int(k)]
    # Linearly interpolate between the two surrounding values.
    return data[int(f)] * (c - k) + data[int(c)] * (k - f)
def localE(x, label, l):
    """
    Parameters:
        x      The spin configuration on the Buckyball lattice.
        label  The one chosen site of the Buckyball lattice.
        l      The Buckyball lattice nearest node labels of one chosen site.
    Returns:
        E      The local energy on one site of the Buckyball lattice.
    """
    spin = x[label]
    # Accumulate the coupling with each of the three nearest neighbours
    # listed in row ``label`` of the neighbour table ``l``.
    E = 0
    for j in range(3):
        E = E + spin * x[int(l[label, j])]
    return E
def _text_of_first_tag(dom, tag):
"""Returns the text inside the first tag of the dom object.
Args:
dom: The dom object.
tag: The tag name.
Returns:
A string.
Raises:
ValueError: If dom object doesn't contain the specified tag or if the
first tag doesn't have a text.
"""
tags = dom.getElementsByTagName(tag)
# Tag not found.
if len(tags) == 0 or tags[0].firstChild is None:
raise ValueError('No tag {} found'.format(tag))
# No text in first tag.
if tags[0].firstChild is None:
raise ValueError('No text in tag {} found'.format(tag))
return dom.getElementsByTagName(tag)[0].firstChild.nodeValue | 81a21dafc6db4bc14faba1b03d9ea09b91426773 | 97,848 |
import logging
import time
def stopwatch(func):
    """ Stopwatch Decorator.

    Use this decorator on any function to measure how long it took
    for the function to complete. This is in seconds, but may have fractions of a second
    if the system clock provides more precision.

    Notes:
        This is _logged_, not printed to the Terminal

    Examples:
        1. How long does it take to add an item to the cart?

        @stopwatch
        def add_item_to_cart(py):
            py.get('#add-item').click()
            py.get('#added-notification').should().be_visible()

        2. How long does it take to edit an item's available stock via the API
        and see it change in the UI?

        @stopwatch
        def update_available_stock(py, item, quantity):
            payload = {'item': item, 'qty': quantity}
            api.items.update(payload)
            py.get(f'#available-stock-{item}').should().have_text(quantity)
    """
    def wrapper(*args, **kwargs):
        log = logging.getLogger('driver')
        start_time = time.time()
        # Capture and propagate the return value; the original wrapper
        # dropped it, making every decorated function return None.
        result = func(*args, **kwargs)
        stop_time = time.time()
        func_name = func.__name__
        log.info(f'STOPWATCH - {func_name} took {stop_time - start_time} seconds')
        return result
    # Preserve the wrapped function's identity for introspection/debugging.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    return wrapper
import re
def validar_rg(rg):
    """
    Validate Brazilian RG numbers, returning the number as an int when valid.

    # Invalid RGs
    >>> validar_rg('abcdefghi')
    False
    >>> validar_rg('123')
    False
    >>> validar_rg('')
    False
    >>> validar_rg(None)
    False
    >>> validar_rg('123456789')
    False

    # Valid RGs
    >>> validar_rg('293748767')
    293748767
    >>> validar_rg('29.374.876-7')
    293748767
    >>> validar_rg(' 29 374 876 7 ')
    293748767
    """
    # Keep only the digit characters. Raw string avoids the invalid
    # escape-sequence warning that '\d' triggers on modern Python.
    rg = ''.join(re.findall(r'\d', str(rg)))
    if (not rg) or (len(rg) < 9):
        return False
    # Take the first 8 digits of the RG and derive the missing check digit.
    inteiros = [int(digit) for digit in rg if digit.isdigit()]
    novo = inteiros[:8]
    r = sum([(9 - i) * v for i, v in enumerate(novo)]) % 11  # type: int
    # A remainder of 10 maps to check digit 0.
    if r != 10:
        f = r
    else:
        f = 0
    novo.append(f)
    # If the derived number matches the original digits, the RG is valid.
    if novo == inteiros:
        return int(rg)
    return False
def make_key(*args):
    """Create the key concatenating all args with `:`.

    Parameters
    ----------
    args : str
        Parts of the key; each is converted to a string and joined with a `:`.

    Returns
    -------
    str
        The concatenated string
    """
    parts = (str(part) for part in args)
    return ":".join(parts)
def squeeze_layer(axis=1):
    """Layer for squeezing dimension along the axis."""
    def init_fun(rng, input_shape):
        del rng
        if axis < 0:
            raise ValueError("squeeze_layer: negative axis is not supported")
        # Drop the squeezed axis from the reported output shape.
        output_shape = input_shape[:axis] + input_shape[axis + 1:]
        return output_shape, ()

    def apply_fun(params, inputs, **kwargs):
        del params, kwargs
        return inputs.squeeze(axis)

    return init_fun, apply_fun
from typing import List
import re
def dummy_get_token_indices(text: str) -> List[List[int]]:
    """Get the start/stop char indices of word boundaries.

    >>> john_damascus_corinth = "Τοῦτο εἰπὼν, ᾐνίξατο αἰτίους ὄντας"
    >>> indices_words = dummy_get_token_indices(text=john_damascus_corinth)
    >>> indices_words[0:3]
    [[0, 5], [6, 11], [13, 20]]
    """
    word_pattern = re.compile(r"\w+")
    # Each match's span() is the [start, stop) character offset pair of a word.
    return [list(match.span()) for match in word_pattern.finditer(string=text)]
import struct
import base64
def pack_signature(r, s):
    """Combine R&S

    Note: This serialization exactly reflects de/serialization written in the
    uplink backend. Modifying the way ECDSA signatures are serialized may cause
    transactions sent by this SDK to be deemed invalid by the uplink node.
    """
    def length_prefixed(value):
        # Decimal text of the component prefixed with its big-endian
        # 16-bit length, round-tripped through str exactly as before.
        text = str(value)
        return (struct.pack(">h", len(text)) + text.encode()).decode()

    payload = '{}:{}'.format(length_prefixed(r), length_prefixed(s))
    return base64.b64encode(payload.encode())
def _unique_names_with_namespaces(item):
"""
Compute the unique key for the given (namespaced) item within a single
collection.
"""
return (item.metadata.name, item.metadata.namespace) | dfe122954eca231ffc80161f2dd8f8fbb4551806 | 97,863 |
def xor(a, b):
    """Computes the exclusive or between two blocks of 16 bytes"""
    # Fixed 16-byte block size, matching the original contract.
    return [a[i] ^ b[i] for i in range(16)]
from typing import List
def slices(series: str, length: int) -> List[str]:
    """
    :param series: str: A sequence of integers
    :param length: int: Length of sub-sequences to be returned
    :return: List[str]: A list of all sub-series of length 'length' of the given series
    """
    # Guard clause: length must be positive and the series long enough.
    if length <= 0 or not series or length > len(series):
        raise ValueError("Invalid inputs for slicing the series !!")
    last_start = len(series) - length
    return [series[i:i + length] for i in range(last_start + 1)]
def flatten_fos(row):
    """Flatten field of study info into a list

    Args:
        row (dict): Row of article data containing a field of study field
    Returns:
        fields_of_study (list): Flat list of fields of study
    """
    flat = []
    for fields in row['fields_of_study']['nodes']:
        for field in fields:
            # Skip empty-list placeholder entries.
            if field != []:
                flat.append(field)
    return flat
def _quarter(q, on_court_width):
"""
Get the quarter 1-4 based on the starting court width at the time
"""
if on_court_width < 250:
q = 1
elif 250 <= on_court_width and on_court_width < 500:
q = 2
elif 500 <= on_court_width and on_court_width < 750:
q = 3
else:
q = 4
return q | 47368eab3ccb70da8177d08c5204f4b13bc9b208 | 97,876 |
def calc_overlap(list1, list2):
    """
    Calculate how much two lists overlap percentage wise
    """
    # Either list empty: no overlap can be computed.
    if not list1 or not list2:
        return 0, 0
    set1, set2 = set(list1), set(list2)
    # Note: denominators are the original list lengths, not the set sizes.
    pct1 = (1.0 - len(set1 - set2) / len(list1)) * 100
    pct2 = (1.0 - len(set2 - set1) / len(list2)) * 100
    return pct1, pct2
import string
import random
def short_random_id() -> str:
    """
    Generates a random string of 10 characters that
    starts with a random lowercase ascii letter or '_'
    and is followed by 9 randomly chosen digits, lowercase letters or '_'.

    Example:
    --------
    >>> short_random_id()
    'mcy_t5l0fp'

    Returns
    -------
    `str`
        Randomly generated 10 character string.
    """
    alphabet = string.ascii_lowercase + string.digits + "_"
    head = random.choice(string.ascii_lowercase + "_")
    tail = "".join(random.choice(alphabet) for _ in range(9))
    return head + tail
import pathlib
def get_year(image):
    """
    Get the acquisition year of the image based on the filename.

    Parameters
    ----------
    image : Path
        Path to tiff image.

    Returns
    -------
    year : str
        Acquisition year, taken as the first four characters of the filename.
    """
    basename = pathlib.Path(image).name
    return basename[:4]
def pelem(elem):
    """
    Helper to extract text from a bs4 element
    """
    # Empty string stands in for a missing element.
    return "" if elem is None else elem.text
def rootssq(ll):
    """Return the root of the sum of the squares of the list of values. If a
    non-iterable is passed in, just return it."""
    try:
        total = sum(value ** 2 for value in ll)
    except TypeError:
        # ll is not iterable: pass it straight through.
        return ll
    return total ** 0.5
def get_relative_path(data_path, extra_path):
    """Merge path: append ``extra_path`` to ``data_path`` with ``+``."""
    combined = data_path + extra_path
    return combined
def _find_adapter(adpt_list, adpt_name):
"""
Will return the adapter in the adpt_list for the adpt_name. If it does
not exist, None will be returned.
:param adpt_list: The list of adapters to search through.
:param adpt_name: The name of the adapter to find in the list.
:return: The adapter that corresponds to the adapter name. If one is not
in the list, None is returned.
"""
for adpt in adpt_list:
if adpt.name == adpt_name:
return adpt
return None | 2c165c75253f788954b784a08ba3922a8ffca127 | 97,902 |
def gc_content(seq):
    """Return the GC content of the given sequence (e.g. the
    fraction of nucleotides that are either G or C). Counts uppercase
    'G'/'C' characters only, as before."""
    gc_count = sum(1 for base in seq if base in 'GC')
    return gc_count / len(seq)
def session_length_filter(dataframe, session_max=600):
    """If dataset has start and stop times for a session, filters out sessions exceeding max defined length

    Parameters
    ----------
    dataframe : pandas DataFrame
        DataFrame of all the records including single timestamp and session duration
    session_max : int
        The max length of a session in seconds to be included as a mobility candidate session

    Returns
    -------
    filtered_dataframe : pandas DataFrame
        DataFrame with all the records with a single timestamp and those with sessions shorter than defined
        by `session_max`
    """
    try:
        # Compute durations as a standalone Series instead of assigning a
        # temporary 'session_length' column, which mutated the caller's frame.
        session_lengths = dataframe['session_end'] - dataframe['session_start']
        mobility_sessions = session_lengths <= session_max
        filtered_dataframe = dataframe[mobility_sessions].reset_index(drop=True)
    except (KeyError, TypeError):
        # Columns missing or not subtractable: keep the original best-effort
        # behaviour of passing the data through unchanged (no bare except).
        filtered_dataframe = dataframe
    return filtered_dataframe
def does_group_exist(iam, group_name):
    """Check if the group name exists.

    Parameters
    ----------
    iam: iam client, required
    group_name: string, required

    Returns
    ------
    bool: returns if the group exists
    """
    try:
        response = iam.get_group(GroupName=group_name)
        # Present iff the lookup succeeded and returned a "Group" entry.
        return "Group" in response
    except iam.exceptions.NoSuchEntityException as e:
        print(e)
        return False
from typing import Union
from pathlib import Path
def ensure_existing_dir(path: Union[str, Path]):
    """Ensure provided path exists and is a directory.

    Args:
        path: path to check

    Returns:
        Path: Path object.
        NoneType: If path does not exists or is not a directory
    """
    candidate = Path(path)
    # Path.is_dir() is False both for missing paths and for non-directories,
    # collapsing the original two checks into one.
    return candidate if candidate.is_dir() else None
import collections
def GC_partial(portion: str):
    """Manually compute GC content percentage in a DNA string, taking
    ambiguous values into account (according to standard IUPAC notation).

    Parameters
    ----------
    portion : str
        DNA sequence on which GC content is computed.

    Returns
    -------
    float :
        The percentage of GC in the input string.
    """
    sequence_count = collections.Counter(portion)
    # G/C/S count fully; D/H contribute 1/3, V/B 2/3, and the remaining
    # ambiguity codes (N, Y, R, K, M) 1/2 of a GC each.
    gc = (
        sum([sequence_count[i] for i in "gGcCsS"])
        + sum([sequence_count[i] for i in "DdHh"]) / 3.0
        + 2 * sum([sequence_count[i] for i in "VvBb"]) / 3.0
        + sum([sequence_count[i] for i in "NnYyRrKkMm"]) / 2.0
    ) / len(portion)
    # The original `return 0 or 100 * gc` was a no-op: `0 or x` always
    # evaluates to x. Return the percentage directly.
    return 100 * gc
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.