content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def sanitize_username(username: str) -> str:
    """
    Replace non-word characters in a username with underscores.

    Note: illegal characters are substituted with '_', not removed
    (the previous docstring incorrectly said "remove").

    :param username: The username to sanitize
    :return: The sanitized username
    """
    # \W is the standard shorthand for [^\w]
    return re.sub(r'\W', '_', username)
def keep_going(steps, num_steps, episodes, num_episodes):
    """Return True while neither the episode nor the step budget is exhausted.

    A budget of 0/None means "no limit" for that dimension.
    """
    episode_limit_hit = bool(num_episodes) and episodes >= num_episodes
    step_limit_hit = bool(num_steps) and steps >= num_steps
    return not (episode_limit_hit or step_limit_hit)
import MySQLdb
def guess_type(m):
    """
    Map a MySQLdb column type code to a generic fieldtype name.

    :param m: type code from a MySQLdb cursor description
        (i.e. ``cursor.description[i][1]``)
    :return: 'Currency' for numeric columns, 'Date' for date columns,
        'Data' for everything else
    """
    # MySQLdb.NUMBER / MySQLdb.DATE are DBAPITypeObject groups that support
    # membership tests against type codes
    if m in MySQLdb.NUMBER:
        return 'Currency'
    elif m in MySQLdb.DATE:
        return 'Date'
    else:
        return 'Data'
def get_new_corners(bb1, bb2):
    """Return the corner pair of the smallest box enclosing both bounding boxes.

    Each bounding box is ((upper_x, upper_y), (lower_x, lower_y)).
    """
    (ux1, uy1), (lx1, ly1) = bb1
    (ux2, uy2), (lx2, ly2) = bb2
    upper_left = (min(ux1, ux2), min(uy1, uy2))
    lower_right = (max(lx1, lx2), max(ly1, ly2))
    return upper_left, lower_right
def first(value):
    """Return the first item of a list, or '' when the list is empty."""
    try:
        head = value[0]
    except IndexError:
        return ''
    return head
from typing import List
def _update_velocities(v: List[float],
new_a: List[float],
a: List[float],
dt: float
) -> None:
"""Update velocities using current and new accelerations in place"""
n_particles = len(v) // 3
for i in range(n_particles):
for k in range(3): # x, y, z
idx = 3 * i + k
v[idx] += (new_a[idx] + a[idx]) * (dt / 2.0)
return None | 5f8b95a24487faffd69c52ad8cddecd21294b0e2 | 114,423 |
def _var(ctor, default, callback=None):
"""
Creates a Tkinter variable, initialize it and possibly trace it.
@param default the variable initial value
@param callback function to invoke whenever the variable changes its value
@return the created variable
"""
var = ctor()
var.set(default)
if callback:
var.trace('w', callback)
return var | 7fbe57c7013cb73d0f4cbafc83b823f8ecf4df87 | 114,429 |
import torch
def _masked_loss_func(output, masks, samples):
""" Computes the MSE loss.
:param output: tensor(Float); imputed samples
:param masks: tensor(Float); corresponding masks
:param samples: tensor(Float); original samples (with missing values)
:return: tensor(Float); loss obtained
"""
if masks is None:
masks = torch.zeros(samples.shape)
mse_loss = torch.sum((~masks.bool() * samples - ~masks.bool() * output) ** 2)
if torch.sum(1 - masks) > 0:
mse_loss /= torch.sum(1 - masks)
return mse_loss | 2168991a8f79f2ed82b948bb508ef310de7236d9 | 114,431 |
def nations_from_str(nations, home, identifier):
    """Resolve ``identifier`` to a set of nations.
    Args:
        nations (set): Set of all possible nations.
        home (str): The home nation.
        identifier (str): A country name or a group keyword
            ('Entrants' or 'Foreigners').
    Returns:
        Set of identified nations.
    Raises:
        ValueError if ``identifier`` is neither a group keyword nor a
        recognized nation.
    """
    if identifier == 'Entrants':
        return nations
    if identifier == 'Foreigners':
        # everyone except the home nation
        foreigners = nations.copy()
        foreigners.discard(home)
        return foreigners
    if identifier in nations:
        return {identifier}
    raise ValueError(f'{identifier} is not a recognized country')
import re
def _parse_openssl_text(text, data_type='Data', data_name='data'):
"""Parse OpenSSL text certificate entry."""
m = re.search(r'^\s*{0}(.*)$'.format(re.escape(data_type)), text, flags=re.MULTILINE)
if m is None:
raise Exception('Cannot find "{data_type}" in {data_name}!'.format(data_name=data_name, data_type=data_type))
return m.group(1).strip() | 54ba8c2080e56db7ce398e923d0cd6cdf4909fe3 | 114,437 |
def has_flag(flag, cmd):
    """Return True if any element of *cmd* starts with *flag*, else False."""
    for token in cmd:
        if token.startswith(flag):
            # bool() of the matched token mirrors the original behaviour
            return bool(token)
    return False
def get_ref_spec(spec='LIOx'):
    """
    Store of reference species for families
    Parameters
    ----------
    spec (str): species/tracer/variable name
    Returns
    -------
    ref_spec (str) reference species for a given family
    Raises
    ------
    KeyError if the species is not a known family member
    Notes
    -----
    This is for use in conbination with functions that calculate relative values
    (e.g. in units of Ox, I, etc)
    """
    # NOTE: the original mapping listed 'PClOxI' twice with the same value;
    # the duplicate entry has been removed.
    d = {
        'Cly': 'Cl',
        'Cl': 'Cl',
        'LOx': 'O3',
        'POx': 'O3',
        'LOX': 'O3',
        'POX': 'O3',
        'LIOx': 'I',
        'PIOx': 'I',
        'PClOx': 'Cl',
        'LClOx': 'Cl',
        'PClOxI': 'Cl',
        'LClOxI': 'Cl',
        'PClOxII': 'Cl',
        'LClOxII': 'Cl',
        'LCI': 'Cl',
        'LCII': 'Cl',
        'PBrOx': 'Br',
        'LBrOx': 'Br',
        'Br': 'Br',
        'Bry': 'Br',
        'I': 'I',
        'Iy': 'I',
        'IxOy': 'I',
        'SO4': 'S',
        'NIT': 'N',
        'NH4': 'N',
    }
    return d[spec]
def is_iterable(obj):
    """
    Test if an object is iterable.
    :param obj: The object to test.
    :return: True if the object is iterable, False otherwise.
    """
    try:
        iter(obj)
    except TypeError:
        # iter() raises TypeError for objects supporting neither the
        # iterator protocol nor the sequence protocol; the previous bare
        # 'except' also swallowed KeyboardInterrupt/SystemExit
        return False
    return True
def volume_change_fusion_calc(density_liquid, density_solid, molar_mass):
    """Molar volume change on fusion: (1/rho_liquid - 1/rho_solid) * M."""
    specific_volume_change = 1 / density_liquid - 1 / density_solid
    return specific_volume_change * molar_mass
import torch
def _float_from_bool(a):
"""
Since pytorch only supports matrix multiplication on float,
IoU computations are done using floating point types.
This function binarizes the input (positive to True and
nonpositive to False), and converts from bool to float.
If the data is already a floating-point type, it leaves
it keeps the same type; otherwise it uses float.
"""
if a.dtype == torch.bool:
return a.float()
if a.dtype.is_floating_point:
return a.sign().clamp_(0)
return (a > 0).float() | 3a16532903a44976a9dc036615c4887268096206 | 114,454 |
def _construct_ws_obj_ver(wsid, objid, ver, is_public=False):
"""Test helper to create a ws_object_version vertex."""
return {
"_key": f"{wsid}:{objid}:{ver}",
"workspace_id": wsid,
"object_id": objid,
"version": ver,
"name": f"obj_name{objid}",
"hash": "xyz",
"size": 100,
"epoch": 0,
"deleted": False,
"is_public": is_public,
} | ae9b5a66a5fb73a466f18c07ff71f395cf53f704 | 114,461 |
def getMinScore(queryText: str) -> int:
    """
    Return the minimum score for a queryText, based on its length.
    If we scale the min_score by the length of the section, we may be able to
    use 'max' score_mode always.
    Args:
        queryText (str): The text of the query.
    Returns:
        int: minimum score
    """
    # (exclusive upper bound on length, score) pairs, in ascending order
    thresholds = ((500, 20), (1000, 40), (1500, 50))
    length = len(queryText)
    for upper_bound, score in thresholds:
        if length < upper_bound:
            return score
    return 60
import pickle
def _load_dictionary(saved_location):
"""
Loads a saved dictionary from a given location
"""
with open(saved_location, "rb") as f:
word_dict = pickle.load(f)
with open(saved_location[:-4] + ".txt", "r") as f:
max_len = int(f.read())
return word_dict, max_len | ad1048fd7ceb81b738f48ee8c2c33c3d1228cc4d | 114,463 |
import math
def is_pentagonal(number):
    """
    Check given number to be a pentagonal number.

    A number x is pentagonal iff (sqrt(24*x + 1) + 1) / 6 is a positive
    integer. Integer arithmetic (math.isqrt) is used so the check stays
    exact for arbitrarily large inputs, where the previous float-based
    test could give wrong answers; non-positive inputs now return False
    instead of raising for negatives.

    :param number: value to be checked to be a pentagonal number.
    :returns: True when given value is a pentagonal number
    see http://en.wikipedia.org/wiki/Pentagonal_number
    >>> is_pentagonal(12)
    True
    >>> is_pentagonal(13)
    False
    """
    if number < 1:
        return False
    discriminant = 24 * number + 1
    root = math.isqrt(int(discriminant))
    # pentagonal iff 24n+1 is a perfect square and (sqrt+1) divisible by 6
    return root * root == discriminant and (root + 1) % 6 == 0
def get_offer_floor(html_parser):
    """
    Return the floor on which the apartment is located.
    :param html_parser: a BeautifulSoup object
    :rtype: string
    :return: The floor number ('0' for the ground floor, "parter")
    """
    raw = html_parser.find(class_="param_floor_no")
    # find() may return None; hasattr guards both None and tag-less results
    floor = raw.strong.text if hasattr(raw, 'strong') else ""
    if floor == "parter":
        return '0'
    return floor
def get_channel_for_utt(flist, ch, utt):
    """ Return a specific channel for one utterance.
    Raises a KeyError if the channel does not exist.
    :param flist: A dict representing a file list, i.e. the keys are the utt ids
    :param ch: The channel to fetch (i.e. X/CH1). Separator must be `/`
    :param utt: The utterance to fetch
    :return: Path to the file
    """
    node = flist[utt]
    # descend one path component at a time
    for part in ch.split('/'):
        if part not in node:
            raise KeyError('No channel {} for {}'.format(ch, utt))
        node = node[part]
    return node
import requests
def _get_charts(url):
    """Retrieve the current UK Top 40 chart entries.

    :param url: chart API endpoint returning JSON with an 'entries' list
    :return: the list of chart entries
    """
    response = requests.get(url).json()
    data = response['entries']
    return data
def get_open_network_transactions(session, NetworkTransaction, confirmation_threshold):
    """Get list of (transaction_type, txid) for transactions we need to check:
    those below the confirmation threshold that have a txid set.

    :param session: SQLAlchemy session
    :param NetworkTransaction: mapped model class to query
    :param confirmation_threshold: confirmations count below which a
        transaction is still considered open
    """
    # '!= None' (not 'is not None') is deliberate: SQLAlchemy overloads '!='
    # to emit 'IS NOT NULL' SQL -- hence the noqa
    ntxs = session.query(NetworkTransaction).filter(NetworkTransaction.confirmations < confirmation_threshold, NetworkTransaction.txid != None) # noqa
    return [(ntx.transaction_type, ntx.txid) for ntx in ntxs]
def _raw_misc_to_dict(raw):
"""
Converts text-form of misc to a dictionary.
misc will be stored in the form ["(key1, val1)", "(key2, val2)",...]
"""
ret = {}
for elem in raw:
key, _, val = elem.partition(',')
key = key[1:].strip()
val = val[:-1].strip()
ret[key] = val
return ret | d3b0889f6bf670c4b1f6cd33938aa9a2e555b0b2 | 114,475 |
def is_permutation(string_1, string_2):
    """Return True when string_1 is a rearrangement of string_2."""
    first, second = sorted(string_1), sorted(string_2)
    return first == second
from typing import List
def list_squares(n: int) -> List[int]:
    """Return the squares of the numbers from 0 to n inclusive.
    doctests:
    >>> list_squares(2)
    [0, 1, 4]
    >>> list_squares(5)
    [0, 1, 4, 9, 16, 25]
    """
    squares = []
    for value in range(n + 1):
        squares.append(value * value)
    return squares
import math
def buckling_factor_m_n(
        D11, D12, D22, D66, mode_m=1, mode_n=1,
        N_x=0, N_y=0, length_x=1, length_y=1):
    """
    Return the buckling factor of a simply-supported orthotropic laminate
    for a specific buckling mode, based on out-of-plane stiffnesses.
    INPUT
    - D11, D12, D22 and D66: out-of-plane stifnesses
    - length_x: plate dimensions (x-direction)
    - length_y: plate dimensions (y-direction)
    - N_x: compressive loading intensity in the x-direction
    - N_y: compressive loading intensity in the y-direction
    - mode_m: buckling mode (number of half-waves) in the x direction
    - mode_n: buckling mode (number of half-waves) in the y direction
    """
    alpha = (mode_m / length_x) ** 2
    beta = (mode_n / length_y) ** 2
    stiffness_term = (D11 * alpha ** 2
                      + 2 * (D12 + 2 * D66) * alpha * beta
                      + D22 * beta ** 2)
    load_term = alpha * abs(N_x) + beta * abs(N_y)
    return (math.pi ** 2) * stiffness_term / load_term
def remove_id(string):
    """
    Strip the "minecraft:" namespace prefix from a string.
    """
    prefix = "minecraft:"
    assert string.startswith(prefix), string
    return string[len(prefix):]
from pathlib import Path
from datetime import datetime
def create_date_path(root: Path, date: datetime) -> Path:
    """Build root/<year>/<month>/<day> from a root path and a date."""
    return root.joinpath(str(date.year), str(date.month), str(date.day))
def smoothed_estimate(n_x: int, N: int, d: int) -> float:
    """Estimate, with 1-Laplace (add-one) smoothing, the probability of a
    category from a multinomial distribution.
    Args:
        n_x (int): The count of some outcome "x" among ``N`` trials.
            SHOULD be non-negative and no greater than ``N``.
        N (int): The count of trials. SHOULD be non-negative.
        d (int): The count of distinct possible outcomes
            (i.e. the dimensionality of the distribution). SHOULD be positive.
    Returns:
        float: The smoothed probability estimate of outcome "x".
    """
    numerator = n_x + 1
    denominator = N + d
    return numerator / denominator
def check_parameter(data, parameter, min_length=None, max_length=None):
    """
    Check that *parameter* is present in the request data and, when bounds
    are given, that its length falls within [min_length, max_length].
    :param data: Dict containing all the request data
    :param parameter: Key to search for
    :param min_length: Optional, min length of the parameter
    :param max_length: Optional, max length of the parameter
    :return: True when present and within bounds, False otherwise
    """
    if parameter not in data:
        return False
    value_length = len(data[parameter])
    if min_length is not None and value_length < min_length:
        return False
    return not (max_length is not None and value_length > max_length)
def diff(orig, new):
    """ Calculates the difference between two dictionaries.
    Any key with a child list or dict which is, itself, changed will be
    considered changed.
    :param orig: the original (unmodified) dictionary
    :param new: the modified dictionary
    :return: a dictionary containing only those keys which were changed.
        Keys present only in ``orig`` (i.e. deletions) are not reported.
    """
    updated = {}
    for k, v in new.items():
        if k not in orig:
            # key added in `new`
            updated[k] = v
        elif isinstance(v, list):
            if len(v) != len(orig[k]):
                updated[k] = v
            else:
                # same length: element-wise compare, recursing into dict items
                has_change = False
                for i in range(len(v)):
                    if isinstance(v[i], dict) and diff(orig[k][i], v[i]):
                        has_change = True
                        break
                    elif v[i] != orig[k][i]:
                        has_change = True
                        break
                if has_change:
                    # the update needs to contain the ENTIRE new list, so
                    # Zoho doesn't zap non-updated elements
                    updated[k] = v
        elif isinstance(v, dict):
            if diff(v, orig[k]):
                # the update needs to contain the ENTIRE new dict, so
                # Zoho doesn't zap non-updated values
                updated[k] = v
        elif v != orig[k]:
            updated[k] = v
    return updated
def kurtosis(N, N2, N3, N4, **kwargs):
    """Kurtosis computed from the raw moments <N>, <N^2>, <N^3>, <N^4>."""
    variance = N2 - N ** 2
    fourth_central = N4 - 4 * N * N3 + 6 * (N ** 2) * N2 - 3 * (N ** 4)
    return fourth_central / variance ** 2
import json
def _loads_regions(regions_mapping_file: str):
"""loads regions mapping file
this function is cached to avoid unnecessary disk accesses
Args:
regions_mapping_file: the filename where the json mapping file is
Return:
dict created from the json file
"""
with open(regions_mapping_file, "r") as f:
return json.load(f) | 46c50de35ad91b07d5a00bec590a3474dc210518 | 114,506 |
import requests
from bs4 import BeautifulSoup
def get_text_by_title(title):
    """Get text from Wikisource based on title.
    If the text is split over several wikipages (which is the case with novels)
    then the full text will not be returned, - only the index page.
    Parameters
    ----------
    title : str
        Title of wikipage on Danish Wikisource.
    Returns
    -------
    text : str or None
        The text. Returns None if the page does not exist.
    """
    url = 'https://da.wikisource.org/w/api.php'
    params = {'page': title, 'action': 'parse', 'format': 'json'}
    data = requests.get(url, params=params).json()
    # the MediaWiki parse API returns rendered HTML under parse.text['*'];
    # a missing page yields an error payload with no 'parse' key
    if 'parse' in data:
        text = BeautifulSoup(data['parse']['text']['*'], "lxml").get_text()
    else:
        text = None
    return text
def check_rank(shape, required_rank):
    """Return True when *shape* is a tuple whose rank (length) equals the
    expected rank; non-tuples always yield False."""
    return isinstance(shape, tuple) and len(shape) == required_rank
def decode_labels(y_encoded, labels):
    """
    Map auto-incrementing integer codes back to their label values.
    """
    decoded = []
    for index in y_encoded:
        decoded.append(labels[index])
    return decoded
def is_sequence(x):
    """Is x a sequence? We say it is if it has a __getitem__ method."""
    return getattr(x, '__getitem__', None) is not None
def is_empty(string, trim_spaces=True, chars=None):
    """:yaql:isEmpty
    Returns true if the string, with leading and trailing chars removed, is
    empty (None is always considered empty).
    :signature: string.isEmpty(trimSpaces => true, chars => null)
    :receiverArg string: value to be checked for emptiness after trim
    :argType string: string
    :arg trimSpaces: true by default, which means string to be trimmed with
        chars. false means checking whether input string is empty
    :argType trimSpaces: boolean
    :arg chars: symbols for trimming. null by default, which means trim is
        done with whitespace characters
    :argType chars: string
    :returnType: boolean
    .. code::
        yaql> "abaab".isEmpty(chars=>"ab")
        true
        yaql> "aba".isEmpty(chars=>"a")
        false
    """
    if string is None:
        return True
    stripped = string.strip(chars) if trim_spaces else string
    return not stripped
def flatten(nested_list: list) -> list:
    """
    Flatten an arbitrarily nested list into a single flat list.
    :param nested_list: possibly nested list
    :return: flat list with the same elements in order
    """
    flat = []
    for element in nested_list:
        if isinstance(element, list):
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
def format_options(options):
    """Util for serializing dhcp options
    @options = [1,2,3]
    >>> format_options([1, 2, 3])
    '\x01\x02\x03'
    """
    return "".join(chr(code) for code in options)
def square(x):
    """
    Square a value; equivalent to x**2.
    Arguments:
        x (int|float): The value to square.
    Returns:
        The squared value.
    """
    return x ** 2
def count_characters(db):
    """Return the number of characters in the MongoDB database.
    Args:
        db (pymongo.database.Database): MongoDB database
    Returns:
        (int) Number of characters
    """
    # $count collapses the collection into a single {"count": N} document
    pipeline = [{"$count": "count"}]
    result = db["charactercreator.character"].aggregate(pipeline)
    return next(iter(result))["count"]
def normalised_scores(scores, turns):
    """
    The per-turn normalised scores matrix.
    Parameters
    ----------
    scores : list
        A scores matrix (S) of the form returned by the scores function.
    turns : integer
        The number of turns in each round robin.
    Returns
    -------
    list
        A normalised scores matrix (N) such that N = S / t, where t is the
        total number of turns played per repetition for a given player
        excluding self-interactions.
    """
    total_turns = turns * (len(scores) - 1)
    return [[score / total_turns for score in row] for row in scores]
def center_to_box(center: list) -> tuple:
    """
    Build a small bounding box (x, y, width, height) around a center point.
    :param center: Center point.
    :return: Point bounding box.
    """
    margin = 0.4
    x, y = center[0], center[1]
    return x - margin, y - margin, 2 * margin, 2 * margin
def tobin(deci_num, len=32):
    """
    Return *deci_num* as a binary bitfield string of the given length.
    Example: deci_num=1 and len=10 gives '0000000001'.

    NOTE: the parameter is named ``len`` (shadowing the builtin) to keep the
    original keyword interface intact.
    """
    bits = [str((deci_num >> position) & 1) for position in range(len - 1, -1, -1)]
    return "".join(bits)
def norm_mac(mac):
    """Normalize a MAC address from pypowervm format to OpenStack format:
    lower case, colon-separated octet pairs.
    :param mac: A pypowervm mac address. Ex. 1234567890AB
    :return: A mac that matches the standard neutron format.
        Ex. 12:34:56:78:90:ab
    """
    bare = mac.replace(':', '').lower()
    pairs = [bare[pos:pos + 2] for pos in range(0, len(bare), 2)]
    return ':'.join(pairs)
def islistable(obj):
    """Return True if obj is listable (a list, tuple, or set)."""
    return isinstance(obj, (set, tuple, list))
def convert_to_milli(val):
    """
    Convert a Kubernetes quantity string to an integer milli value.

    Generalized to accept fractional whole-unit quantities (e.g. '0.5'),
    which Kubernetes emits for CPU requests; previously such values raised
    ValueError.

    Examples:
        convert_to_milli('10m') -> 10
        convert_to_milli('10000m') -> 10000
        convert_to_milli('1') -> 1000
        convert_to_milli('0.5') -> 500
        convert_to_milli('100') -> 100000
    """
    if val[-1] == 'm':
        # already expressed in milli units
        return int(val.rstrip('m'))
    try:
        return 1000 * int(val)
    except ValueError:
        # fractional whole units, e.g. '0.5' CPUs -> 500m
        return int(1000 * float(val))
def fullname(obj):
    """
    Return the fully qualified class name of an object.
    Args:
        obj: (object) the object
    Returns: (str) the class name
    """
    cls = type(obj)
    return f"{cls.__module__}.{cls.__name__}"
def q_ntu(epsilon, c_min, temp_hot_in, temp_cold_in):
    """Compute the heat removal q for the NTU effectiveness method.
    Args:
        epsilon (int, float): The effectiveness value for the HX.
        c_min (int, float): minimum C value for NTU calculations.
        temp_hot_in (int, float): Hot side inlet temperature.
        temp_cold_in (int, float): Cold side inlet temperature.
    Returns:
        int, float: The value of the removal from the NTU method.
    """
    inlet_delta_t = temp_hot_in - temp_cold_in
    return epsilon * c_min * inlet_delta_t
def update_gms_internet_policy_services(
    self,
    services: dict,
) -> bool:
    """Set a new service list used in Overlay editor's Internet Policy
    section.
    .. list-table::
        :header-rows: 1
        * - Swagger Section
          - Method
          - Endpoint
        * - services
          - POST
          - /gms/services
    Saving a new service list will completely replace the current
    implementation. Any service IDs that were saved previously, but not
    included in the POST body will be removed. These services will also
    be removed from the overlay's policy list.
    :param services: Dictionary of services to be set in the form
        ``{"SERVICE_1" : {"name":"SERVICE_NAME_1"}, ...}``
    :type services: dict
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    # full replacement semantics: any service ID omitted from `services`
    # is deleted server-side (see docstring above)
    return self._post(
        "/gms/services",
        data=services,
        return_type="bool",
    )
import pickle
def load_model(filepath):
    """Load a pickled instance of SklearnModel.
    Parameters
    ----------
    filepath : str
        Path to the pickled model file.
    Returns
    -------
    SklearnModel
        Unpickled model.
    """
    # NOTE: pickle.load can execute arbitrary code -- only load trusted files.
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
import json
def load_translations(translation_file):
    """Load translations from the given JSON file object and return them as
    a map, with literal newline characters in both slugs and translations
    replaced by the two-character escape sequence '\\n'."""
    raw = json.load(translation_file)
    return {
        slug.replace('\n', '\\n'): text.replace('\n', '\\n')
        for slug, text in raw.items()
    }
def parse_role_links(role_links_filename):
    """
    Parse a TIGRFAMS_ROLE_LINK file into a {tigrfam: role} dict.
    Each line holds exactly two whitespace-separated fields.
    """
    mapping = {}
    with open(role_links_filename) as handle:
        for line in handle:
            key, value = line.strip().split()
            mapping[key] = value
    return mapping
def reverse_subtoken_map(subtoken_map):
    """
    Invert a subtoken map: for each mapped value, produce the
    (first_index, last_index) span of positions holding it, returned as a
    list in first-appearance order. This reverses the map created in
    create_subtoken_map.
    """
    spans = {}
    for position, value in enumerate(subtoken_map):
        if value in spans:
            lo, hi = spans[value]
            spans[value] = (min(lo, position), max(hi, position))
        else:
            spans[value] = (position, position)
    return list(spans.values())
def kw_cases(keyword):
    """ Return 3 variations on a passed keyword string: uppercase,
    lowercase, and Title Case.
    """
    transforms = (str.upper, str.lower, str.title)
    return [transform(keyword) for transform in transforms]
def get_counts_by_tract(mf_with_tract, output_col_name):
    """Count foreclosure rows per Census tract per year.
    Parameters
    ----------
    mf_with_tract : pandas df
        Foreclosure data with at least 'year' and 'GEOID' columns, where
        each row represents one foreclosed property.
    output_col_name : str
        Name for the resulting count column (e.g. "num_mortgage_foreclosures"
        or "lien-foreclosures").
    Returns
    -------
    pandas df
        Dataframe with columns ['year', 'GEOID', output_col_name].
    """
    counts = (
        mf_with_tract
        .groupby(["year", "GEOID"])
        .size()
        .to_frame(output_col_name)
        .reset_index()
    )
    return counts
def balanced_blockquotes(str):
    """
    Check whether triple-backtick blockquotes are balanced, i.e. the text
    contains an even number of "```" markers.
    Parameters
    ----------
    str : str
        text (parameter keeps its original name, shadowing the builtin)
    Returns
    -------
    True if blockquotes are balanced, False otherwise
    """
    return str.count("```") % 2 == 0
import time
def get_nth_day(day_name, n, year, month):
    """Get the Nth occurrence of a named weekday in a month (n=-1 for last).
    Args:
        day_name (str): name of day, full and capitalized, e.g. "Thursday"
        n (int): Nth occurrence of day_name (1 = first, -1 = last)
        year (int): 4-digit year
        month (int): integer month (1 = Jan, 2 = Feb, etc.)
    Returns:
        jday (int): Julian date
    """
    matches = []
    for day in range(1, 32):
        try:
            parsed = time.strptime("{} {} {}".format(year, month, day), "%Y %m %d")
        except ValueError:
            # day does not exist in this month (e.g. Feb 30)
            continue
        if time.strftime("%A", parsed) == day_name:
            matches.append(int(time.strftime("%j", parsed)))
    return matches[-1] if n == -1 else matches[n - 1]
def detect_and_decorate(decorator, args, kwargs):
    """
    Helper supporting both direct decorator application and the
    decorator-factory form (keyword arguments given first, then applied
    to a function).
    """
    # direct form: invoked with exactly one non-keyword callable argument --
    # act as a plain decorator on that argument
    if len(args) == 1 and not kwargs and callable(args[0]):
        return decorator(args[0])
    # any other positional args would be silently discarded, which would
    # confuse callers later -- reject them loudly instead
    if args:
        raise ValueError("this decorator cannot take positional args")
    # factory form: got 0+ kwargs and no positionals; return a decorator
    # that forwards the keyword arguments
    def inner_decorator(func):
        return decorator(func, **kwargs)
    return inner_decorator
def get_face_roi_size(feature: list):
    """
    Get the face patch size from a feature list.
    :param feature: list of features; index 11 holds the face ROI size
    :return: face patch size as a list
    """
    FACE_ROI_INDEX = 11
    return list(feature[FACE_ROI_INDEX])
from typing import Dict
from typing import OrderedDict
def gather_subtasks(d: Dict) -> OrderedDict:
    """Declare subtasks and log messages in order.
    Args:
        d (Dict): a dict-like object mapping subtasks to log messages
    Returns:
        OrderedDict: map tasks to log messages in order of execution
    Raises:
        TypeError: if any key is not a str, or any non-None value is not a str
    """
    for key in d.keys():
        if not isinstance(key, str):
            raise TypeError("Subtasks must be str")
    for value in d.values():
        if value is not None and not isinstance(value, str):
            raise TypeError("Subtask log messages must be str or NoneType")
    return OrderedDict(**d)
from pathlib import Path
def list_source_files(base_dir: Path) -> list[Path]:
    """Recursively list .cpp source files.
    Args:
        base_dir (Path): Base directory.
    Returns:
        list[Path]: Paths of source files.
    """
    collected: list[Path] = []
    for entry in base_dir.iterdir():
        if entry.is_dir():
            collected.extend(list_source_files(entry))
        elif entry.is_file() and entry.suffix == ".cpp":
            collected.append(entry)
    return collected
from pathlib import Path
from typing import List
def get_acqui_id(mpath: Path, cam_folders: List[str], sync_folder: str) -> List[str]:
    """
    Return the sorted, de-duplicated acquisition ids found within
    sync_folder across all cameras.
    Parameters
    ----------
    mpath : Path
        The parent micasense data directory that contains the cam_folders
    cam_folders : List[str]
        A list of the camera folder names, e.g. ["red_cam", "blue_cam"]
    sync_folder : str
        The name of the "SYNCXXXXSET" folder
    Returns
    -------
    acqi_ids : List[str]
        List of acquisition id's, e.g. ["0000", "0001", ..., "XXXX"]
    """
    ids = set()
    for camera in cam_folders:
        sync_dir = mpath / camera / sync_folder
        for tif in sync_dir.glob("**/*.tif"):
            # the acquisition id is the second '_'-separated field of the
            # filename stem (assumed <prefix>_<id>_<band>.tif -- TODO confirm)
            ids.add(tif.stem.split("_")[1])
    return sorted(ids)
def identisch(a, b):
    """Test whether two values are identical, allowing for rounding errors.
    :param a: first value
    :type a: float
    :param b: second value
    :type b: float
    :return: True when the values differ by less than the tolerance
    :rtype: bool
    """
    TOLERANCE = 0.003
    return abs(a - b) < TOLERANCE
def inverse_filter_dict(dictionary, keys):
    """Return a copy of the dictionary without any of the given keys.
    Args:
        dictionary (dict): Dictionary.
        keys (iterable): Keys to exclude.
    Return:
        dict: Filtered dictionary.
    """
    filtered = {}
    for key, value in dictionary.items():
        if key not in keys:
            filtered[key] = value
    return filtered
import textwrap
def wrap(text):
    """Wrap the given text into multiple lines of at most 50 characters."""
    lines = textwrap.wrap(text, 50)
    return "\n".join(lines)
def check_list_or_object(_list: list, _class: type):
    """
    Check whether _list is a list/tuple of _class instances, or a single
    value of that type, and return the values as a list.
    :param _list: List or single value to be checked.
    :param _class: Type of object desired.
    :return: List of values of the type _class ([] for falsy input).
    """
    if not _list:
        return []
    if isinstance(_list, (list, tuple)):
        for obj in _list:
            if not isinstance(obj, _class):
                raise TypeError("Object in argument list is not a {0}!".format(_class.__name__))
        return _list
    if isinstance(_list, _class):
        return [_list]
    raise TypeError("Argument used is not a {0}!".format(_class.__name__))
def by_residue(atoms):
    """Iterate over atoms by residue, returning a list of lists of atoms.

    Consecutive atoms sharing the same (resid, resname, chain) triple are
    grouped into one residue.
    """
    residues = []
    current = []
    previous_key = None
    for atom in atoms:
        key = (atom.resid, atom.resname, atom.chain)
        if key != previous_key and current:
            # residue boundary: flush the atoms collected so far
            residues.append(current)
            current = []
        current.append(atom)
        previous_key = key
    if current:
        residues.append(current)
    return residues
def get_repository_name(repository) -> str:
    """Return the repository's display name for use in the frontend.

    Prefers the manifest name, falling back to the final path segment of
    the repository's full name. Dashes and underscores become spaces;
    all-caps names are kept as-is, otherwise the name is title-cased.
    """
    manifest_name = repository.repository_manifest.name
    if manifest_name:
        name = manifest_name
    else:
        name = repository.data.full_name.split("/")[-1]
    name = name.replace("-", " ").replace("_", " ").strip()
    return name if name.isupper() else name.title()
def get_channel_info(slack_client, channel_id):
    """
    Return the channel info for a given channel ID via the Slack
    'channels.info' API call.
    """
    return slack_client.api_call("channels.info", channel=channel_id)
import re
def validate_device_name(device_name):
    """Validate a device name. Allowed characters are A-Z, a-z, _, - and
    0-9; minimum one character, maximum 64. Leading and trailing whitespace
    is removed before validation.
    Args:
        device_name: Device name to validate as string, or None.
    Returns:
        True if the given string is an allowed device name, False if not
        (None always yields False).
    """
    if device_name is None:
        return False
    return re.fullmatch(r'[a-zA-Z_\-\d]{1,64}', device_name.strip()) is not None
def path_to_points(path, num_points, closed, reverse):
    """Sample an SVG path into a 2D point list.

    Provide the path, the number of points, and whether this is a closed
    path (loop). For closed loops the returned list is one element larger
    than ``num_points`` and its first and last elements coincide. When
    ``reverse`` is true the path is traversed from end to start.
    """
    num_points = max(num_points, 2)
    # Closed paths sample up to (but not including) t == 1.0 so the
    # duplicated first point can close the loop.
    denom = float(num_points) if closed else float(num_points - 1)
    points = []
    for idx in range(num_points):
        t = idx / denom
        if reverse:
            t = 1.0 - t
        pt = path.point(t, error=1e-5)
        points.append((pt.real, pt.imag))
    if closed:
        points.append(points[0])
    return points
def local_to_timestamp(dt, tz):
    """Convert a local time, assumed in timezone tz, into a millisecond UTC timestamp."""
    aware = dt.replace(tzinfo=tz)
    return int(aware.timestamp() * 1000)
def isabs(s):
    # type: (str) -> bool
    """Test whether a path is absolute (i.e. begins with '/')."""
    return s[:1] == '/'
import requests
def get_currency_rates(base):
    """
    Downloads data for selected base currency from ExchangeRatesAPI.

    On any request failure (HTTP error, connection problem, timeout, or any
    other requests error) the error is printed and None is returned
    implicitly.

    Parameters
    ----------
    base : str
        Currency code in 3-letter code format or as symbol.

    Returns
    -------
    dict
        Conversion rates for selected base currency.
    """
    URL = "https://api.exchangeratesapi.io/latest?base={}".format(base)
    try:
        response = requests.get(URL)
        # Turn 4xx/5xx responses into an HTTPError handled below.
        response.raise_for_status()
        return response.json()["rates"]
    except requests.exceptions.HTTPError as err:
        print("Http Error: ", err)
    except requests.exceptions.ConnectionError as errc:
        print("Connection Error: ", errc)
    except requests.exceptions.Timeout as errt:
        print("Timeout Error: ", errt)
    except requests.exceptions.RequestException as e:
        print("Oops: ", e)
import re
def parse_asn(asn):
    """Parse a string containing an autonomous system number, returning the
    autonomous system number as int.

    Accepts a plain number ("64512") or an "AS"-prefixed form ("AS64512",
    case-insensitive). Returns None implicitly when neither form matches.
    """
    try:
        return int(asn)
    except ValueError:
        match = re.match("[aA][sS]([0-9]+)$", asn)
        if match:
            return int(match.group(1))
from pathlib import Path
def find_artefacts(base_dir, type="models", target_file="model.py"):
    """
    Find Mantra artefact directories.

    Walks ``base_dir/type`` depth-first. A directory that contains
    ``target_file`` is recorded as an artefact and is not descended into
    further; other directories are searched recursively.

    :param base_dir: Base directory to look at
    :param type: Can be `models`, `data`, `tasks`
    :param target_file: File used to identify the artefact
    :return: list of artefact directory paths (as strings)
    """
    base_path = Path(base_dir, type)
    all_artefacts = []
    if base_path.exists():
        subdirs = [p for p in base_path.iterdir() if p.is_dir()]
        while subdirs:
            subdir = subdirs.pop()
            # Scan the directory once instead of calling iterdir() twice.
            entries = list(subdir.iterdir())
            if any(p.name == target_file for p in entries):
                # Found the target file in this subdir!
                all_artefacts.append(str(subdir))
            else:
                # Go into further subdirs.
                subdirs.extend(p for p in entries if p.is_dir())
    return all_artefacts
def mySub(a, b):
    """
    Find the absolute difference between the two inputs

    Parameters
    ----------
    a : int or float
        First number
    b : int or float
        Second number

    Returns
    -------
    int or float
        Returns the difference between the two inputs
    """
    # The original three-way branch (a>b, b>a, equal) is exactly abs(a - b).
    return abs(a - b)
from datetime import datetime
def date_to_datetime(d):
    """
    Convert a date to a naive datetime at midnight (00:00:00).
    """
    return datetime.combine(d, datetime.min.time())
import base64
def encode(data):
    """Encode the given bytes-like object as base64, returning bytes."""
    return base64.standard_b64encode(data)
def getErrorRates(data, thresh):
    """Returns the true positive (detection or recall rate), false positive,
    true negative, false negative, and precision rates, given some data and a threshold.

    Precision is pos/(pos+fpos), and is 0.0 when nothing scores above thresh.
    The data should be pairs of (value, label) pairs. For sweeping a threshold over a set of values,
    use the getAllErrorRates function instead.

    Note: raises ZeroDivisionError when data contains no positive or no
    negative labels (unchanged from the original behaviour).
    """
    # Count all four quadrants in a single pass over the data instead of six
    # separate scans; this also works when `data` is a one-shot iterator.
    pos = fpos = neg = fneg = 0.0
    for value, label in data:
        if value > thresh:
            if label > 0:
                pos += 1.0
            else:
                fpos += 1.0
        else:
            if label > 0:
                fneg += 1.0
            else:
                neg += 1.0
    npos = pos + fneg
    nneg = fpos + neg
    # normalize values
    try:
        precision = pos / (pos + fpos)
    except ZeroDivisionError:
        precision = 0.0
    pos /= npos
    fpos /= nneg
    neg /= nneg
    fneg /= npos
    return (pos, fpos, neg, fneg, precision)
def generate_routes(ip_set, v4_only=False):
    """Generate route lines for vpn configuration.

    :param ip_set: object exposing ``iter_cidrs()`` yielding networks with
        ``version``, ``network`` and ``netmask`` attributes
    :param v4_only: when True, IPv6 networks are skipped
    :return: list of OpenVPN-style route directive strings
    """
    lines = []
    for cidr in ip_set.iter_cidrs():
        if cidr.version == 4:
            # route network netmask gateway metric
            lines.append(f"route {cidr.network} {cidr.netmask} vpn_gateway default")
            continue
        if cidr.version == 6 and not v4_only:
            # route network/bits gateway metric
            lines.append(f"route-ipv6 {cidr} default default")
    return lines
def create_annotationlist_id(manifest_info, canvas_id, annolist_idx, opts):
    """
    Return (uri, filename) for an annotation list.

    The URI prefix is ``opts['url_prefix']``, falling back to the manifest
    id. With the 'canvas' naming scheme the filename is derived from the
    last path segment of ``canvas_id``; otherwise it is indexed by
    ``annolist_idx``.
    """
    # fall back to the manifest id when no explicit prefix is configured
    prefix = opts['url_prefix'] or manifest_info['id']
    if opts['annolist_name_scheme'] == 'canvas':
        # use last part of canvas id
        fn = canvas_id.split('/')[-1] + '-annolist.json'
    else:
        fn = f"annolist-{annolist_idx}.json"
    return prefix + '/' + fn, fn
def _get_answer(context, context_words, answer_start, answer_end):
    """Get answer given context, context_words, and span.

    Maps a token-level answer span [answer_start, answer_end] (inclusive)
    back to a character-level slice of the decoded context.

    Args:
      context: A list of bytes, to be decoded with utf-8.
      context_words: A list of a list of bytes, to be decoded with utf-8.
      answer_start: An int for answer start.
      answer_end: An int for answer end.

    Returns:
      A list of bytes, encoded with utf-8, for the answer.
    """
    context = context.decode('utf-8')
    context_words = [word.decode('utf-8') for word in context_words]
    # Walk the words left to right, tracking the character offset `pos` of
    # each word inside the decoded context so that the token span can be
    # translated into a character span.
    pos = 0
    answer_start_char = None
    answer_end_char = None
    for i, word in enumerate(context_words):
        # Locate this word at or after the current scan position; assumes
        # every word occurs verbatim in the context (raises ValueError
        # otherwise).
        pos = context.index(word, pos)
        if answer_start == i:
            # Answer begins at the first character of this word.
            answer_start_char = pos
        pos += len(word)
        if answer_end == i:
            # Answer ends at the last character of this word (exclusive
            # slice bound), so we can stop scanning.
            answer_end_char = pos
            break
    assert answer_start_char is not None, (
        '`answer_start` is not found in context. '
        'context=`%s`, context_words=`%r`, '
        'answer_start=%d, answer_end=%d') % (context, context_words, answer_start,
                                             answer_end)
    assert answer_end_char is not None, (
        '`answer_end` is not found in context. '
        'context=`%s`, context_words=`%r`, '
        'answer_start=%d, answer_end=%d') % (context, context_words, answer_start,
                                             answer_end)
    answer = context[answer_start_char:answer_end_char].encode('utf-8')
    return answer
def get_base(node):
    """Return current node's basal node in tree

    Parameters
    ----------
    node : skbio.TreeNode
        query node

    Returns
    -------
    skbio.TreeNode
        basal node of query node

    Notes
    -----
    A "basal" node is defined as a child node of root.

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(['(((a,b)n6,(c,d)n5)n3,((e,f)n4,g)n2)n1;'])
    >>> node = tree.find('a')
    >>> print(get_base(node).name)
    n3
    """
    if node.is_root():
        raise ValueError('Root has no base.')
    # Climb until the parent is the root; that node is the basal ancestor.
    current = node
    while not current.parent.is_root():
        current = current.parent
    return current
import collections
def group_by(f, coll):
    """
    Returns a ``dict`` of the elements of ``coll`` keyed by the result of ``f``
    on each element. The value at each key will be a list of the corresponding
    elements, in the order they appeared in ``coll``.
    """
    groups = {}
    for item in coll:
        groups.setdefault(f(item), []).append(item)
    return groups
def get(l, idx, default):
    """
    Safe list get: return ``l[idx]`` or ``default`` when the index is out of range.

    :param l: list
    :param idx: index
    :param default: default value
    :return: element at ``idx``, or ``default`` on IndexError
    """
    result = default
    try:
        result = l[idx]
    except IndexError:
        pass
    return result
from typing import Dict
import yaml
def read_config(path: str) -> Dict:
    """Load config based on provided filepath.

    Param
    ---------
    path: str
        Path to provided file.

    Returns
    ---------
    Dict
        configuration file in a dictionary form.
    """
    with open(path, "rb") as handle:
        return yaml.safe_load(handle)
def zpn(z, c, n=2):
    """
    nth order polynomial map ``z**n + c`` generating a beautiful set

    Parameters
    ----------
    z: Complex
        scalar, vector or matrix to evaluate the function on elementwise
    c: Complex
        scalar, vector or matrix, the starting value(s)
    n: int
        polynomial degree (defaults to 2, the classic quadratic map)
    """
    powered = z ** n
    return powered + c
def requires_special_home_display(powerline, name):
    """Return the theme's HOME_SPECIAL_DISPLAY flag when ``name`` is the home
    indicator ('~'); otherwise False."""
    if name != '~':
        return False
    return powerline.theme.HOME_SPECIAL_DISPLAY
import csv
def parse_probesets(probes_tsv):
    """
    Parse a two-column TSV of (bed path, track name) rows.

    :param probes_tsv: path to the TSV file
    :return: (tracks, tnames) parallel lists of bed paths and track names
    """
    tracks, tnames = [], []
    with open(probes_tsv) as handle:
        for bedpath, tname in csv.reader(handle, delimiter='\t'):
            tracks.append(bedpath)
            tnames.append(tname)
    return tracks, tnames
def ensure_unicode(obj):
    """
    If the input object is a string, make sure it is returned as a Unicode
    string, as follows:

    * If the input object already is a Unicode string, it is returned unchanged.
    * If the input object is a Byte string, it is converted to a Unicode string
      using the UTF-8 encoding.
    * Otherwise, the input object was not a string and is returned unchanged.
    """
    # six.binary_type is simply `bytes` on Python 3; testing the built-in
    # type directly drops the third-party `six` dependency.
    if isinstance(obj, bytes):
        return obj.decode("utf-8")
    return obj
def _possibly_sorted(x):
"""Bypass the sorted() call in reprlib._possibly_sorted
This is mostly for ensuring reproducibility in tests.
"""
return list(x) | b7dd511299567631900260a1130ad8e9034cdf4d | 114,670 |
def three_dig_format(digit):
    """
    Create a zero-padded string of 3 digits given any digit string < 1000.

    Returns None implicitly when ``digit`` is not a 1-3 character digit
    string (unchanged from the original behaviour).
    """
    if digit.isdigit() and len(digit) < 4:
        # zfill replaces the manual three-branch padding.
        return digit.zfill(3)
import torch
def doUpdate(X, Y, dt):
    """
    Updates the node attributes that describe each nucleotide's position (translational, rotational). Use a Euclidean update
    (a semi-implicit Euler step: velocities are updated first, then positions use the new velocities).

    Each row in X contains the following entries:
       0   1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
       n   rx  ry  rz  bx  by  bz  nx  ny  nz  vx  vy  vz  Lx  Ly  Lz

    Each row in Y contains the following entries:
       0   1   2   3   4   5
       ax  ay  az  atx aty atz

    Inputs:
    X : node attribute matrix containing each nucleotide's position and orientation in shape [n_nodes, n_features]
    Y : decoder output containing translational and rotational accelerations for each nucleotide in shape [n_nodes, n_state_vars]
    dt : scalar giving the time step of the ground truth data

    Outputs:
    X_next : node attribute matrix for the next time step in shape [n_nodes, n_features]
    """
    # create X_next, the node attribute matrix for the next time step
    # NOTE: column 0 (the nucleotide index n) is left at zero and not copied from X.
    X_next = torch.zeros_like(X)
    # update translational velocity (vx, vy, vz)
    X_next[:, 10:13] = X[:, 10:13] + dt * Y[:, 0:3]
    # update rotational velocity (Lx, Ly, Lz)
    X_next[:, 13:] = X[:, 13:] + dt * Y[:, 3:]
    # update translational position (rx, ry, rz) using the *new* velocities
    X_next[:, 1:4] = X[:, 1:4] + dt * X_next[:, 10:13]
    # update backbone base versor (bx, by, bz)
    # NOTE(review): the versor updates below add the angular velocity linearly
    # and do not renormalize the versors -- confirm this matches the
    # ground-truth integrator.
    X_next[:, 4:7] = X[:, 4:7] + dt * X_next[:, 13:]
    # update normal versor (nx, ny, nz)
    X_next[:, 7:10] = X[:, 7:10] + dt * X_next[:, 13:]
    return X_next
def parse_tuple(my_str):
    """
    Parse input parameters which can be tuples.

    Strips surrounding brackets, splits on commas, and returns the trimmed
    element strings.
    """
    # remove any kind of parenthesis (one strip per bracket type, in order)
    for closer in (")", "]", "}"):
        my_str = my_str.rstrip(closer)
    for opener in ("(", "[", "{"):
        my_str = my_str.lstrip(opener)
    # split tuple elements and trim surrounding whitespace on each
    return [part.strip() for part in my_str.split(",")]
import json
def json_dumps(obj):
    """A wrapper to `json.dumps` that provides the common arguments used
    throughout the code.

    Dict keys are sorted by json.dumps itself; lists of strings are
    additionally sorted for stable output. Unlike the previous in-place
    sort, the caller's object is left unmodified.

    Args:
        obj (object): Object to convert to JSON

    Returns:
        str: String containing formatted JSON
    """
    def _sorted_copy(o):
        # Recursively build a sorted copy instead of mutating the input.
        if isinstance(o, list):
            if len(o) > 0 and isinstance(o[0], str):
                return sorted(o)
            return [_sorted_copy(i) for i in o]
        if isinstance(o, dict):
            return {k: _sorted_copy(v) for k, v in o.items()}
        return o
    return json.dumps(_sorted_copy(obj), indent=2, sort_keys=True)
import hashlib
def calculate_file_hash(file_name: str, buffer_size: int = 64 * 1024) -> str:
    """Calculates the hash of a data file

    Parameters
    ----------
    file_name : str
        file to calculate
    buffer_size : int
        chunk size in bytes used when reading the file

    Returns
    -------
    str
        SHA1 hash for file
    """
    hasher = hashlib.sha1()
    with open(file_name, "rb") as stream:
        # Stream the file chunk by chunk so large files are never loaded whole.
        for chunk in iter(lambda: stream.read(buffer_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.