content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def _shift_arr_retain(M, dir, n):
"""
Shift an array along one of the eight (inter)cardinal directions.
Pixels padded to the edges of the axes retain the value from the
original array.
Parameters
----------
M : ndarray
Input array.
dir : int
Direction along to shift the array, ranging from 0 (north) to 7
(northwest).
n : int
Number of pixels to be shifted.
Returns
-------
S : ndarray
Shifted array.
"""
S = M.copy()
if dir == 0: # north
S[:-n - 1, :] = M[1 + n:, :]
elif dir == 1: # northeast
S[:-n - 1, 1 + n:] = M[1 + n:, :-n - 1]
elif dir == 2: # east
S[:, 1 + n:] = M[:, :-n - 1]
elif dir == 3: # southeast
S[1 + n:, 1 + n:] = M[:-n - 1, :-n - 1]
elif dir == 4: # south
S[1 + n:, :] = M[:-n - 1, :]
elif dir == 5: # southwest
S[1 + n:, :-n - 1] = M[:-n - 1, 1 + n:]
elif dir == 6: # west
S[:, :-n - 1] = M[:, 1 + n:]
elif dir == 7: # northwest
S[:-n - 1, :-n - 1] = M[1 + n:, 1 + n:]
return S
|
b413774dadc11a27e09aada2953e49c3aec9c03d
| 67,379
|
import math
def normalize_angle(angle):
    """
    Normalizes the specified angle (in radians) to the interval (-pi, pi].

    Note: the result is NOT in [0, 2*pi); the loops below fold the angle
    into the half-open interval (-pi, pi].

    :param angle: specified angle (in radians)
    :return: normalized angle in (-pi, pi]
    """
    res = angle
    # Fold down anything above pi...
    while res > math.pi:
        res -= 2.0 * math.pi
    # ...and fold up anything at or below -pi.
    while res <= -math.pi:
        res += 2.0 * math.pi
    return res
|
b296f76041c022bc026e09803516454b4b30b5aa
| 67,384
|
def string_to_int(string):
    """
    Parse `string` as an integer, ignoring thousands-separator commas.

    :param string: An instance of type string
    :return: Integer
    """
    cleaned = string.replace(',', '')
    return int(cleaned)
|
e572afd6d15d4850aea20796601f3f2ec3263d27
| 67,386
|
import re
def GetSizeAnnotationToDraw(annotationList):#{{{
    """Get the size of the annotation field.

    For each annotation, takes the largest end position among: an
    "nTM = N" marker, a "group of N" marker, and the first whitespace-free
    token; returns the maximum over all annotations (0 for an empty list).
    """
    # Bug fix: the patterns were plain strings with "\s", an invalid escape
    # sequence (SyntaxWarning in modern Python); raw strings are identical
    # at runtime and future-proof.
    maxSize = 0
    for anno in annotationList:
        m1 = re.search(r"nTM\s*=\s*[0-9]*", anno)
        m2 = re.search(r"group of [0-9]*", anno)
        m3 = re.search(r"[^\s]+", anno)
        pos1 = 0
        pos2 = 0
        pos3 = 0
        if m1:
            pos1 = m1.end(0)
        if m2:
            pos2 = m2.end(0)
        if m3:
            pos3 = m3.end(0)
        size = max(pos1, pos2, pos3)
        if size > maxSize:
            maxSize = size
    return maxSize
|
f45ff9153718c5fedb254fa3716d2d66fc7d60f2
| 67,387
|
import re
def valid_email(email=None):
    """Returns True if the argument is a non-empty string holding a
    plausible email address (something@something.something)."""
    if not email:
        return False
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", email))
|
1c6cd70d8eb6bb0053dee25b7cb0f9f4055395b3
| 67,389
|
import re
def parse_speaker(path):
    """
    Get speaker id from a BAS partitur file.

    Parameters
    ----------
    path : str
        a path to the file

    Returns
    -------
    str or None
        the speaker id (the value on the first "SPN:" line), or None
        if no such line exists
    """
    # Fixes: dropped the unused `speaker` variable, made the regex a raw
    # string, and iterate the file lazily instead of reading all lines.
    with open(path, 'r', encoding='utf8') as f:
        for line in f:
            splitline = re.split(r"\s", line)
            if splitline[0] == 'SPN:':
                return splitline[1].strip()
    return None
|
31f65b13903324b1b941b2beb6c28edde430ffa1
| 67,391
|
def is_in_string(char, string):
    """Check for char occurrence in the provided string.

    >>> is_in_string('3', str(15))
    False
    >>> is_in_string('5', str(15))
    True
    """
    # Compare character-by-character rather than using `char in string`:
    # a multi-character `char` must never match (the original index loop
    # compared single characters only).
    return any(c == char for c in string)
|
ea8051e6b18cff137fe18dd972e11721f61785a0
| 67,394
|
def location_to_field_association(loc):
    """Helper mapping a location name to its VTK/Enum value (4 = unknown)."""
    mapping = {"Point": 0, "Cell": 1, "Field": 3}
    return mapping.get(loc, 4)
|
38a826986a72c087d37893862e83fc68275f9099
| 67,396
|
def convert_vks_rows_to_sorted_kvs_rows(vks_rows):
    """Converts value-key-score rows to key-value-score rows, sorted by key."""
    key_to_vss = {}
    for value, key, score in vks_rows:
        # Float scores are rendered in the default '%.6f'-style text form.
        if type(score) is float:
            score = '%f' % score
        key_to_vss.setdefault(key, []).append((value, score))
    output = []
    # Keys are ordered by their UTF-8 byte representation.
    for key in sorted(key_to_vss, key=lambda k: k.encode('utf-8')):
        # Stable sort: values with equal scores keep their original
        # ("natural") order, which matters for rows from the plain BPMF
        # list.
        ranked = sorted(key_to_vss[key], key=lambda vs: float(vs[1]),
                        reverse=True)
        for value, score in ranked:
            output.append((key, value, score))
    return output
|
27bf8ed1b9a1af7a4a86a29b9cd7b63c86c597ac
| 67,399
|
def matrix_multiply(a, b):
    """The :py:func:`matrix_algebra.matrix_multiply` function computes
    the product of two matrices.

    Args:
        a (numpy.ndarray): first matrix
        b (numpy.ndarray): second matrix

    Returns:
        (numpy.ndarray): product of a and b
    """
    product = a @ b
    return product
|
7524f4a1aa1a3273c2ff99e86cae6a2c3803b852
| 67,403
|
import torch
def numpyify(tensors):
    """Recursively convert tensors to numpy arrays.

    Handles tuples, dicts, torch Tensors, and objects exposing their own
    `numpyify` method; anything else is returned unchanged.
    """
    if isinstance(tensors, tuple):
        return tuple(numpyify(t) for t in tensors)
    # Fix: the docstring promised dict support but dicts fell through
    # unchanged; convert each value recursively.
    if isinstance(tensors, dict):
        return {k: numpyify(v) for k, v in tensors.items()}
    if isinstance(tensors, torch.Tensor):
        # clone+detach drops autograd history; cpu() ensures host memory.
        return tensors.clone().detach().cpu().numpy()
    if hasattr(tensors, 'numpyify'):
        return tensors.numpyify()
    return tensors
|
1abe90a35abc92b291e3a7c84beeb579b34359a5
| 67,404
|
import re
def get_integral_and_diff_info(formula: str) -> list:
    """
    Find Sympy-style diff(...)/integrate(...) calls in a formula.

    Each match is reported as (operator, argument, action var), e.g.
    'sin(x) + diff((a-2/u**2),z)' yields ('diff', '(a-2/u**2)', 'z'):
    the operator name, the expression inside the call, and the variable
    the operator acts upon.

    Parameters
    ----------
    formula : str
        Formula possibly containing diff/integrate calls.

    Returns
    -------
    list of tuples of three items, or empty list
        [(operator, argument, action var), ...].
    """
    # Whitespace would break the argument character classes, so strip it.
    compact = formula.replace(' ', '')
    pattern = r'(diff|integrate)\(([\(\)\w+\-*/]*),([\w+\-*/]+)'
    return re.findall(pattern, compact)
|
0b98ee849135b0935b8369731214936196765fde
| 67,409
|
def get_hostname(results):
    """Get Hostname.

    Args:
        results (Element): XML results from firewall

    Returns:
        str: text of the ./result/system/hostname node
    """
    node = results.find('./result/system/hostname')
    return node.text
|
8cb576f1a13d1f5afe465333ce6b7a3c6fdd3b09
| 67,414
|
import random
def random_str(Nchars=6, randstrbase='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Return a random string of <Nchars> characters. Characters are sampled
    uniformly from <randstrbase>.
    """
    # random.choice samples one element uniformly — the idiomatic and
    # equivalent replacement for manual randint indexing.
    return ''.join(random.choice(randstrbase) for _ in range(Nchars))
|
47b4132d81a104af81eed6b6c23b6060111dfc7c
| 67,417
|
import re
def _get_data_glob(data):
"""
Construct a glob expression from the data expression
"""
return re.sub(r'{[^{}]*}', '*', data)
|
b4add01693f3147849dc14f642d2acdd09c962c1
| 67,418
|
def project(a):
    """De-homogenise a vector: divide the leading components by the last."""
    w = float(a[-1])
    return a[:-1] / w
|
bbdecb2b720e0b02ccac2324078907811504ae86
| 67,422
|
def blit(im1, im2, pos=None, mask=None):
    """Blit an image over another.

    Pastes ``im1`` onto ``im2`` at position ``pos=(x,y)``, using the
    ``mask`` if provided, and returns ``im2``.
    """
    # Normalise the position: default to the origin, otherwise force a
    # tuple in case pos is not subscriptable.
    coords = (0, 0) if pos is None else tuple(pos)
    im2.paste(im1, coords, mask)
    return im2
|
ba3aa75d7d37a3fb4f35343988fa1de42f0aed50
| 67,423
|
from typing import List
def order_by_name(order_list: List[dict], order_name: str) -> int:
    """Return the current index of the order with the given name, or -1
    if no such order exists."""
    return next(
        (idx for idx, order in enumerate(order_list)
         if order['name'] == order_name),
        -1,
    )
|
5466672c63b1015476c1a04261ac88052b5b191b
| 67,425
|
def _escapeAnchor(name):
"""
Escape name to be usable as HTML anchor (URL fragment)
"""
result = []
for c in name:
oc = ord(c)
if oc < 48 or (57 < oc < 65) or (90 < oc < 97) or oc > 122:
if oc > 255:
result.append("$%04x" % oc)
else:
result.append("=%02x" % oc)
else:
result.append(c)
return u"".join(result)
|
84c995916d4399b99d0379ad4879f46d94881f52
| 67,428
|
import time
def formatTime(timeData):
    """Normalise a date string to the canonical "YYYY-MM-DD" form.

    :param timeData: date string such as "2022-03-01"
    :return: the reformatted date string
    :raises ValueError: if the input does not parse as %Y-%m-%d
    """
    try:
        parsed = time.strptime(timeData, "%Y-%m-%d")
    except ValueError:
        raise ValueError("请输入正确的时间范围,例如 2022-02-01")
    return time.strftime("%Y-%m-%d", parsed)
|
0157fe33b8b4738bc5021f5c4eac45decf874dac
| 67,430
|
def calculatePossessions(reg_season_games):
    """Adds TmPoss and OppPoss possession columns to the provided DataFrame.

    Uses the Basketball Reference method:
    https://www.basketball-reference.com/about/glossary.html
    Note TmPoss == OppPoss (possessions are shared).

    Arguments:
        reg_season_games {DataFrame} -- Games to calculate possessions

    Returns:
        DataFrame -- Input DataFrame + TmPoss and OppPoss
    """
    def _team_half(own, opp):
        # One team's share of the possession estimate: field-goal
        # attempts, weighted free throws, an offensive-rebound
        # adjustment against the opponent's defensive rebounds, and
        # turnovers.
        return (reg_season_games[own + 'FGA']
                + 0.4 * reg_season_games[own + 'FTA']
                - 1.07 * (reg_season_games[own + 'ORB'] /
                          (reg_season_games[own + 'ORB']
                           + reg_season_games[opp + 'DRB']))
                * (reg_season_games[own + 'FGA']
                   - reg_season_games[own + 'FGM'])
                + reg_season_games[own + 'TO'])

    reg_season_games['TmPoss'] = 0.5 * (_team_half('Tm', 'Opp')
                                        + _team_half('Opp', 'Tm'))
    reg_season_games['OppPoss'] = reg_season_games['TmPoss']
    return reg_season_games
|
0b4ad6eead85f27096497415e564accecf551a1d
| 67,431
|
def get_link_enjambment(previous_token, next_token):
    """
    Checks if a link enjambment exists between two lines
    :param previous_token: The word before a newline character
    :param next_token: The first word after the newline character
    :return: [previous_token.pos_, next_token.pos_] if linked, or None if not found
    """
    # NOTE(review): both arguments appear to be spaCy Token objects
    # (n_rights, head, dep_, nbor, is_ancestor are Token attributes) —
    # confirm against callers.
    if next_token.n_rights > 0 or (next_token.head == previous_token.head):
        # Linked when the previous token heads the next one (as ROOT or
        # nominal subject), or when the token following next_token has
        # previous_token as an ancestor in the dependency tree.
        if ((previous_token.dep_ in ('ROOT', 'nsubj')
             and previous_token is next_token.head)
             or (next_token.nbor().is_ancestor(previous_token))):
            return [previous_token.pos_, next_token.pos_]
    return None
|
14854e4072ba95a071f370ce33b695cb2ffaabac
| 67,432
|
def jaccard_similarity(b1, b2):
    """Jaccard similarity between two sets b1 and b2.

    :param b1: set of indices with a rating for business b1
    :param b2: set of indices with a rating for business b2
    :return: |b1 ∩ b2| / |b1 ∪ b2|, or 0.0 when both sets are empty
    """
    union = b1.union(b2)
    # Guard the empty-union case, which previously raised ZeroDivisionError.
    if not union:
        return 0.0
    return len(b1.intersection(b2)) / len(union)
|
a1da6b361573e6b3ab34322a6782eeec3d34f482
| 67,433
|
def get_generic_constraint_rhs(data, intervention) -> dict:
    """
    Get generic constraint right-hand-side terms.

    Parameters
    ----------
    data : dict
        NEMDE case file dictionary
    intervention : str
        Intervention flag - '0' -> no intervention constraints,
        '1' -> intervention constraints included

    Returns
    -------
    rhs : dict
        Dictionary with keys = ConstraintIDs, values = constraint RHS
    """
    constraints = (data.get('NEMSPDCaseFile').get('NemSpdOutputs')
                   .get('ConstraintSolution'))
    # Keep only constraints whose intervention flag matches.
    return {c['@ConstraintID']: float(c['@RHS'])
            for c in constraints
            if c['@Intervention'] == intervention}
|
73268811434b9d618cd2e218002df649d89cec88
| 67,434
|
from typing import Any
def is_timestamp(value: Any) -> bool:
    """Check if value is a valid timestamp: an int, float, or numeric
    string (bool is explicitly rejected despite being an int subclass)."""
    if isinstance(value, bool) or not isinstance(value, (int, float, str)):
        return False
    try:
        float(value)
    except ValueError:
        return False
    return True
|
5ff15846fad6ecc71c2de15f901cf855dd5d006b
| 67,443
|
def _parse_package_name(package):
"""
Splits npm package name into [@scope, name]
"""
return package.split("/") if package.startswith("@") else [None, package]
|
6ff88f10064e8bcb9c6568572a3cc2424fff70a1
| 67,445
|
def make_cup_links(cups):
    """Reformat a list of cups into a dict mapping each cup to the next,
    with the last cup wrapping around to the first."""
    successors = cups[1:] + [cups[0]]
    return dict(zip(cups, successors))
|
0281cae0d2f6cefe2b0f48b0dd71e5800561b2f5
| 67,446
|
def sort_by_size(L):
    """
    Return a copy of precinct list L, sorted into decreasing order by size.
    """
    # sorted() already returns a new list, replacing the
    # copy/sort/reverse three-step.
    return sorted(L, reverse=True)
|
151dcd7c108494b3998b6e106c234d1c7dbff376
| 67,450
|
def data_to_rows(data):
    """Unzip column-wise data from doc._.data into row tuples."""
    # zip(*columns) transposes the column lists into rows.
    return list(zip(*data.values()))
|
e7e55d8fa6026dcf325665eaf7d05274cb7d8f0b
| 67,462
|
def shorten_titl(str_in, nout=5):
    """Shorten the title with *, so it can still be matched by glob.

    Keeps the first and last `nout` characters with '*' in between;
    strings of length <= 2*nout are returned unchanged.
    """
    # Bug fix: the head/tail slices previously hard-coded 5 and ignored
    # the nout parameter.
    if len(str_in) > nout * 2:
        return str_in[:nout] + '*' + str_in[-nout:]
    return str_in
|
19fd358ba94646f076e8795a86eba7568705c475
| 67,463
|
def linear(x):
    """Identity activation function: returns its argument unchanged."""
    return x
|
8e9c2fbe1cbaaa3374a25225b4f184ab0f00e173
| 67,464
|
def is_matrix(block):
    """
    Returns true if block is a matrix: a Python list of lists in which
    every row has the same length as the first row.

    Note: rows of length 1 and an empty outer list are accepted — no
    minimum size is enforced here.
    """
    # Generator expressions avoid materialising throwaway lists; `and`
    # short-circuits, so row lengths are only inspected once all rows are
    # known to be lists.
    return (all(isinstance(r, list) for r in block)
            and all(len(block[0]) == len(r) for r in block))
|
56b23130d0dff0d43ed53dcfc7f6ec606cdb2b1a
| 67,469
|
def remove_repeated_asn(path):
    """ Collapse consecutive duplicate ASNs in the given path.

    Args:
        path (list of ASN): ASN can be int, or str for IXP hops

    Returns:
        list of ASN with consecutive repeats removed
    """
    # Keep a hop when it is the first, or differs from its predecessor.
    return [hop for idx, hop in enumerate(path)
            if idx == 0 or hop != path[idx - 1]]
|
3c9900a3c2cdb3236926a87e9404aa027a9afaa8
| 67,470
|
from typing import Tuple
import re
def ints(text: str) -> Tuple[int, ...]:
    """Return a tuple of the (possibly negative) integers found in text."""
    return tuple(int(token) for token in re.findall("-?[0-9]+", text))
|
08e6307ba69c759e7b23309f825bf5a643b8c33a
| 67,472
|
def config_parser_to_dict(config_parser):
    """
    Convert a ConfigParser to a nested {section: {option: value}} dict.
    """
    response = {}
    for section in config_parser.sections():
        options = {option: config_parser.get(section, option)
                   for option in config_parser.options(section)}
        # Sections without any options are omitted, matching the
        # setdefault-based accumulation this replaces.
        if options:
            response[section] = options
    return response
|
0178217f13c04ec0fd4e1a0648ce7314659db9b5
| 67,473
|
def _TransformOperationTimestamp(metadata):
"""Extract operation start timestamp from metadata."""
if 'statusHistory' in metadata:
return metadata['statusHistory'][0]['stateStartTime']
elif 'startTime' in metadata:
return metadata['startTime']
return ''
|
2baa47eacb492c09025a0b7604506409bfb1a323
| 67,477
|
def minidom_get_text(nodelist):
    """
    Collect the text in a minidom nodelist, e.g. for <tag>this text</tag>.

    nodelist: the childNodes field of the minidom node
    """
    texts = [node.data for node in nodelist
             if node.nodeType == node.TEXT_NODE]
    return "".join(texts)
|
2f9a781cdd6c675fb0ec0ef5268a824144e8838d
| 67,478
|
def numberOfRows(cur, table):
    """
    Returns the number of rows in the provided table.

    NOTE(review): `table` is interpolated directly into the SQL text
    (identifiers cannot be bound as parameters), so it must come from
    trusted code, never from user input.
    """
    query = "SELECT Count(*) FROM {}".format(table)
    cur.execute(query)
    (count,) = cur.fetchone()
    return count
|
7676e7894ac7372099c90f7f0f1c1f51bf23602c
| 67,479
|
def remove_file_suffix(path):
    """
    Remove a known develop/deployment file extension from the path so the
    two forms can be matched — e.g. the develop form of Java is .java and
    the deployment form is .class.

    For example, passing
        /home/test/src/test.java
    returns
        /home/test/src/test
    """
    # Bug fix: the original tuple was missing a comma between '.cpp' and
    # '.o', which silently concatenated them into '.cpp.o' so neither
    # extension was ever matched on its own.
    suffixes = ('.java', '.class', '.c', '.cpp', '.o', '.exe', '.py', '.pyc')
    if path.endswith(suffixes):
        return path.rsplit('.', 1)[0]
    return path
|
aee68f8a8b58b98d386f457fe0ef56eb9d9d275b
| 67,480
|
def JavaReturnValueToC(java_type):
    """Returns a valid C return value for the given java type.

    Primitive (POD) types map to a zero-like literal; any other type is
    treated as an object reference and maps to 'NULL'.
    """
    pod_defaults = {
        'int': '0', 'byte': '0', 'char': '0', 'short': '0',
        'long': '0', 'double': '0', 'float': '0',
        'boolean': 'false',
        'void': '',
    }
    return pod_defaults.get(java_type, 'NULL')
|
f0a751bc89f7ba95ce4717c45c18c02babda453b
| 67,486
|
def buildapiurl(proto="http", host="127.0.0.1",
                action=None):
    """
    Create a URL for the Cuckoo API.

    :param proto: http or https
    :param host: Hostname or IP address
    :param action: The action to perform with the API (path starting with '/')
    :returns: The URL, or None when no action is given
    """
    # Doc fix: the old docstring documented a `port` parameter that does
    # not exist; the host string must carry any port itself.
    if action is None:
        return None
    return "{0}://{1}/api{2}/".format(proto, host, action)
|
82c17208cfc0b516ac22db1ecbfb9c8237765cf2
| 67,491
|
import math
def bollinger(strategy, instrument, period=20, matype=None, field=None):
    """
    The Bollinger band width kernel
    :param Strategy strategy: the instance of the Strategy class
    :param Instrument instrument: the instrument object
    :param int period: the window of rolling average
    :param str matype: type of moving average, either simple, 'sma' or exponential, 'ema'
    :param str field: which field to use for the moving average. default is 'Close'
    :return double: the score
    """
    matype = matype or 'ema'
    field = field or 'Close'
    # NOTE(review): snapshot(...)[field] is treated below as a
    # DataFrame-like object (it gains 'nu'/'Sigma' columns and exposes
    # .columns/.diff) — confirm against Instrument.snapshot.
    profile = instrument.snapshot(strategy.rule)[field].tail()
    # Rolling mean and rolling standard deviation over `period` samples.
    profile['nu'] = strategy.moving_average(instrument, period, matype=matype, field=field).tail()
    profile['Sigma'] = strategy.moving_standard_deviation(instrument, period, matype=matype, field=field).tail()
    # First differences of every column, stored as 'd<column>' columns.
    profile[['d' + col for col in profile.columns]] = profile.diff()
    def _score1(x):
        # Quartic scoring of the normalised distance from the moving mean.
        return (-1.25 * x * x * x * x + 22.25 * x * x + 1.) / 100.
    def _score2(x):
        # Arctan scoring of the price-change / volatility-change ratio.
        return -math.atan(x) / 3 + .5
    # NOTE(review): a 'Price' column is read here even though `field`
    # defaults to 'Close' — confirm the snapshot actually carries 'Price'.
    price = profile.get('Price')[-1]
    sigma = profile.get('Sigma')[-1]
    nu = profile.get('nu')[-1]
    # Distance of the latest price from the moving average, in sigmas.
    n = (price - nu) / sigma
    s1 = _score1(n)
    d_price = profile.get('dPrice')[-1]
    d_sigma = profile.get('dSigma')[-1]
    m = d_price / d_sigma
    s2 = _score2(m)
    return s1 * s2
|
bca04857722f56d967a22eb1b6fc4913eceb9a26
| 67,493
|
def _to(l):
"""To
Converts all addresses passed, whether strings or lists, into one singular
list
Arguments:
l (list(str|str[])): The list of addresses or lists of addresses
Returns:
list
"""
# Init the return list
lRet = []
# Go through each item in the list
for m in l:
# If we got a list
if isinstance(m, (list,tuple)):
lRet.extend(m)
# Else, we got one address
else:
lRet.append(m)
# Return the full list
return lRet
|
22d9741cf4a0cced66b348ddc2af670f9f345271
| 67,496
|
def m_make_matrix(number_rows, number_columns, entry_fn):
    """
    Returns a number_rows x number_columns matrix whose (i,j)th entry is
    entry_fn(i, j).
    """
    matrix = []
    for i in range(number_rows):
        matrix.append([entry_fn(i, j) for j in range(number_columns)])
    return matrix
|
fb819f7272e32379b63898bf3b8a67d61c2d9c67
| 67,509
|
def src2https(src: str):
    """Prefix a protocol-relative src URL with the https scheme."""
    return "https:" + src
|
b033cbd9ee841ad80161858036021b1bd532ee06
| 67,518
|
import struct
def packRequest_window_change(geometry):
    """
    Pack a window-change request so that it is suitable for sending.

    @type geometry: L{tuple}
    @param geometry: A tuple of (rows, columns, xpixel, ypixel)
    """
    rows, cols, xpixel, ypixel = geometry
    # Note the wire order: columns before rows.
    return struct.pack('>4L', cols, rows, xpixel, ypixel)
|
924051d1d1fd0ba6f98abd1f9c2c052aba80f0f7
| 67,529
|
def chunk_it(seq, num):
"""
Chunk a sequence in N equal segments
:param seq: Sequence of numbers
:param num: Number of chunks
:return: chunked start and end positions
"""
# find average chunk size
avg = len(seq) / float(num)
out = []
last = 0.0
# until the end of sequence
while last < len(seq):
# append the value to a bin
out.append(seq[int(last):int(last + avg)])
last += avg
return out
|
9e671f7848477ab8579c4f08f4e8f37e9cba6069
| 67,532
|
import re
def filter_regex(values, regexs):
    """Filters list of `values` by list of `regexs`.

    Parameters
    ----------
    values: list
        list of `str` values (a bare string is treated as a 1-list).
    regexs: list
        list of `str` regexs (a bare string is treated as a 1-list).

    Returns
    -------
    list
        Sorted `list` of values in `values` that match any regex in
        `regexs`.
    """
    if not isinstance(values, list):
        values = [values]
    if not isinstance(regexs, list):
        regexs = [regexs]
    matched = {value for value in values
               for regex in regexs
               if re.search(regex, value)}
    return sorted(matched)
|
d5c018ba6cb9b780826c5d001af52a0c458c876d
| 67,535
|
def msg_crc16(msg: bytes) -> int:
    """Compute the CRC-16 of a bytes message (reflected polynomial 0xA001,
    initial value 0xFFFF — the Modbus variant)."""
    crc = 0xFFFF
    for byte in msg:
        crc ^= byte
        for _ in range(8):
            if crc & 1:
                crc = (crc >> 1) ^ 0xA001
            else:
                crc >>= 1
    return crc
|
c3aad4732e3cf238442917f9f651858438a06501
| 67,547
|
def find_nested_meta_first_bf(d, prop_name):
    """Return the first meta element whose @property (or @rel) matches
    prop_name, or None.

    Doc fix: the whole meta element dict is returned, not its '$' value
    as previously documented.
    """
    m_list = d.get('meta')
    if not m_list:
        return None
    # A single meta element may appear un-wrapped; normalise to a list.
    if not isinstance(m_list, list):
        m_list = [m_list]
    for m_el in m_list:
        if m_el.get('@property') == prop_name or m_el.get('@rel') == prop_name:
            return m_el
    return None
|
47e62cf12b0fc62bd6e32b1961c6f9b1ba1190ed
| 67,551
|
def read_mutations(mutations_file):
    """
    Read the mutations file into memory for processing, one mutation per
    line; undecodable bytes are ignored.
    """
    with open(mutations_file, encoding="utf8", errors="ignore") as infile:
        mutations = infile.read().splitlines()
    print("[+] Mutations list imported: {} items".format(len(mutations)))
    return mutations
|
c4c8abfcdbf6a87a052a7e4a9cf56d0a03eb37e0
| 67,555
|
def filter_keys(model_keys):
    """ Remove untrainable variables left in a PyTorch .pth file.

    Args:
        model_keys: List of PyTorch model's state_dict keys

    Returns:
        The same list object with relative-position-index and attention
        mask keys removed in place.
    """
    doomed = [key for key in model_keys
              if "attn.relative_position_index" in key or "attn_mask" in key]
    # Mutate the caller's list, as the original API did.
    for key in doomed:
        model_keys.remove(key)
    return model_keys
|
1dc1ec8f57c74e279bec9af9cd2b0d884661e604
| 67,557
|
def file_to_string(path: str) -> str:
    """ Read the file at *path* and return its contents as an SDL string """
    with open(path, "r") as handle:
        return handle.read()
|
79dde7eb87fdaf7a412dee8928f384be76d60b0b
| 67,558
|
def friend_date(a, b):
    """Given two friends, do they have any hobbies in common?

    - a: friend #1, a tuple of (name, age, list-of-hobbies)
    - b: same, for friend #2

    Returns True if they have any hobbies in common, False if not.

    >>> elmo = ('Elmo', 5, ['hugging', 'being nice'])
    >>> sauron = ('Sauron', 5000, ['killing hobbits', 'chess'])
    >>> gandalf = ('Gandalf', 10000, ['waving wands', 'chess'])

    >>> friend_date(elmo, sauron)
    False

    >>> friend_date(sauron, gandalf)
    True
    """
    # A non-empty set intersection means shared hobbies; bool() replaces
    # the verbose if/else the original used.
    return bool(set(a[2]) & set(b[2]))
|
64c78b8b22d9f8da3f5ecf94883019c09a9500ae
| 67,559
|
def _product(a,scalar):
""" multiply iterable a by scalar """
return tuple([scalar*x for x in a])
|
3a6e49b28ebcd4701a5aa7810dd1bb674cd41b44
| 67,562
|
def patching_test(value):
    """
    A hook for patching values during step tests. By default it returns
    its argument unchanged, so patched behaviour is easy to detect.
    """
    return value
|
bc51f687b8f31037911ecafebba2b4f2a2e757f9
| 67,565
|
def _version(name):
"""Return the version component of a package name."""
return name.rpartition("-")[2]
|
966da2c3d5be3d4991d5593f990f37e0683f16fc
| 67,568
|
def make_apply_H(H):
    """Generate a function that applies H to Ψ (the product H @ psi)."""
    def apply_H(psi):
        # Matrix-vector (or matrix-matrix) product with the captured H.
        return H @ psi

    return apply_H
|
336d7be135d6c40f7e2c53ef5c94da5c4887c38e
| 67,575
|
def mirror_search_terms(terms):
    """
    Interchange the sidedness of a query.

    :param terms: List of strings matching (H|L)[0-9]+(l|r)?
                  <has/lacks><ringid>[<left/right>]
    :return: The same terms with occurrences of 'l' and 'r' interchanged
    """
    # A translation table swaps the two characters in one pass, replacing
    # the temporary-placeholder replace() chain.
    swap = str.maketrans("lr", "rl")
    return [term.translate(swap) for term in terms]
|
4d44eae7d3ca1301ce7d1aba42b4ef8737ae852f
| 67,577
|
def mutate(df, **kwargs):
    """
    Creates new variables (columns) in the DataFrame specified by keyword
    argument pairs, where the key is the column name and the value is the
    new column value(s). The input frame is not modified.

    Args:
        df (pandas.DataFrame): data passed in through the pipe.

    Kwargs:
        **kwargs: keys are the names of the new columns, values indicate
            what the new column values will be.

    Example:
        diamonds >> mutate(x_plus_y=X.x + X.y) >> select_from('x') >> head(3)
              x     y     z  x_plus_y
        0  3.95  3.98  2.43      7.93
        1  3.89  3.84  2.31      7.73
        2  4.05  4.07  2.31      8.12
    """
    augmented = df.assign(**kwargs)
    return augmented
|
874fec9cc6d756c57b9002fcb7c5c3131a29a9a8
| 67,588
|
def get_neighbours(
    row: int, column: int, rows: int, cols: int
) -> list[tuple[int, int]]:
    """Return a list of neighbouring locations, clamped to the grid.

    Args:
        row (int): The row of the point to find neighbours for
        column (int): The column of the point to find neighbours for
        rows (int): The total number of rows available
        cols (int): The total number of columns available

    Returns:
        list[tuple[int, int]]: Coordinate pairs above, below, left of and
        right of the point (edge cells repeat themselves).
    """
    above = (max(0, row - 1), column)
    below = (min(rows - 1, row + 1), column)
    left = (row, max(0, column - 1))
    right = (row, min(cols - 1, column + 1))
    return [above, below, left, right]
|
5aa31a6c4216de6f609a98fdaf70fc59cf5e445a
| 67,595
|
from typing import Iterable
from typing import List
def unique_in_order(iterable: Iterable) -> list:
    """
    Takes a sequence and returns a list of its items with runs of equal
    adjacent values collapsed to a single item, preserving the original
    order of elements.

    :param iterable:
    :return:
    """
    result: List = []
    for item in iterable:
        # Skip items that merely repeat the previous one.
        if result and item == result[-1]:
            continue
        result.append(item)
    return result
|
e46b28199e0919963e88ecfea0c282305eb7df7a
| 67,598
|
def _clean_message(output: str) -> str:
"""Strips the succeeding new line and carriage return characters."""
return output.rstrip("\n\r")
|
63cad56be4c8a752807246f90f494c05ea67ef37
| 67,603
|
def _find_party_idx(party_id, endpoint_list):
"""
return the index of the given party id in the endpoint list
:param party_id: party id
:param endpoint_list: list of endpoints
:return: the index of endpoint with the party_id, or -1 if not found
"""
for idx in range(0, len(endpoint_list)):
if party_id == int(endpoint_list[idx].split(":")[0]):
return idx
return -1
|
7a49335fb2522bc7c19e87a112b768353c82bed2
| 67,606
|
import re
def get_sections(text):
    """Returns docstring sections.
    Args:
        text (str): The raw docstring for extracting sections.
    Returns:
        sections (list): The extracted sections, each a dict with
            'header' and 'body' keys; an empty list when text is None.
    """
    # Matches a section header such as "Args:\n" preceded by whitespace.
    pattern = re.compile(
        r"(?<=\s)(Args|Returns|Example|Demo)\s*:\n", flags=re.S
    )
    sections = []
    if text is not None:
        tokens = []
        # Each token is (header start, body start, header name); text
        # before the first header is collected under 'Overview'.
        tokens.append((None, 0, 'Overview'))
        for m in pattern.finditer(text):
            s, e = m.span()
            # Drop the trailing ":\n" from the matched header text.
            header = m.group(0)[:-2]
            tokens.append((s, e, header))
        # Sentinel so the last section extends to the end of the text.
        tokens.append((len(text), None, None))
        # A section body runs from its own body start to the start of
        # the next header.
        for cur, nxt in zip(tokens, tokens[1:]):
            _, s, header = cur
            e, _, _ = nxt
            body = text[s:e]
            sections.append({'header': header, 'body': body})
    return sections
|
8ae7ad19c25b68411d307aa81fbdb3edbde3226a
| 67,611
|
def is_number(item: str) -> bool:
    """Return True if `item` can be parsed as a float, False otherwise
    (including non-string/non-numeric types)."""
    try:
        float(item)
    except (TypeError, ValueError):
        return False
    return True
|
29e3bb6783616a36fe2b6276ee73d20826a0b919
| 67,612
|
def merge_cluster_pair(clusts, gap_ind):
    """
    Merge the two clusters that border a gap.

    Notes:
        * the new cluster is a continuous range of indices
        * the gap index "i" merges clusters "i" and "i+1"
    """
    start = clusts[gap_ind][0]
    stop = clusts[gap_ind + 1][-1] + 1
    merged = list(range(start, stop))
    # Clusters before the gap, the merged span, then those after the pair.
    return clusts[:gap_ind] + [merged] + clusts[gap_ind + 2:]
|
732c1d0827573740422eb0fca60a3396f50f43dd
| 67,614
|
from datetime import datetime
def time_from_str(src, date_format):
    """
    Create a datetime object from a time string and a given format.

    :param src: time string
    :param date_format: strptime-style format describing `src`
    :return: datetime.datetime object
    """
    parsed = datetime.strptime(src, date_format)
    return parsed
|
90ee84f2ecb0d7d2a1865b152b8386730964f094
| 67,619
|
import itertools
def grouper(iterable, n):
    """Split an iterable into chunks of length `n`, padding the last chunk
    with None if required."""
    # n references to one shared iterator yield consecutive n-tuples.
    chunks = [iter(iterable)] * n
    return itertools.zip_longest(*chunks, fillvalue=None)
|
a40ea874ee009c73c54745b267d1a2650b711e6f
| 67,620
|
def wrap_url(s, l):
    """Wrap a URL string.

    Splits the URL on '/' and re-joins the parts into lines of at most
    `l` characters where possible, joining the lines with a '/' followed
    by a newline.
    """
    parts = s.split('/')
    if len(parts) == 1:
        # No '/' to break on; return the URL unchanged.
        return parts[0]
    else:
        i = 0  # index of the first part of the line currently being built
        lines = []
        for j in range(i, len(parts) + 1):
            tv = '/'.join(parts[i:j])       # current candidate line
            nv = '/'.join(parts[i:j + 1])   # candidate with one more part
            # Flush the line when adding the next part would exceed the
            # limit, or when there is nothing left to add (nv == tv).
            if len(nv) > l or nv == tv:
                i = j
                lines.append(tv)
        return '/\n'.join(lines)
|
4da93ee941b26711bc21a0653a4307a3edc72a64
| 67,626
|
def rearrange(letter, string, pos):
    """
    Generate new strings by inserting `letter` at every position of
    `string` from `pos` onwards (inclusive of appending at the end).

    Returns a list with the new strings.
    """
    # Base case: the only remaining insertion point is at the very end.
    if pos == len(string):
        return [string + letter]
    inserted = string[:pos] + letter + string[pos:]
    return [inserted] + rearrange(letter, string, pos + 1)
|
a229960b702056a21784c978376ea64aaecd8e5a
| 67,628
|
import re
def image_basename(image_name):
    """
    Get the image name without its tag.

    The redundant re.search guard was dropped: re.sub already returns
    the string unchanged when the pattern does not match.
    """
    return re.sub(r'(:[^:]+)$', '', image_name)
|
64926a0af96f37e1948d0a8401fff939b279e948
| 67,635
|
from typing import List
def xor_buf(b1: List, b2: List) -> bytes:
    """
    xor_buf - XOR two byte sequences element-wise, truncating to the
    shorter input.

    Args:
        b1: List (or bytes) of ints
        b2: List (or bytes) of ints

    Returns:
        bytes: the XOR of the overlapping prefix
    """
    # zip stops at the shorter input, replacing the explicit min()/range
    # index arithmetic.
    return bytes(x ^ y for x, y in zip(b1, b2))
|
13da554ab305ee9547e00d054de9d44397201c73
| 67,637
|
def create_average_data(json_data):
    """
    Args:
        json_data -- School data in tree structure
                     ({school: {year: {field: value}}}); years before
                     2005 are ignored.
    Returns:
        dict -- Average of financial data across schools that provided
        data, keyed by year then field.
    """
    avg = {}
    counts = {}
    for school in json_data:
        for year in json_data[school]:
            if year < 2005:
                continue
            school_year = json_data[school][year]
            if year not in avg:
                # Bug fix: copy the school's dict instead of aliasing it —
                # the += accumulation below used to mutate the caller's
                # input data.
                avg[year] = dict(school_year)
                counts[year] = {elem: 1 for elem in school_year}
            else:
                for elem in school_year:
                    if elem not in avg[year]:
                        avg[year][elem] = school_year[elem]
                        counts[year][elem] = 1
                    else:
                        avg[year][elem] += school_year[elem]
                        counts[year][elem] += 1
    # Turn the accumulated sums into per-field means.
    for year in avg:
        for elem in avg[year]:
            avg[year][elem] = avg[year][elem] / counts[year][elem]
    return avg
|
ec10dbd27c67bd56041503a71587ab04d2221582
| 67,639
|
def net2list(net_root):
    """
    Use topological order (BFS) to collect the ops of a net into a list.

    The root's own op is not included — traversal starts from the root's
    child nodes, matching the original behaviour.
    """
    # Local import keeps the file's top-level imports untouched.
    from collections import deque
    # Bug fixes: dict.iteritems() is Python-2-only (AttributeError on
    # Python 3), and re-slicing the queue head was O(n) per pop — a deque
    # gives O(1) popleft.
    op_list = []
    queue = deque(net_root.ops.values())
    while queue:
        node = queue.popleft()
        op_list.append(node.op)
        queue.extend(node.ops.values())
    return op_list
|
824b7b9a46f72d44a8efed73c9ed0e1d658bacb3
| 67,642
|
def scores(payoff):
    """
    The scores matrix excluding self-interactions.

    Parameters
    ----------
    payoff : list
        One row per player; each row holds, per opponent (in player-index
        order), the list of payoffs for each repetition:
        [
            [[a, j], [b, k], [c, l]],
            [[d, m], [e, n], [f, o]],
            [[g, p], [h, q], [i, r]],
        ]

    Returns
    -------
    list
        One row per player with the total score for each repetition:
        [
            [a + b + c, j + k + l],
            [d + e + f, m + n + o],
            [g + h + i, p + q + r],
        ]
        As in Axelrod's original tournament there are no
        self-interactions, so a player's own entry is excluded from its
        totals.
    """
    nplayers = len(payoff)
    repetitions = len(payoff[0][0])
    totals = []
    for player, row in enumerate(payoff):
        per_rep = [
            sum(row[opponent][rep]
                for opponent in range(nplayers) if opponent != player)
            for rep in range(repetitions)
        ]
        totals.append(per_rep)
    return totals
|
b35c40d8b1dab71aa49baf0d3b268fad31a4e3a5
| 67,643
|
def top_bbox_from_scores(bboxes, scores):
    """
    Return the bounding box with the highest score.

    Args:
        bboxes (list): bounding boxes, one per object
        scores (list): scores aligned with `bboxes`

    Returns:
        matched_bbox: the box whose score is maximal (the first one wins
        on ties, matching a stable descending sort)
    """
    matched_bbox, _score = max(zip(bboxes, scores), key=lambda pair: pair[1])
    return matched_bbox
|
86f2cfdff4fd4166b37d9cb76933a074be225c8e
| 67,646
|
def get_serial_number(deck):
    """
    Return the stream deck's serial number with stray control characters
    (NUL / SOH padding) removed from the end of the raw value.

    :params deck: stream deck
    :returns: sanitised serial number of the stream deck
    """
    with deck:
        raw = deck.get_serial_number()
    return raw.replace("\x00", "").replace("\x01", "")
|
d66b0fd0d58dfa948e0091b07fe8609aad92388b
| 67,647
|
def scale_bounding_boxes(bounding_boxes, orig_width, new_width):
    """
    Scale a list of bounding boxes to reflect a change in image size.

    Args:
        bounding_boxes: List of lists of [x1, y1, x2, y2], where
            (x1, y1) is the upper left corner of the box, x2 is the width
            of the box, and y2 is the height of the box.
        orig_width: Width of the images to which bounding_boxes apply
        new_width: Width of the target images to which the bounding boxes
            should be translated

    Returns:
        A new list of bounding boxes with the appropriate scaling factor
        applied.
    """
    scale_factor = new_width / orig_width
    # Nested list comprehensions still materialise plain lists (no
    # generators involved), which keeps OpenCV happy downstream.
    return [
        [round(float(coord) * scale_factor) for coord in bbox]
        for bbox in bounding_boxes
    ]
|
f670e9d37853075f169e933ff5128edbd300aa9f
| 67,649
|
import re
def _camel_to_python(name):
"""Converts camelcase to Python case."""
return re.sub(r'([a-z]+)([A-Z])', r'\1_\2', name).lower()
|
228f31ed3a6d736e28dfab35733dcc78e12a0634
| 67,651
|
import builtins
def using_ipython() -> bool:
"""Check if code is run from IPython (including jupyter notebook/lab)"""
return hasattr(builtins, '__IPYTHON__')
|
c20354250d324cd28d2f2e5f101fe50db20cec2b
| 67,653
|
def _nmono_linesearch(obj, grad, x, d, obj_hist, M=10, sig1=0.1,
sig2=0.9, gam=1e-4, maxiter=100):
"""
Implements the non-monotone line search of Grippo et al. (1986),
as described in Birgin, Martinez and Raydan (2013).
Parameters
----------
obj : real-valued function
The objective function, to be minimized
grad : vector-valued function
The gradient of the objective function
x : array_like
The starting point for the line search
d : array_like
The search direction
obj_hist : array_like
Objective function history (must contain at least one value)
M : positive integer
Number of previous function points to consider (see references
for details).
sig1 : real
Tuning parameter, see references for details.
sig2 : real
Tuning parameter, see references for details.
gam : real
Tuning parameter, see references for details.
maxiter : positive integer
The maximum number of iterations; returns Nones if convergence
does not occur by this point
Returns
-------
alpha : real
The step value
x : Array_like
The function argument at the final step
obval : Real
The function value at the final step
g : Array_like
The gradient at the final step
Notes
-----
The basic idea is to take a big step in the direction of the
gradient, even if the function value is not decreased (but there
is a maximum allowed increase in terms of the recent history of
the iterates).
References
----------
Grippo L, Lampariello F, Lucidi S (1986). A Nonmonotone Line
Search Technique for Newton's Method. SIAM Journal on Numerical
Analysis, 23, 707-716.
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint).
"""
alpha = 1.
last_obval = obj(x)
obj_max = max(obj_hist[-M:])
for iter in range(maxiter):
obval = obj(x + alpha*d)
g = grad(x)
gtd = (g * d).sum()
if obval <= obj_max + gam*alpha*gtd:
return alpha, x + alpha*d, obval, g
a1 = -0.5*alpha**2*gtd / (obval - last_obval - alpha*gtd)
if (sig1 <= a1) and (a1 <= sig2*alpha):
alpha = a1
else:
alpha /= 2.
last_obval = obval
return None, None, None, None
|
63e7e6504d34392a076b606b170c15320f2fbe5b
| 67,654
|
import re
def permutation_length(nonterminal):
"""
:param nonterminal: a non-terminal of the form: '[P1234*2_1]
:return: the length of the permutations within the non-terminal
"""
NT_PERMUTATION = re.compile(r'P([0-9]+)')
matches = NT_PERMUTATION.search(nonterminal)
if matches is not None:
assert matches is not None, 'bad format %s' % nonterminal
permutation = matches.group(1)
return len(permutation)
else:
return 0
|
de562c009499a27d77e304ba0c00e07128f9d5e3
| 67,657
|
def generate_pattern(lvl, states, rewrite_rules):
"""
Inputs:
lvl
-- integer number
-- the number of times (iterations) rewrite rules will be applied
states -- string, the initial state (axiom) of the system
rewrite_rules
-- dictionary
-- keys (character) -> symbols
-- values (string) -> replacement rules
Returns string of symbols.
"""
# In each iteration: check every character in states, replace valid symbol
# with rewrite rule or copy character, and update states
for _ in range(lvl + 1):
states = ''.join([rewrite_rules.get(symbol, symbol) for symbol in states])
# Clean states form rewrite rule flags/symbols
drawing_rules = 'F+-'
states = ''.join([symbol for symbol in states if symbol in drawing_rules])
return states
|
52f1ca17060ab4c9add239bde1821e9fda77f8fb
| 67,659
|
def _(translate):
"""Identity function to mark strings for translation."""
return translate
|
a959a7acd75573f69c650168cf7d1064c875514f
| 67,667
|
import struct
def DecodeVarint(in_file):
"""
The decoding of the Varint32 is copied from
google.protobuf.internal.decoder and is only repeated here to
avoid depending on the internal functions in the library. If the
end of file is reached, return (0, 0).
"""
result = 0
shift = 0
pos = 0
# Use a 32-bit mask
mask = 0xffffffff
while 1:
c = in_file.read(1)
if len(c) == 0:
return (0, 0)
b = struct.unpack('<B', c)[0]
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise IOError('Too many bytes when decoding varint.')
|
473d89c2178cb75fa7213448cbdd6ad3ef70ee56
| 67,670
|
def calc_deltas(numbers) -> list:
"""Calculate the delta between each point and the next."""
deltas = list()
for n in range(len(numbers) - 1):
d = numbers[n + 1] - numbers[n]
deltas.append(d)
return deltas
|
b3ef22ab1a1623d854fcb2263f46ad0417d9cb40
| 67,672
|
import random
def _shuffle(x):
"""
Shuffles an iterable, returning a copy (random.shuffle shuffles in place).
"""
x = list(x)
random.shuffle(x)
return x
|
662627b2b4ae79f630c975cdb8d86837524579ab
| 67,677
|
def sort_dict(src_dict):
"""
Sort given dictionary
:param src_dict: source dict
:return: sorted dictionary
"""
sorted_dict = {k: src_dict[k] for k in sorted(src_dict.keys())}
return sorted_dict
|
71ceb261848eb7458e71ea4ea0b4cd9f1d6164a3
| 67,680
|
def generate_grid_region(distance, thickness, x, y, width, height):
"""Generate Grid structures in a specific area.
Args:
distance: Distance from the center of one "wall" to the other.
radius: Distance from one edge of the "wall" to the other.
x: x coordinate of the rectangular drawing area.
y: y coordinate of the rectangular drawing area.
width: width of the rectangular drawing area.
height: height of the rectangular drawing area.
return:
(horizontal_points, vertical_points): Set of points in each
direction.
"""
pair_distance = thickness + (distance - thickness)
walls_x_axis = int(width / pair_distance)
walls_y_axis = int(height / pair_distance)
# Calculating the gap on each side to fit the structure right in the
# middle of the area
gap_x_axis = (width - (walls_x_axis * pair_distance))/2 #gap both sides of the rectangle
gap_y_axis = (height - (walls_y_axis * pair_distance))/2
# Each "wall" is a rectangle determined by 2 coordinates in oposite vertices
horizontal_points = []
vertical_points = []
for col in range(0, walls_x_axis):
corner = x + gap_x_axis + col*distance
horizontal_points.append(((corner, y),(corner + thickness, y - height)))
for row in range(0, walls_y_axis):
corner = y - gap_y_axis - row*distance
vertical_points.append(((x , corner),(x + width,corner - thickness)))
return (horizontal_points, vertical_points)
|
e7ded7b938fccadb4dd4caf011de784f613746f6
| 67,684
|
def paginate(page):
"""
Render a pagination block with appropriate links, based on the given Django
Page object.
"""
context = {
'page': page
}
return context
|
2a817a0e971743e0739020957d2684d44ecff7c7
| 67,688
|
import json
import base64
def get_message_data(msg):
"""Extracts the message data from the sso message
Note: the message is not verified here!
"""
return json.loads(base64.b64decode(msg.split(', ')[0]))
|
652807cec841df771a070a81952156de02e1d560
| 67,695
|
async def hello_world():
"""A very simple Hello World example that simply returns a json response.
This is where you would add additional information about the endpoint.
As you can see you can use standard docStrings for this section.
"""
return {"Message": "Hello World!"}
|
6ce9e5c582ded176e82dae2f3990771469058c35
| 67,696
|
def get_values_missing(meta_data, sep=', '):
"""
Read the meta data and for each column return the values used to indicate missing data.
:params meta_data: data frame with the meta data of values
:params sep: The separator of values
:return: a dictionary where the keys are column names and values are a list
"""
x = meta_data.loc[meta_data.Meaning == 'unknown', ['Attribute', 'Value']]
x.loc[:, 'Value'] = x.loc[:, 'Value'].str.split(sep)
na_dict = x.set_index(['Attribute']).loc[:, 'Value'].to_dict()
return na_dict
|
907a57e189989a7b6c16c188917dd1be478823a3
| 67,703
|
from typing import List
def get_list_of_num_docs(start, multiplier, count) -> List[int]:
"""
returns list of `number of docs` to be indexed
e.g. - for start=512, multiplier=4, count=4, returns [512, 2048, 8192, 32768]
we'll run the stress-test for different num_docs & benchmark it
"""
return [int(start) * (int(multiplier) ** i) for i in range(int(count))]
|
d2f4de94e08587f957b05f113175681746643ac3
| 67,710
|
def _validate(validation_func, err, *args):
"""Generic validation function that returns default on
no errors, but the message associated with the err class
otherwise. Passes all other arguments into the validation function.
:param validation_func: The function used to perform validation.
:param err: The error class to catch.
:param args: The arguments to pass into the validation function.
:return: Validation error message, or empty string if no error.
"""
try:
validation_func(*args)
except err as validation_err:
return str(validation_err)
return ''
|
bd7b5831a203aef0f414cd650ea6b1b7ea01623c
| 67,712
|
import re
def _FilterPaths(input_api):
"""Returns input files with certain paths removed."""
files = []
for f in input_api.AffectedFiles():
file_path = f.LocalPath()
# Filter out changes in web_tests/.
if ('web_tests' + input_api.os_path.sep in file_path
and 'TestExpectations' not in file_path):
continue
if '/PRESUBMIT' in file_path:
continue
# Skip files that were generated by bison.
if re.search(
'third_party/blink/renderer/' +
'core/xml/xpath_grammar_generated\.(cc|h)$', file_path):
continue
files.append(input_api.os_path.join('..', '..', file_path))
return files
|
4e4e571cac8a82021be7180cea088b5a1d2b4dcc
| 67,714
|
def get_setting(key, paired_attributes, meta=None, default_value="", remove=False):
"""
Looks at the attributes of the code and the metadata of the document (in that order) and returns the value when it
finds one with the specified key.
:param key: The key or keys that should be searched for. Only the result for the first key found will be returned.
:type key: str | list[str]
:param paired_attributes: The attributes for the code.
:type paired_attributes: dict[str, str]
:param meta: The metadata of the document.
:type meta: dict[str, str]
:param default_value: The value that should be found if the key(s) can't be found.
:type default_value: str | object
:param remove: Should the setting be removed from the attributes if it was found there.
:type remove: bool
:return: The value that is associated with the key or the default value if key not found.
:rtype: str | object
"""
if not isinstance(key, list):
key = [key]
for single_key in key:
if single_key in paired_attributes:
return paired_attributes.pop(single_key) if remove else paired_attributes[single_key]
if meta is not None and single_key in meta:
return meta[single_key]
return default_value
|
ad4f03063f6a95ab3af5c353fdcd89fa90a952ac
| 67,717
|
def verify_patient_id(patient_id):
"""
This function is meant to check if the patient id is the right
format
This function first checks to see if the patient id is an integer.
If it is an integer than that number is returned without
manipulation. If the patient_id is a string then this function checks
to see if it is a numeric string. If it is then the function converts
the patient id into an integer and returns it. If the patient id is
the wrong format, then this function returns false.
:param patient_id: a number identifying a patient
:return: either an integer patient id or False
"""
if type(patient_id) == int:
return patient_id
if type(patient_id) == str:
if patient_id.isdigit():
return int(patient_id)
return False
|
1d7952088f7e0ff4fc853a784ac751a444f96301
| 67,718
|
def editor_not_blocked(merged: list):
"""
Check for Editor "no blocks on any merged account" criterion.
Parameters
----------
merged : list
A list of merged accounts for this Editor as returned by globaluserinfo.
Returns
-------
bool
Answer to the question: is the editor's free of blocks for all merged accounts?
"""
# If, for some reason, this information hasn't come through,
# default to user not being valid.
if not merged:
return False
else:
# Check: not blocked on any merged account.
# Note that this looks funny since we're inverting the truthiness returned by the check for blocks.
return False if any("blocked" in account for account in merged) else True
|
001c003d9090231502363ef65f0e29552deca9e7
| 67,723
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.