content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def matMultDef(a, b):
    """Multiply matrices a and b iteratively by the textbook definition.

    :param a: matrix as a list of rows (n x m)
    :param b: matrix as a list of rows (m x p); len(b) must equal len(a[0])
    :return: product matrix c of dimension n x p
    """
    a_rows, a_cols, b_cols = len(a), len(a[0]), len(b[0])
    # Zero-filled result matrix of dimension a_rows x b_cols.
    c = [[0] * b_cols for _ in range(a_rows)]
    # c[i][j] = sum over k of a[i][k] * b[k][j]
    for i in range(a_rows):
        for j in range(b_cols):
            for k in range(a_cols):
                c[i][j] += a[i][k] * b[k][j]
    return c
def _congruent(a: int, b: int, n: int) -> bool:
"""
Returns true if a is congruent to b modulo n.
"""
assert type(a) is int
return (a % n) == (b % n) | 73e9ded677a042792f6458cff7ceffb5932b532c | 105,050 |
def CanLen(input):
    """
    Return True when len() can be applied to the supplied input.
    """
    try:
        len(input)
    except TypeError:
        return False
    return True
import string
def fix_latex_command_regex(pattern, application='match'):
    """
    Given a pattern for a regular expression match or substitution,
    the function checks for problematic patterns commonly
    encountered when working with LaTeX texts, namely commands
    starting with a backslash.
    For a pattern to be matched or substituted, and extra backslash is
    always needed (either a special regex construction like \w leads
    to wrong match, or \c leads to wrong substitution since \ just
    escapes c so only the c is replaced, leaving an undesired
    backslash). For the replacement pattern in a substitutions, specified
    by the application='replacement' argument, a backslash
    before any of the characters abfgnrtv must be preceeded by an
    additional backslash.
    The application variable equals 'match' if pattern is used for
    a match and 'replacement' if pattern defines a replacement
    regex in a re.sub command.
    Caveats: let pattern just contain LaTeX commands, not combination
    of commands and other regular expressions (\s, \d, etc.) as the
    latter will end up with an extra undesired backslash.
    Here are examples on failures:
    >>> re.sub(r'\begin\{equation\}', r'\[', r'\begin{equation}')
    '\\begin{equation}'
    >>> # match of mbox, not \mbox, and wrong output:
    >>> re.sub(r'\mbox\{(.+?)\}', r'\fbox{\g<1>}', r'\mbox{not}')
    '\\\x0cbox{not}'
    Here are examples on using this function:
    >>> from scitools.misc import fix_latex_command_regex as fix
    >>> pattern = fix(r'\begin\{equation\}', application='match')
    >>> re.sub(pattern, r'\[', r'\begin{equation}')
    '\\['
    >>> pattern = fix(r'\mbox\{(.+?)\}', application='match')
    >>> replacement = fix(r'\fbox{\g<1>}', application='replacement')
    >>> re.sub(pattern, replacement, r'\mbox{not}')
    '\\fbox{not}'
    Avoid mixing LaTeX commands and ordinary regular expression
    commands, e.g.:
    >>> pattern = fix(r'\mbox\{(\d+)\}', application='match')
    >>> pattern
    '\\\\mbox\\{(\\\\d+)\\}'
    >>> re.sub(pattern, replacement, r'\mbox{987}')
    '\\mbox{987}' # no substitution, no match
    """
    # For a match, ANY letter following a backslash can form an unintended
    # regex escape; for a replacement, only the escapes re itself interprets
    # (\a \b \f \g \n \r \t \v) are dangerous.
    problematic_letters = string.ascii_letters if application == 'match' \
        else 'abfgnrtv'

    for letter in problematic_letters:
        problematic_pattern = '\\' + letter

        if letter == 'g' and application == 'replacement':
            # no extra \ for \g<...> in pattern (that is re's group syntax)
            if r'\g<' in pattern:
                continue

        ok_pattern = '\\\\' + letter
        # Escape only once: skip if the doubled form is already present.
        if problematic_pattern in pattern and not ok_pattern in pattern:
            pattern = pattern.replace(problematic_pattern, ok_pattern)
    return pattern
def remove_none_values(data) -> dict:
    """Build a new dict from *data*, keeping only the non-None values."""
    return {k: data[k] for k in data if data[k] is not None}
def as_mutable_matrix(matrix):
    """
    Undo sympy's conversion of matrices to immutable objects by calling
    ``.as_mutable()``; exposed as a plain function just for cleaner syntax.
    """
    mutable = matrix.as_mutable()
    return mutable
import torch
def pad_avail(avails: torch.Tensor, pad_to: int) -> torch.Tensor:
    """Zero-pad the last dimension of *avails* up to *pad_to* points.

    :param avails: availability tensor of shape (B, N, P); P is padded
    :param pad_to: desired number of points (must be >= P)
    :return: tensor of shape (B, N, pad_to) with the original values first
    """
    batch, num_els, num_points = avails.shape
    padding = torch.zeros(
        batch, num_els, pad_to - num_points,
        dtype=avails.dtype, device=avails.device,
    )
    return torch.cat([avails, padding], dim=-1)
def tokens_to_ids(tokenizer, tokens):
    """
    Return a dict mapping each token to its id.

    Supports both `token_to_id` (tokenizers library) and
    `convert_tokens_to_ids` (transformers library) style tokenizers.
    """
    if hasattr(tokenizer, "token_to_id"):
        lookup = tokenizer.token_to_id
    else:
        lookup = tokenizer.convert_tokens_to_ids
    return {token: lookup(token) for token in tokens}
def me(conn):
    """Return the login name of the authenticated GitHub account."""
    account = conn.me()
    return account.login
def _atomReprAsInt(s: str) -> int:
"""Translate CLVM atom repr to int."""
if s.startswith("0x"):
return int(s, base=16)
elif s.startswith('"'):
return int.from_bytes(s[1:-1].encode("ascii"), "big")
return int(s) | 0fc8cf80b719e38b0d520fc415331978f036afa0 | 105,077 |
from pydantic import BaseModel # noqa: E0611
def get_elements_of_model_type(object_of_interest, type_of_interest):
    """
    Return a flat list of a given type of pydantic object based on a presumed encompasing root object.
    One warning. This object preserves the underlying object tree. So when you use this function do NOT recurse on the
    results or you will end up with duplication errors.

    object_of_interest: root object (pydantic model, list, or scalar) to walk.
    type_of_interest: class to collect; comparison is by exact type
        (``type(...) ==``), so subclasses are NOT collected.
    """
    loi = []
    # Exact type match; a matching object is still traversed below, since
    # it may contain further matches.
    if type(object_of_interest) == type_of_interest:
        loi.append(object_of_interest)
    # keep going
    if type(object_of_interest) is list:
        for item in object_of_interest:
            loi.extend(get_elements_of_model_type(item, type_of_interest))
    if isinstance(object_of_interest, BaseModel):
        # __fields_set__ (pydantic v1) holds only explicitly-set fields --
        # fields left at their defaults are not traversed.
        for field in object_of_interest.__fields_set__:
            if field == '__root__':
                continue
            loi.extend(get_elements_of_model_type(getattr(object_of_interest, field), type_of_interest))
    return loi
def check_disjoint_filter(record_id: str, disjoint_id_sets: dict, record_ids_map: dict) -> bool:
    """Check that a record shares no ids with the disjoint datasets.

    Used in filter.py.

    Args:
        record_id (str): record id for a recording.
        disjoint_id_sets (Dict[str, Set[str]]): maps each id-name along
            which the output dataset must be disjoint (e.g. speaker,
            lesson) to the set of ids reserved by the disjoint datasets.
        record_ids_map (Dict[str, Dict[str, str]]): maps record_id to its
            per-name ids.

    Returns:
        (bool): True when none of the record's ids appear in any
        disjoint id set; False as soon as one overlap exists.
    """
    record_ids = record_ids_map[record_id]
    return all(
        record_ids[id_name] not in id_set
        for id_name, id_set in disjoint_id_sets.items()
    )
from typing import Union
import pathlib
def create_directory(path: Union[pathlib.Path, str]) -> pathlib.Path:
    """Create the given directory (and missing parents) recursively.

    Args:
        path (str): Path of the directory we want to create.

    Returns:
        pathlib.Path: The path.

    Raises:
        FileExistsError: if *path* exists but is not a directory.
    """
    path = pathlib.Path(path)
    # exist_ok=True makes this atomic and race-free (the original
    # is_dir()-then-mkdir(exist_ok=False) could race with a concurrent
    # creator); an existing non-directory still raises FileExistsError.
    path.mkdir(parents=True, exist_ok=True)
    return path
def get_text_or_binary(filename):
    """Guess whether *filename* holds text or binary data.

    Reads the first 1024 bytes and attempts to decode them as UTF-8.

    :param filename: path of the file to sniff
    :return: 'text/plain' if the sample decodes as UTF-8, else
        'application/octet-stream'. A decode error in the final 3 bytes of
        a *full* 1024-byte sample is forgiven, because the fixed read
        window may have split a multi-byte character (the original
        version misclassified such files as binary).
    """
    with open(filename, 'rb') as f:
        chunk = f.read(1024)
    try:
        chunk.decode('utf-8')
        return 'text/plain'
    except UnicodeDecodeError as err:
        # A UTF-8 code point is at most 4 bytes; an error starting in the
        # last 3 bytes of a truncated sample is likely a split character.
        if len(chunk) == 1024 and err.start >= 1021:
            return 'text/plain'
        return 'application/octet-stream'
def stations_level_over_threshold(stations, tol):
    """Return (station, relative_level) pairs for stations whose relative
    water level exceeds *tol*, sorted with the highest level first.
    Stations reporting a relative level of None are skipped."""
    flagged = [
        (station, station.relative_water_level())
        for station in stations
        if station.relative_water_level() is not None
        and station.relative_water_level() > tol
    ]
    return sorted(flagged, key=lambda pair: pair[1], reverse=True)
def is_abstract_class(cls):
    """Return True when *cls* is abstract, i.e. it still has unimplemented
    abstractmethod/abstractproperty members inherited from its bases."""
    abstract_members = getattr(cls, "__abstractmethods__", False)
    return bool(abstract_members)
def json_decomment(json, prefix='#', null=False):
    """
    Recursively remove any JSON object member whose name begins with
    *prefix* (default '#') and return the result.

    :param json: parsed JSON value (dict, list, or scalar)
    :param prefix: member names starting with this string are "comments"
    :param null: if True, keep prefixed members but replace their values
        with None instead of deleting them
    :return: a new structure with prefixed members removed/nulled;
        scalars are returned unchanged
    """
    if type(json) is dict:
        result = {}
        for item in json.keys():
            if item.startswith(prefix):
                # The original had a bare `next` here -- a Perl-ism that
                # just evaluated the builtin and did nothing.  Dropping
                # the member (doing nothing) is the intended behavior.
                if null:
                    result[item] = None
            else:
                result[item] = json_decomment(json[item], prefix=prefix,
                                              null=null)
        return result
    elif type(json) is list:
        return [json_decomment(item, prefix=prefix, null=null) for item in json]
    else:
        return json
def unique(lst):
    """Return the elements of *lst* with duplicates removed, keeping the
    first occurrence of each (works for unhashable elements too)."""
    seen = []
    for item in lst:
        if item not in seen:
            seen.append(item)
    return seen
def delete_duplicates(ls):
    """
    Question 6.6: Delete duplicates from a sorted array and return the
    number of elements remaining.

    Compacts *ls* in place so that its first k entries are the distinct
    values in order; returns k (0 for an empty list).
    """
    if not ls:
        return 0
    tail = 0
    for pos in range(1, len(ls)):
        if ls[pos] != ls[tail]:
            tail += 1
            ls[tail] = ls[pos]
    return tail + 1
from typing import Optional
def calculate_min_periods(
    window: int,
    min_periods: Optional[int],
    num_values: int,
    required_min_periods: int,
    floor: int,
) -> int:
    """
    Resolve the final minimum-periods value for rolling aggregations.

    Parameters
    ----------
    window : passed window value
    min_periods : passed min periods value (None means "use the window")
    num_values : total number of values
    required_min_periods : required min periods per aggregation function
    floor : lower bound applied to the result

    Returns
    -------
    min_periods : int

    Raises
    ------
    ValueError : if the resolved value exceeds the window or is negative
    """
    if min_periods is None:
        effective = window
    else:
        effective = max(required_min_periods, min_periods)
    if effective > window:
        raise ValueError(f"min_periods {effective} must be <= window {window}")
    if effective > num_values:
        # Too few values to ever satisfy the requirement; pandas convention.
        effective = num_values + 1
    elif effective < 0:
        raise ValueError("min_periods must be >= 0")
    return max(effective, floor)
import random
def setRandomParameters(net,seed=None,randFunc=random.random):
    """
    Sets parameters to random values given by the function randFunc (by
    default, uniformly distributed on [0,1) ).

    net: network object exposing GetParameters() and setOptimizables().
    seed: forwarded to random.seed; None reseeds the *global* RNG from
        system entropy, so results are only reproducible with an explicit
        seed.
    randFunc: called as randFunc(n) and expected to return n random
        values (e.g. a numpy-style sampler).

    # NOTE(review): the default randFunc=random.random takes no
    # arguments, so calling this function without an explicit randFunc
    # raises TypeError -- the default looks broken; confirm the intended
    # sampler (e.g. scipy/numpy random) before relying on it.
    """
    random.seed(seed)
    net.setOptimizables( randFunc(len(net.GetParameters())) )
    return net.GetParameters()
def med_fib(n):
    """Return the nth Fibonacci number, with Fib(0) = 0 and Fib(1) = 1."""
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def is_square(m):
    """
    Test whether a numpy matrix is square.

    Args:
        m (np.matrix): The matrix.

    Returns:
        (bool): True when the row count equals the column count.
    """
    rows, cols = m.shape[0], m.shape[1]
    return rows == cols
import calendar
def which_day_of_the_week(year: int, month: int, day: int) -> str:
    """Return the English weekday name for the given date.

    >>> which_day_of_the_week(2016, 7, 29)
    Friday
    """
    # Tuple literal replaces the original seven append() calls; index
    # order matches calendar.weekday() (0 == Monday).
    weekdays = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
                'Friday', 'Saturday', 'Sunday')
    return weekdays[calendar.weekday(year, month, day)]
def all_(*constraints):
    """Combine button constraints: the result is true only when every
    given constraint evaluates to true for the form."""
    def check(form):
        for constraint in constraints:
            if not constraint(form):
                return False
        return True
    return check
def invert_bitstring(string):
    """Invert every bit in a bitstring ('0' <-> '1').

    Uses a single-pass translation table instead of the original chained
    replace() calls, which abused '2' as a temporary marker (and silently
    corrupted any literal '2' already present in the input).
    """
    return string.translate(str.maketrans("01", "10"))
import inspect
import re
def get_caller(stack_index=2, root_dir=None):
    # pylint: disable=g-doc-args
    """Returns file.py:lineno of your caller.
    A stack_index of 2 will provide
    the caller of the function calling this function. Notice that stack_index
    of 2 or more will fail if called from global scope.

    stack_index: frames to walk up the call stack (2 = caller's caller).
    root_dir: if given, this prefix is stripped from the reported filename.
    """
    # Stack record [0] is this function itself, [1] its caller, and so on.
    caller = inspect.getframeinfo(inspect.stack()[stack_index][0])
    # Trim the filenames for readability.
    filename = caller.filename
    if root_dir is not None:
        # NOTE(review): root_dir is interpolated into the regex unescaped --
        # assumes it contains no regex metacharacters.
        filename = re.sub("^" + root_dir + "/", "", filename)
    return "%s:%d" % (filename, caller.lineno)
def load_txt(fname, mode='r'):
    """Load a .txt file into memory as a list of lines.

    Parameters
    ----------
    fname : str
        File name + path.
    mode : str, optional
        File open mode.

    Returns
    -------
    list of strings
        One entry per newline-terminated line; any content after the
        final newline is dropped (original behavior).
    """
    assert fname, 'Must input a valid file name.'
    with open(fname, mode=mode, encoding="utf-8") as handle:
        contents = handle.read()
    lines = contents.split('\n')
    return lines[:-1]
def any2utf8(text, errors='strict', encoding='utf8'):
    """Convert a unicode or bytes string in the given encoding into a
    UTF-8 bytestring.

    Parameters
    ----------
    text : str or bytes
        Input text.
    errors : str, optional
        Error handling behaviour if `text` is a bytestring.
    encoding : str, optional
        Encoding of `text` if it is a bytestring.

    Returns
    -------
    bytes
        Bytestring in utf8.
    """
    if isinstance(text, str):
        return text.encode('utf8')
    # Round-trip bytes -> str -> bytes to guarantee valid UTF-8 output.
    decoded = str(text, encoding, errors=errors)
    return decoded.encode('utf8')
def compute_rsi(data, time_window):
    """
    Compute RSI (relative strength index) values.

    :param data: pandas series/dataframe of prices
    :param time_window: averaging window, e.g. 14 periods
    :return: RSI values; the first ``time_window`` entries are NaN
    """
    diff = data.diff(1).dropna()  # drop na (if any)
    # Split the one-period changes into gains (>=0) and losses (<=0).
    gains = diff.clip(lower=0)
    losses = diff.clip(upper=0)
    # Exponentially weighted averages of gains and losses.
    avg_gain = gains.ewm(com=time_window - 1, min_periods=time_window).mean()
    avg_loss = losses.ewm(com=time_window - 1, min_periods=time_window).mean()
    strength = abs(avg_gain / avg_loss)
    return 100 - 100 / (1 + strength)
def _write_instance(entity, value, sep = '&'):
"""Helper function to write the search string.
Helps to write the search string, can handle lists and values.
The different entities have to be known and can be found out with the documentation.
Parameters
----------
entity : string
The name of the search Parameters.
value : string or list
The values corresponding to the entity.
sep : string
The separator between the search parameters and values, most times OS takes '&'.
Returns
-------
string
String with the 'entities=values&....', such that Openspecimen can dissolve it.
"""
instance= ''
if isinstance(value,list):
for val in value:
instance += str(entity) + '=' + str(val).replace(' ', '+') + str(sep)
else:
instance += str(entity) + '=' + str(value).replace(' ', '+') + str(sep)
return instance | b09665294e830597a3825cdbd97bcb030f1aaada | 105,141 |
def checkdataremains(func):
    """Decorate a filtering method, raising ValueError if it filters away
    all data; otherwise the wrapped function's result passes through
    unchanged."""
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring (missing originally)
    def wrapper(*args, **kwargs):
        reflections = func(*args, **kwargs)
        if not reflections:
            raise ValueError("All data has been filtered from the reflection table")
        return reflections
    return wrapper
def normalizeLongitude(longitude):
    """
    Normalize a longitude into the range -180 to 180, not including 180.

    Args:
        longitude: A longitude in signed decimal degrees.

    Returns:
        The equivalent longitude in [-180, 180).
    """
    # Constant-time modulo arithmetic replaces the original pair of
    # while-loops, whose cost grew with |longitude| / 360.
    return ((longitude + 180) % 360) - 180
def _user_can_edit(user, group_profile):
"""Can the given user edit the given group profile?"""
return user.has_perm("groups.change_groupprofile") or user in group_profile.leaders.all() | 9ebcea9592b821fe3ec3a25186204342c119815c | 105,157 |
def conjx(x, *args):
    """Return the conjugate of *x*, multiplied by any extra arguments."""
    acc = x.conjugate()
    for factor in args:
        acc = acc * factor
    return acc
import ipaddress
def getBroadCast(inIP):
    """Return the broadcast address of *inIP*'s network as a string."""
    network = ipaddress.ip_network(str(inIP), strict=False)
    broadcast = network.broadcast_address
    return str(broadcast)
from typing import Union
from pathlib import Path
def read_file(path: Union[Path, str], mode='r') -> str:
    """Return the full contents of the file at *path*.

    :param path: file path (str or Path)
    :param mode: open mode, e.g. 'r' (text) or 'rb' (bytes)
    """
    file_path = Path(path)
    with file_path.open(mode) as handle:
        return handle.read()
def _get_pattern_strings(compiled_list):
"""Returns regex pattern strings from a list of compiled regex pattern objects.
Args:
compiled_list (list): of regular expression patterns to extract strings
from
Returns:
list: A list of regex pattern strings extracted from compiled list of
regex pattern objects.
"""
return [pattern.pattern for pattern in compiled_list] | 555087d6fc0548a149dfec4acbc1fa1643b381ae | 105,166 |
def _allocate_expr_id(allocator, exprmap):
""" Allocate a new expression id checking it is not already used.
Args:
allocator: Id allocator
exprmap: Map of existing expression names
Returns:
New id not in exprmap
"""
id = allocator.allocate()
while id in exprmap:
id = allocator.allocate()
return id | 98321e169d354e97e650e0ea9a5235315529c1dc | 105,167 |
def chirp_mass_and_total_mass_to_symmetric_mass_ratio(chirp_mass, total_mass):
    """
    Convert the chirp mass and total mass of a binary to its symmetric
    mass ratio, (M_chirp / M_total) ** (5/3).

    Parameters
    ----------
    chirp_mass: float
        Chirp mass of the binary
    total_mass: float
        Total mass of the binary

    Return
    ------
    symmetric_mass_ratio: float
        Symmetric mass ratio of the binary
    """
    mass_ratio = chirp_mass / total_mass
    return mass_ratio ** (5 / 3)
def quoteList(strns):
    """Join strings into a single '"string1","string2",...' string.

    Note: in SQLite, double quotes escape table and column names; single
    quotes delimit string literals.
    """
    quoted = ['"' + s + '"' for s in strns]
    return ','.join(quoted)
def id_2_name(sub) -> dict:
    """
    Create a single-entry mapping from subscription id to subscription name.

    :param sub: one subscription object
    :return: mapping_dict
    """
    mapping = {sub.subscription_id: sub.subscription_name}
    return mapping
def split_container_name(name):
    """
    Split a container name (e.g. samtools:1.7--1) into its parts.

    >>> split_container_name('samtools:1.7--1')
    ['samtools', '1.7', '1']
    """
    normalized = name.replace("--", ":")
    return normalized.split(":")
import re
def cut_section(start, end, s, position=0):
    """
    extract the piece of text between start pattern and end pattern starting at
    position <position>
    returns a tuple (subsection, end-position)

    Behavior notes (as written):
    - Both patterns are compiled with re.DOTALL, so '.' spans newlines.
    - If ``start`` is given but not found, extraction falls back to
      starting at ``position``; if ``end`` is not found, it runs to the
      end of ``s``.
    - The returned end-position points just past the extracted piece and
      can be fed back in as the next search position.
    """
    if start is None and end is None:
        return (s, 0)
    if start is not None:
        start_re = re.compile(start, re.DOTALL)
        start_m = start_re.search(s, position)
        if start_m:
            # Slice begins where the start pattern begins (match included).
            _s = start_m.start()
        else:
            _s = position
    else:
        _s = position
    if end is not None:
        end_re = re.compile(end, re.DOTALL)
        # End search starts at the section start, so end may overlap start.
        end_m = end_re.search(s, _s)
        if end_m:
            # Slice ends where the end pattern ends (match included).
            _e = end_m.end()
        else:
            _e = len(s)
    else:
        _e = len(s)
    result = (s[_s:_e], _e)
    return result
import requests
def breeding_compatibility(pokemon_species_url: str, pokemon_name: str) -> int:
    """Count the breeding matches for a specified pokemon.

    Performs live HTTP requests against the pokeAPI (network I/O).

    Args:
        pokemon_species_url (str): Base URL of the pokemon-species
            endpoint; the pokemon name is appended directly, so it is
            expected to end with '/'.
        pokemon_name (str): Pokemon name to search for breeding matches.

    Returns:
        int: Number of species across all of the pokemon's egg groups.
            The union of sets means species that appear in several shared
            groups are counted once; note the queried pokemon itself is
            part of its own egg groups and so is included in the count.
    """
    matches = set()
    # NOTE(review): no error handling -- an unknown name or network
    # failure raises (KeyError / requests exceptions) instead of
    # returning 0.
    pokemon_egg_group_data = requests.get(url=pokemon_species_url + pokemon_name)\
        .json()['egg_groups']
    egg_groups_urls = [egg_group['url'] for egg_group in pokemon_egg_group_data]
    for url in egg_groups_urls:
        egg_group_data = requests.get(url=url).json()
        # Union-in every species sharing this egg group.
        matches |= {poke['name'] for poke in egg_group_data['pokemon_species']}
    return len(matches)
def convective_facex(gridx, gridy, ivar):
    """Convection operator for the x-face grid.

    Arguments
    ---------
    gridx : grid object (x-direction)
        Grid containing data in x-direction.
    gridy : grid object (y-direction)
        Grid containing data in y-direction.
    ivar: string
        Name of the grid variable to be operated on.

    Returns
    -------
    F : numpy.ndarray
        Convective terms in the x direction as an array of floats.
    """
    # NOTE(review): assumes gridx[ivar] / gridy[ivar] are 4-D arrays whose
    # leading two axes are singleton (selected away by [0,0,:,:]) --
    # confirm against the grid class.
    u = gridx[ivar][0,0,:,:]
    v = gridy[ivar][0,0,:,:]
    dx, dy = gridx.dx, gridy.dy
    # Interior u values and their four neighbors (compass naming:
    # P = this point, W/E = west/east, S/N = south/north).
    u_P = u[1:-1, 1:-1]
    u_W = u[1:-1, :-2]
    u_E = u[1:-1, 2:]
    u_S = u[:-2, 1:-1]
    u_N = u[2:, 1:-1]
    # v samples at the four corners around each interior u location.
    v_sw = v[:-1, 1:-2]
    v_se = v[:-1, 2:-1]
    v_nw = v[1:, 1:-2]
    v_ne = v[1:, 2:-1]
    # Central-difference form of -(d(u^2)/dx + d(uv)/dy); the 1/4 factors
    # come from averaging pairs of u and v samples.
    F = - (((u_P + u_E)**2 - (u_W + u_P)**2) / (4 * dx) +
           ((u_P + u_N) * (v_nw + v_ne) -
            (u_S + u_P) * (v_sw + v_se)) / (4 * dy))
    return F
from typing import Dict
def stream_success(output_line: Dict, ignore_exceptions: bool) -> Dict:
    """Attach a success `_jc_meta` object to *output_line* when
    `ignore_exceptions` is enabled; the (mutated) line is returned."""
    if ignore_exceptions:
        output_line['_jc_meta'] = {'success': True}
    return output_line
def end_of_block(text, pos):
    """Return the position of the '}' that closes the '{' at ``pos``,
    accounting for nested brace pairs."""
    assert text[pos] == '{'
    depth = 1
    idx = pos
    while depth > 0:
        next_open = text.find('{', idx + 1)
        next_close = text.find('}', idx + 1)
        if 0 <= next_open < next_close:
            # An opening brace comes first: descend one nesting level.
            depth += 1
            idx = next_open
        else:
            depth -= 1
            idx = next_close
    return idx
def combine_address_row(row):
    """Combine the street-address fields of one row into a single string.

    Args:
        row: mapping with "Street Prefix", "Street Number", "Street Name"
            and "Street Suffix" entries.

    Returns:
        str: combined address, stripped of leading/trailing whitespace.
    """
    if row['Street Prefix'] == "":
        parts = [row['Street Number'], row['Street Name'], row['Street Suffix']]
    else:
        parts = [row['Street Number'], row['Street Prefix'],
                 row['Street Name'], row['Street Suffix']]
    return ' '.join(parts).strip()
def get_release_date(data):
    """Return the physical release date, falling back to the cinema
    release date when the former is missing or empty."""
    return data.get("physicalRelease") or data.get("inCinemas")
import re
def gettag_name(tagstr):
    """Extract the tag name from an opening or closing tag string, or
    None when no tag is present.

    >>> gettag_name('%<topic type=danxuan description=单选题>')
    'topic'
    >>> gettag_name('</topic>')
    'topic'
    """
    match = re.search(r'</?([a-zA-Z_]+)', tagstr)
    return match.group(1) if match else None
def representative_feature(path, values):
    """Helper for the TSEL filter: pick the representative node of a path.

    Args:
        path (list): node names forming the path; "VRN" entries are ignored.
        values (dict): maps node names to their values.

    Returns:
        str: first node on the path holding the maximal value, provided
        that value exceeds -1; otherwise None.
    """
    best_value = -1
    best_node = None
    for node in (name for name in path if name != "VRN"):
        if values[node] > best_value:
            best_value = values[node]
            best_node = node
    return best_node
def sec2days(seconds):
    """Convert a duration in seconds to a (fractional) number of days."""
    seconds_per_day = 24.0 * 3600
    return seconds / seconds_per_day
import re
def camelize_classname(base, tablename, table):
    """ Produce a 'camelized' class name from a database table name,
    treating underscores as hump markers, e.g.
    'words_and_underscores' -> 'WordsAndUnderscores'.
    see https://docs.sqlalchemy.org/en/13/orm/extensions/automap.html#overriding-naming-schemes

    Parameters
    ----------
    base : ~sqlalchemy.ext.automap.AutomapBase
        The AutomapBase class doing the prepare.
    tablenname : str
        The string name of the Table
    table : ~sqlalchemy.schema.Table
        The Table object itself

    Returns
    -------
    str
        A string class name
    """
    def upper_hump(match):
        # Replace each '_x' with uppercase 'X'.
        return match.group(1).upper()

    head = tablename[0].upper()
    tail = re.sub(r'_([a-z])', upper_hump, tablename[1:])
    return str(head + tail)
import re
def get_object_from_line(line, prefix, column):
    """Return the object name preceding ``.prefix`` at the cursor.

    ``line`` -- text of the line the cursor is on.
    ``prefix`` -- completion prefix determined by sublime.
    ``column`` -- index of the cursor within the line.
    """
    pattern = r'(?:\s)([^\s]+)(?:\.{0})$'.format(prefix)
    match = re.search(pattern, line[:column])
    return match.group(1) if match else None
def average(list):
    """Compute the arithmetic average of a list of numbers.

    :param list: Any non-empty list of numbers
    :type list: list
    :return: The arithmetic average of the list
    :rtype: float or int
    :raises ZeroDivisionError: if the list is empty (unchanged behavior)
    """
    # Builtin sum() replaces the manual accumulator loop, which also
    # shadowed the builtin name `sum`.
    return sum(list) / len(list)
def recall_at_n(ranks, n=3):
    """
    Calculate recall @ N: the fraction of correct responses whose
    predicted rank is at or above position *n*.
    Function taken from: https://github.com/google/retrieval-qa-eval/blob/master/squad_eval.py

    Args:
    -----
    ranks: (list) predicted ranks of the correct responses
    return:
    -------
    Recall@N: (float)
    """
    hits = sum(1 for rank in ranks if rank <= n)
    return hits / len(ranks)
from jinja2.runtime import Undefined
def domainname(string):
    """Return the domain part (everything after the first dot) of
    *string*, or jinja2 ``Undefined`` when it contains no dot."""
    _, sep, domain = string.partition('.')
    if sep:
        return domain
    return Undefined(hint=f"No domain in {string!r}")
def read_f1(f):
    """Read precision, recall and F1 from a micro_f1 file.

    The file's first line is tab-separated, with precision, recall and F1
    in columns 2-4 (0-based).
    """
    with open(f, 'r') as fin:
        first_line = fin.readline()
    fields = first_line.split('\t')
    return float(fields[2]), float(fields[3]), float(fields[4])
def MinMaxScaler(X):
    """
    Column-wise min-max scaling: (X - X.min()) / (X.max() - X.min()).

    Parameters
    -----------
    X : 2d array-like shape(n_samples, n_features)

    Returns
    ----------
    X : 2d array-like shape(n_samples, n_features)
        values will be in [0, 1]
    """
    col_min = X.min(axis=0)
    col_range = X.max(axis=0) - col_min
    return (X - col_min) / col_range
def refactor_descriptor(descriptor, type_policy, distance_policy, charge_policy):
    """
    Discretize every feature value of a molecular descriptor according to
    the given policies.

    :param descriptor: iterable of (atom_types, distances, partial_charges)
    :param type_policy: policy with a ``discretize`` method for atom types
    :param distance_policy: policy with a ``discretize`` method for distances
    :param charge_policy: policy with a ``discretize`` method for charges
    :return: the refactored descriptor with feature values discretized
    """
    refactored = []
    for atom_types, distances, charges in descriptor:
        refactored.append((
            [type_policy.discretize(t) for t in atom_types],
            [distance_policy.discretize(d) for d in distances],
            [charge_policy.discretize(c) for c in charges],
        ))
    return refactored
import torch
def synthetic_data(w, b, num_examples):
    """
    Generate a random regression dataset: y = Xw + b + gaussian noise.

    :param w: weight vector (1-D tensor)
    :param b: scalar bias
    :param num_examples: number of rows to generate
    :return: (X, y) with X of shape (num_examples, len(w)) and y of
        shape (num_examples, 1)
    """
    features = torch.normal(0, 1, (num_examples, len(w)))
    labels = torch.matmul(features, w) + b
    labels = labels + torch.normal(0, 0.01, labels.shape)
    return features, labels.reshape((-1, 1))
def amiroot(dev):
    """ Checks if we are root user or not
    Args: dev(obj): pyez connection to device
    Returns: True if we are root, False if not
    Raises: None
    """
    result = False
    # Run `whoami` in the device shell over the RPC channel.
    op_cli = dev.rpc.request_shell_execute(command="/usr/bin/whoami")
    # Only inspect the reply when it is an XML element tree (has xpath).
    if hasattr(op_cli, 'xpath'):
        user = op_cli.xpath('.')
        # The command output carries a trailing newline; strip it before
        # comparing against the literal user name.
        result = bool(user[0].text.strip('\n') == 'root')
    return result
def _convert_to_dataset_class(df, dataset_class, expectations_config=None, autoinspect_func=None):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectations_config
"""
if expectations_config is not None:
# Cast the dataframe into the new class, and manually initialize expectations according to the provided configuration
df = dataset_class(df)
df._initialize_expectations(expectations_config)
else:
# Instantiate the new Dataset with default expectations
try:
df = dataset_class(df, autoinspect_func=autoinspect_func)
except:
raise NotImplementedError(
"read_csv requires a Dataset class that can be instantiated from a Pandas DataFrame")
return df | 460baf6c529b2aef0e05a18d6167918085950b11 | 105,232 |
def _check_availability(name):
    """
    Test that the source and target modules for *name* are
    available and return them.

    :raise ImportError: If the source or target cannot be imported.
    :return: The tuple ``(gevent_module, target_module, target_module_name)``
    """
    # Always import the gevent module first. This helps us be sure we can
    # use regular imports in gevent files (when we can't use gevent.monkey.get_original())
    # __import__('gevent.x') returns the top-level `gevent` package, so the
    # submodule is pulled off it with getattr.
    gevent_module = getattr(__import__('gevent.' + name), name)
    # A gevent module may point at a differently-named stdlib module via
    # its optional `__target__` attribute; default to the same name.
    target_module_name = getattr(gevent_module, '__target__', name)
    target_module = __import__(target_module_name)
    return gevent_module, target_module, target_module_name
def _vmomentsurfaceIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,n,m,o): #pragma: no cover because this is too slow; a warning is shown
"""Internal function that is the integrand for the vmomentsurface mass integration"""
return vR**n*vT**m*vz**o*df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,
use_physical=False) | 592e11597dfd026349c063960c8f7e1d94ed95b9 | 105,239 |
def _call_if_not_instance_of(key, cls, *args, **kwargs):
""" If key is not an instance of cls, call it with *args and **kwargs, otherwise just return it. """
return key(*args, **kwargs) if not isinstance(key, cls) else key | bacba5793d29589cf776630f4bc35c22544a4760 | 105,242 |
def _stringify(s):
"""Convert input to string, wrap with quotes if already a string"""
return '"{}"'.format(s) if isinstance(s, str) else str(s) | 56ab8ff17d02d8cca46c7c84b1c06930407c51b9 | 105,244 |
def csv(arg):
    """Split a comma-separated `csv` input argument into a list of
    whitespace-stripped tokens."""
    tokens = arg.split(',')
    return [token.strip() for token in tokens]
def are_domains_equal(domain1, domain2):
    """Compare two International Domain Names.

    Both names are IDNA-encoded, then compared case-insensitively.

    :Parameters:
        - `domain1`: domain name to compare
        - `domain2`: domain name to compare
    :Types:
        - `domain1`: `str`
        - `domain2`: `str`
    :return: True when `domain1` and `domain2` are equal as domain names.
    """
    encoded1 = domain1.encode("idna")
    encoded2 = domain2.encode("idna")
    return encoded1.lower() == encoded2.lower()
def hsb_to_rgb(h, s, v, a):
    """Simple hsv to rgb conversion. Assumes components specified in range 0.0-1.0."""
    scaled = h * 5.9999
    sector = int(scaled)
    frac = scaled - sector
    p = v * (1 - s)
    q = v * (1 - frac * s)
    t = v * (1 - (1 - frac) * s)
    # One (r, g, b) permutation per hue sector; sector 5 (and anything
    # out of range) falls back to (v, p, q), matching the final branch.
    sector_rgb = {
        0: (v, t, p),
        1: (q, v, p),
        2: (p, v, t),
        3: (p, q, v),
        4: (t, p, v),
    }
    r, g, b = sector_rgb.get(sector, (v, p, q))
    return r, g, b, a
from pathlib import Path
def _resolve_path(directory: str) -> Path:
"""Return directory as absolute Path.
Arguments:
directory {str} -- Path to directory, possibly relative.
Returns:
Path -- Absolute Path.
"""
path = Path(directory)
return path.resolve() | 9f586fd3d2ab1755ec341429d361901f5fe12555 | 105,260 |
def strip_sources(sources, parent_len):
    """Given a list of source directories and a common parent
    directory, strip the source directories to their locations
    relative to the common parent.

    :param sources: list of backslash-separated directory path strings
    :param parent_len: number of components in the common parent path;
        components at index >= parent_len - 1 are kept
    :return: list of relative paths, each kept component followed by a
        trailing backslash (empty string when nothing remains)
    """
    result = []
    # max(..., 0) keeps the original "keep everything" behavior when
    # parent_len is 0 or negative (index >= parent_len - 1 was then
    # trivially true for all components).
    start = max(parent_len - 1, 0)
    for source in sources:
        kept = source.split("\\")[start:]
        result.append("".join(part + "\\" for part in kept))
    return result
def get_projection_slices(image, shape, yx0=None):
    """Get slices needed to project an image
    This method returns the bounding boxes needed to
    project `image` into a larger image with `shape`.
    The results can be used with
    `projection[bb] = image[ibb]`.
    Parameters
    ----------
    image: array
        2D input image
    shape: tuple
        Shape of the new image.
    yx0: tuple
        Location of the lower left corner of the image in
        the projection.
        If `yx0` is `None` then the image is centered in
        the projection.
    Returns
    -------
    bb: tuple
        `(yslice, xslice)` of the projected image to place `image`.
    ibb: tuple
        `(iyslice, ixslice)` of `image` to insert into the projection.
    bounds: tuple
        `(bottom, top, left, right)` locations of the corners of `image`
        in the projection. While this isn't needed for slicing it can be
        useful for calculating information about the image before projection.
    """
    Ny, Nx = shape
    iNy, iNx = image.shape
    if yx0 is None:
        # Center the image: offset by minus the image midpoint so that,
        # after the center shift below, the image midpoint lands at the
        # center of the projection.
        y0 = iNy // 2
        x0 = iNx // 2
        yx0 = (-y0, -x0)
    bottom, left = yx0
    # `yx0` is given relative to the projection center; shift to absolute
    # coordinates. `N >> 1` is floor(N / 2).
    bottom += Ny >> 1
    left += Nx >> 1
    top = bottom + iNy
    # Destination slice in the projection, clipped to its bounds.
    yslice = slice(max(0, bottom), min(Ny, top))
    # Matching source slice in `image`: skip rows that fall below the
    # projection; when the image sticks out past the top, `Ny - bottom`
    # limits the stop to the rows that fit.
    iyslice = slice(max(0, -bottom), max(Ny - bottom, -top))
    right = left + iNx
    # Same construction along the x axis.
    xslice = slice(max(0, left), min(Nx, right))
    ixslice = slice(max(0, -left), max(Nx - left, -right))
    return (yslice, xslice), (iyslice, ixslice), (bottom, top, left, right)
def wrap(value, wrapper='"'):
    """Jinja2 map filter: surround *value* with *wrapper* on both sides.
    E.g.: ['a', 'b', 'c'] -> ['"a"', '"b"', '"c"']
    """
    return "".join((wrapper, value, wrapper))
def duplicate(string, times):
    """Return the concatenation of ``string`` repeated ``times`` times.

    :param string: string to repeat
    :param times: repetition count; counts <= 0 yield the empty string
    :return: the repeated string
    """
    # Sequence repetition replaces the original quadratic `+=` loop and
    # behaves identically for all integer counts (including <= 0).
    return string * times
from warnings import warn
import torch
def extract_mean_std(hnet_outputs, return_logvar=False):
    """Split hypernetwork outputs into means and standard deviations.

    The hypernetwork is assumed to emit ``2 * n`` tensors, where the first
    ``n`` are log-variances and the last ``n`` are means of a diagonal
    Gaussian; standard deviations are computed as ``exp(0.5 * logvar)``.

    .. deprecated:: 1.0
        Use a main network wrapper such as
        :class:`probabilistic.gauss_mnet_interface.GaussianBNNWrapper` or
        the function :func:`decode_diag_gauss` rather than working with
        the hypernet output directly.

    Args:
        hnet_outputs: List of ``2 * n`` tensors (log-variances followed
            by means).
        return_logvar (optional): If set, a third value is returned,
            corresponding to the log-variances.

    Returns:
        Two lists of tensors: `mean` and `std` (plus `logvar` when
        requested).
    """
    warn('Please use a main network wrapper such as class' +
         '"probabilistic.gauss_mnet_interface.GaussianBNNWrapper" or the' +
         'function "decode_diag_gauss" rather than working with ' +
         'the hypernet output directly.', DeprecationWarning)
    num_outputs = len(hnet_outputs)
    assert num_outputs % 2 == 0
    half = num_outputs // 2
    # First half encodes log-variances, second half the means.
    logvar = hnet_outputs[:half]
    mean = hnet_outputs[half:]
    std = [torch.exp(0.5 * lv) for lv in logvar]
    if return_logvar:
        return mean, std, logvar
    return mean, std
def _EXAMPLE(s):
"""Helper to provide uniform appearance for examples in cmdline options"""
return ", e.g. %r" % s | e48fff1b6138ce7d862af19db8d9064aa68bcfd6 | 105,280 |
def even_chunker(seq, n_chunks):
    """Split *seq* into *n_chunks* contiguous chunks of (roughly) equal size.

    Returns a list of lists. When len(seq) is evenly divisible by n_chunks,
    all chunks are the same size; otherwise every chunk's length is within
    +/- 1 of every other chunk's.

    Some examples of the chunk lengths for len(seq) == 100:
        n_chunks == 3:  [33, 33, 34]
        n_chunks == 5:  [20, 20, 20, 20, 20]
        n_chunks == 6:  [16, 17, 17, 16, 17, 17]
        n_chunks == 7:  [14, 14, 14, 15, 14, 14, 15]
        n_chunks == 8:  [12, 13, 12, 13, 12, 13, 12, 13]
        n_chunks == 9:  [11, 11, 11, 11, 11, 11, 11, 11, 12]
        n_chunks == 15: [6, 7, 7, 6, 7, 7, 6, 7, 7, 6, 7, 7, 6, 7, 7]
    """
    total = len(seq)
    chunks = []
    for idx in range(n_chunks):
        start = idx * total // n_chunks
        stop = (idx + 1) * total // n_chunks
        chunks.append(seq[start:stop])
    return chunks
def growth_rate_dependency_to_temperature(last_24_canopy_t):
    """
    Equation 9.28: linear dependency of growth rate on the mean canopy
    temperature of the last 24 hours.

    Returns: growth rate dependency to temperature [-]
    """
    slope = 0.0047
    intercept = 0.06
    return slope * last_24_canopy_t + intercept
def make_vocab(vocab_file):
    """Read a file of newline-separated words and return them as a list.

    :param vocab_file: path to a text file with one word per line
    :return: list of words; ``splitlines()`` drops the trailing newline
        without producing a spurious empty entry
    """
    # The original pre-initialized `vocab = []` and overwrote it inside
    # the with-block; returning directly removes the dead assignment.
    with open(vocab_file, 'r') as v:
        return v.read().splitlines()
import copy
def insertion_sort(data, reverse=False):
    """
    Implement insertion sort.

    The input is not modified; a deep-copied, sorted list is returned.

    :param data: list data
    :param reverse: if reverse is True, DESC, else ASC.
    :return: list data
    :raises TypeError: if ``data`` is not iterable
    """
    iter(data)  # fail fast on non-iterable input
    copy_data = copy.deepcopy(data)
    if len(copy_data) < 2:
        return copy_data
    for i in range(1, len(copy_data)):
        for j in range(i):
            # BUG FIX: the ascending branch previously compared against
            # the *original* unsorted list (`data[j]`) instead of the
            # partially sorted copy, producing wrong results
            # (e.g. [3, 1, 2] sorted to [2, 1, 3]).
            if (copy_data[i] > copy_data[j]) if reverse else (copy_data[i] < copy_data[j]):
                temp = copy_data[i]
                copy_data[j + 1:i + 1] = copy_data[j:i]
                copy_data[j] = temp
                break  # element inserted; prefix [0..i] is now sorted
    return copy_data
def _d_print(inputs, name: str = 'Foo'):
"""Print shape of inputs, which is tensor or list of tensors."""
is_list = isinstance(inputs, (list, tuple))
print('{}: ({})'.format(name, 'List' if is_list else 'Single'))
if not is_list:
print(' ', inputs.dtype, inputs.shape, inputs.name, flush=True)
else:
for ten in inputs:
print(' ', ten.dtype, ten.shape, ten.name, flush=True)
print('', flush=True)
return inputs | 947477077c7f50a8be45371e6a3806b8db858286 | 105,289 |
def firstAvailable(colorSet):
    """Return the smallest non-negative integer not contained in colorSet."""
    candidate = 0
    while candidate in colorSet:
        candidate += 1
    return candidate
def element_to_int(element, attribute=None):
    """Convert ``element`` object to int. If attribute is not given, convert
    ``element.text``; otherwise convert the named attribute's value.

    :param element: ElementTree element
    :param attribute: attribute name
    :type attribute: str
    :returns: integer
    :rtype: int
    """
    value = element.text if attribute is None else element.get(attribute)
    return int(value)
def license_mapper(package_data, package):
    """
    Update package licensing and return package. Licensing structure for FreeBSD
    packages is a list of (non-scancode) license keys and a 'licenselogic' field.
    """
    license_logic = package_data.get('licenselogic')
    licenses = package_data.get('licenses')
    if not licenses:
        return
    cleaned = [lic.strip() for lic in licenses if lic and lic.strip()]
    # Both 'or'/'dual' and 'and'/'multi' variants of licenselogic are
    # found in the wild; anything else ('single' or default) falls back
    # to a comma-separated join.
    if license_logic in ('or', 'dual'):
        declared = ' OR '.join(cleaned)
    elif license_logic in ('and', 'multi'):
        declared = ' AND '.join(cleaned)
    else:
        declared = ', '.join(cleaned)
    package.declared_license = declared or None
    return package
def first_sentence(str):
    """
    Return the first sentence of a string - everything up to and
    including the first period, or the empty string if there is no
    period.

    >>> first_sentence('')
    ''
    >>> first_sentence('Incomplete')
    ''
    >>> first_sentence('The first sentence. This is ignored.')
    'The first sentence.'
    """
    # NOTE: the parameter name shadows the builtin `str`; kept for
    # backward compatibility with keyword callers.
    # The original docstring claimed "the whole text if there is no
    # period", but find() returns -1 in that case, so the slice [0:0]
    # yields '' — exactly what the doctest expects. Docstring fixed,
    # behavior unchanged.
    return str[0:str.find('.') + 1]
from datetime import datetime
def convert_time_string(date_str):
    """Parse a timestamp like '2018-08-15T23:55:17' into a datetime object.

    Any fractional-seconds suffix (after a '.') is discarded first.
    """
    without_fraction = date_str.partition(".")[0]
    return datetime.strptime(without_fraction, "%Y-%m-%dT%H:%M:%S")
def _sequence_to_latex(seq, style='ket'):
"""
For a sequence of particle states generate LaTeX code.
Parameters
----------
seq : list of ints
List of coordinates for each particle.
style : 'ket' (default), 'bra' or 'bare'
Style of LaTeX (i.e. |01> or <01| or 01, respectively).
Returns
-------
latex : str
LaTeX output.
"""
if style == 'ket':
latex = "$\\left|{0}\\right\\rangle$"
elif style == 'bra':
latex = "$\\left\\langle{0}\\right|$"
elif style == 'bare':
latex = "${0}$"
else:
raise Exception("No such style.")
return latex.format("".join(map(str, seq))) | 343dfc0d73d1d25456c15474d4314fd7daa8ed56 | 105,308 |
def _cross_np(fld_a, fld_b):
"""cross product of two vector fields"""
ax, ay, az = fld_a.component_views()
bx, by, bz = fld_b.component_views()
prodx = ay * bz - az * by
prody = -ax * bz + az * bx
prodz = ax * by - ay * bx
return fld_a.wrap([prodx, prody, prodz]) | f57cb9cefa90b04a24637fd63ba83cf62ad436c9 | 105,313 |
def last_n_average_threshold(threshold, n, utilization):
    """ The averaging CPU utilization threshold algorithm.

    Averages the last *n* utilization values and compares against the
    threshold; an empty history always yields False.

    :param threshold: The threshold on the CPU utilization.
    :type threshold: float,>=0
    :param n: The number of last CPU utilization values to average.
    :type n: int,>0
    :param utilization: The history of the host's CPU utilization.
    :type utilization: list(float)
    :return: The decision of the algorithm.
    :rtype: bool
    """
    if not utilization:
        return False
    recent = utilization[-n:]
    return sum(recent) / len(recent) > threshold
def convert_ms_to_frames(chunk_size_ms, framerate):
    """
    Convert a chunk duration in milliseconds to a frame count.

    Framerate is in hz (cycles per second), so the frame count is
    framerate * chunk_size_ms / 1000, rounded down to the nearest int.
    """
    seconds = chunk_size_ms / 1000.0
    return int(framerate * seconds)
def to_tuple(set_sentences):
    """
    Change each sentence into a tuple of its whitespace-separated words.

    INPUT: set_sentences a set of sentences
    OUTPUT: a set of corresponding tuples
    """
    return {tuple(sentence.split()) for sentence in set_sentences}
import socket
def port_is_free(port: int) -> bool:
    """Return True when TCP `port` on 127.0.0.1 can be bound (i.e. is not in
    use), False otherwise. The probe socket is always closed."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        try:
            probe.bind(("127.0.0.1", port))
        except OSError:
            return False
        return True
from functools import reduce
def compose(*trafos):
    """Compose a sequence of transformations by left-folding: each
    accumulated result is called with the next element as its argument."""
    def _apply(current, following):
        return current(following)
    return reduce(_apply, trafos)
from typing import List
def obtener_texto(ruta: str) -> List[str]:
    """Read a UTF-8 text file and return its lines.

    :param ruta: path of the file to read
    :type ruta: str
    :return: list of the file's lines, without trailing newlines
    :rtype: List[str]
    """
    with open(ruta, encoding="utf-8") as archivo:
        contenido = archivo.read()
    return contenido.splitlines()
def lemmatise(tokens):
    """
    Lemmatise a nested list of tokens.

    Parameters
    ----------
    tokens: list
        a nested list containing lists of tokens or a list of spacy docs;
        each token must expose a ``lemma_`` attribute

    Returns
    -------
    lemmas: list
        a nested list of lemmas, one inner list per comment
    """
    lemmas = []
    for comment in tokens:
        lemmas.append([token.lemma_ for token in comment])
    return lemmas
from pathlib import Path
import inspect
def _get_default_state_path(gui):
"""Return the path to the default state.json for a given GUI."""
gui_path = Path(inspect.getfile(gui.__class__))
path = gui_path.parent / 'static' / 'state.json'
return path | 0ef478be3391bacb667864f46f6ea4c9cae4bc44 | 105,338 |
import re
def parse_for_order_by_desc(sql, col_name):
    """Checks whether the SQL statement contains an ORDER BY clause for the
    specified column name, followed by DESC (meaning descending order);
    returns True if so, otherwise False.

    Matching is literal and upper-case: `sql` must contain the keywords in
    upper case; `col_name` is upper-cased before matching.
    """
    # The suffix is now a raw string: "\s" in a plain string is an invalid
    # escape sequence (SyntaxWarning on Python 3.12+, a future error).
    desc_expr = re.compile(
        r"ORDER BY([\s\w.]*,)*\s+(\w+\.)?" + col_name.upper() + r"\s+DESC")
    return desc_expr.search(sql) is not None
def read1(filename):
    """Read a tex file into lines for processing.

    :param filename: path of the file to read
    :return: list of lines (newlines retained, as from ``readlines()``)
    """
    # `with` guarantees the handle is closed even if readlines() raises,
    # unlike the original open()/close() pair.
    with open(filename) as f:
        return f.readlines()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.