content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import requests
def consume(session: requests.Session, response: requests.Response, key="items"):
    """
    Collect all pages of a paginated API response.

    Follows the HAL-style ``_links._next`` URL until no further page is
    advertised, accumulating the ``key`` entries of every page.

    :param session: session used to fetch follow-up pages.
    :param response: response of the first page (assumed already successful).
    :param key: name of the list field to accumulate (default ``"items"``).
    :return: list with the items of all pages, in order.
    :raises requests.HTTPError: if fetching a follow-up page fails.
    """
    payload = response.json()  # parse each page once instead of three times
    items = payload[key]
    while "_next" in payload["_links"]:
        response = session.get(payload["_links"]["_next"])
        response.raise_for_status()
        payload = response.json()
        items.extend(payload[key])
    return items
from typing import Dict
import os
def set_build_tools(triplet: str) -> Dict:
    """
    Return a copy of the environment pointing the standard build-tool
    variables (CC, CXX, LD, AR, AS, STRIP, RANLIB) at the cross toolchain
    named by *triplet*, e.g. CC=riscv64-linux-gnu-gcc.

    :param triplet: a triplet in the form "riscv64-linux-gnu"
    :return: a dictionary corresponding to the environment
    """
    # Map each conventional environment variable to its gcc/binutils tool.
    tools = {
        "CC": "gcc",
        "CXX": "g++",
        "LD": "ld",
        "AR": "ar",
        "AS": "as",
        "STRIP": "strip",
        "RANLIB": "ranlib",
    }
    env = os.environ.copy()
    for var, tool in tools.items():
        env[var] = f"{triplet}-{tool}"
    return env
import sys
import os
def get_system_info():
    """
    Identify the current operating system and desktop environment.

    Returns:
        tuple(str, str): (os_name, desktop_env) where os_name is one of
        "linux", "windows", "osx" or "unknown" and desktop_env is e.g.
        "gnome", "unity", "aqua", "explorer" or "unknown".
    """
    platform_id = sys.platform.lower()
    if platform_id == "linux":
        return platform_id, os.environ.get("DESKTOP_SESSION", "unknown").lower()
    if platform_id == "win32":
        return 'windows', "explorer"
    if platform_id == "darwin":
        return "osx", "aqua"
    return "unknown", "unknown"
def base26(w):
    """Interpret a word as a base-26 number where a=0 and z=25."""
    total = 0
    for letter in w.lower():
        total = total * 26 + (ord(letter) - ord('a'))
    return total
def grade_distribution(parsed_list, max_grade, bin_number=10):
    """
    Count how many grades fall into each of `bin_number` equal-width bins.

    Bins are left-closed, right-open [a, b), except the last one which is
    fully closed [c, d] so that a perfect score lands in the final bin.

    :param parsed_list: the parsed list of the grades
    :param max_grade: the maximum grade that you can score
    :param bin_number: the number of bins used for the distribution,
        default is 10
    :return: a list with the number of grades in each bin
    """
    width = max_grade / bin_number
    counts = [0] * bin_number
    for grade in parsed_list:
        slot = int(grade / width)
        # A grade equal to max_grade belongs to the last (closed) bin.
        if slot == bin_number:
            slot -= 1
        counts[slot] += 1
    return counts
import sys
def get_instance(module, class_name, *args, **kwargs):
    """
    Return an instance of the object based on module name and class name.

    :param module: module name (may be dotted, e.g. "a.b.c").
    :param class_name: name of class to instantiate.
    :param args: args to pass to class.
    :param kwargs: keyword args to pass to class.
    :returns: instance of class.
    """
    # importlib.import_module is the supported idiom; __import__ +
    # sys.modules is the legacy spelling of the same operation.
    import importlib
    mod = importlib.import_module(module)
    cls = getattr(mod, class_name)
    return cls(*args, **kwargs)
def getCumPos(chrom,pos):
    """
    getCumulativePosition

    Shift per-chromosome positions onto a single cumulative axis so that
    chromosome 2 starts after the last position of chromosome 1, etc.
    (typical for Manhattan-style plots).

    Args (presumably numpy arrays -- TODO confirm against callers):
        chrom: 1-D array of 1-based chromosome numbers per marker.
        pos:   1-D array of positions; NOTE: modified IN PLACE.

    Returns:
        pos, with positions made cumulative across chromosomes.
    """
    n_chroms = int(chrom.max())
    x = 0
    for chrom_i in range(1,n_chroms+1):
        I = chrom==chrom_i
        if I.any():
            # shift this chromosome by the running offset, then advance the
            # offset to the largest (already shifted) position seen so far
            pos[I]+=x
            x=pos[I].max()
    return pos
def poly_filename():
    """Return the file name used for the solution CSV."""
    return 'solution.csv'
def normalise_version_str(version_num, normal_len):
    """Normalise the length of a version string by appending ".0" parts.

    >>> normalise_version_str("1.0", 3)
    '1.0.0'
    >>> normalise_version_str("1.0.0", 4)
    '1.0.0.0'

    :param version_num: dotted version string such as "1.0"
    :param normal_len: desired number of dotted components
    :return: the padded version string (unchanged if already long enough)
    """
    # String repetition pads in one step; max() guards against negatives.
    missing = normal_len - len(version_num.split("."))
    return version_num + ".0" * max(missing, 0)
import warnings
def get_weight(target_easy_negative, easy_negative, hard_negative):
    """
    Compute scaling factors that move the easy-negative weight to a target.

    Args:
        target_easy_negative: desired total weight of easy negatives.
        easy_negative: current total weight of easy negatives.
        hard_negative: current total weight of hard negatives.
    Returns:
        (w_h, w_e): scaling factors for the hard and easy negatives; the
        surplus (or deficit) of easy-negative weight is absorbed by the
        hard negatives.
    """
    surplus = easy_negative - target_easy_negative
    if surplus < 0:
        warnings.warn(
            "Transfering weight from hard negative to easy negative")
    hard_scale = 1 + surplus / hard_negative
    easy_scale = target_easy_negative / easy_negative
    return hard_scale, easy_scale
def _parse_single_key(keystr: str) -> str:
    """Return a QKeySequence keystring for a single key, adding the Shift
    modifier for upper-case characters."""
    if keystr.isupper():
        return 'Shift+' + keystr
    return keystr
def normalize_empty_to_none(text):
    """Map the empty string to None; pass any other value through verbatim."""
    if text == "":
        return None
    return text
def run_if_true(function, boolean, data):
    """
    Apply `function` to `data` when `boolean` is truthy; otherwise return
    `data` unchanged.
    """
    return function(data) if boolean else data
def _find_weather_data(tables):
    """Return the first dataframe in `tables` (as produced by
    `pandas.read_html`) with more than 16 rows, or None if none qualifies."""
    for frame in tables:
        if len(frame) > 16:
            return frame
    return None
def stations_level_over_threshold(stations, tol):
    """Return (station name, relative level) tuples for every station whose
    latest relative water level exceeds `tol`, sorted descending by level.

    Stations whose relative level is unknown (None) are skipped.
    """
    res = []
    for station in stations:
        # Call once instead of three times: the value could be expensive
        # to compute or change between calls.
        level = station.relative_water_level()
        if level is not None and level > tol:
            res.append((station.name, level))
    res.sort(key=lambda entry: entry[1], reverse=True)
    return res
import os
def parse_sample_file_path(parser, args):
    """
    Validate that `args` holds exactly one existing file path and return
    it; otherwise terminate the program via `parser.error`.
    """
    try_help_msg = "Try '%s -h' for more information" % parser.get_prog_name()
    if len(args) != 1:
        parser.error("Please provide a valid file path\n%s" % try_help_msg)
    path = args[0]
    if not os.path.exists(path):
        parser.error("File '%s' does not exist\n%s" % (path, try_help_msg))
    if not os.path.isfile(path):
        parser.error("'%s' is not a file\n%s" % (path, try_help_msg))
    return path
def personal_top_three (scores):
    """
    Return the three highest scores (fewer if the list is shorter).

    :param scores - list of high scores (left unmodified)
    :return: list of int - up to three highest scores, descending
    """
    # sorted() copies, and slicing transparently handles len(scores) < 3,
    # so the explicit copy and length branch of the original are redundant.
    return sorted(scores, reverse=True)[:3]
def get_synplas_morph_args(config, precell=False):
    """Extract the Synplas morphology arguments from a configuration.

    Args:
        config (configparser.ConfigParser): configuration
        precell (bool): True to load the precell morphology path instead
            of the usual one.
    Returns:
        dict: morphology arguments ("morph_path", "do_replace_axon").
    """
    path_option = "precell_morph_path" if precell else "morph_path"
    return {
        "morph_path": config.get("Paths", path_option),
        "do_replace_axon": config.getboolean("Morphology", "do_replace_axon"),
    }
def get_f_C_i(f_ang_C, f_sh_C):
    """Solar heat gain correction factor of opening i for the cooling
    season, equation (3b).

    Args:
        f_ang_C (float): normalized oblique-incidence solar heat gain
            ratio (relative to normal incidence) for the cooling season
        f_sh_C (float): shading-device effectiveness coefficient for the
            cooling season
    Returns:
        float: solar heat gain correction factor of opening i (cooling)
    """
    correction = f_ang_C * f_sh_C
    return correction
import os
import glob
def get_test_parameter_file_list():
    """Return the paths of all JSON test parameter files.

    Looks inside the 'test_parameter_files' directory that sits next to
    this module.

    Returns:
        list[str]: matching file paths (empty if the directory is missing)
    """
    absolute_current_file_path = os.path.abspath(__file__)
    current_directory = os.path.dirname(absolute_current_file_path)
    test_directory = os.path.join(current_directory, 'test_parameter_files')
    # os.path.join keeps the glob pattern portable (the original glued
    # "/" onto the path with string concatenation)
    search_pattern = os.path.join(test_directory, "*.json")
    test_parameter_file_list = glob.glob(search_pattern)
    print("returning test_parameter_file_list: " + str(test_parameter_file_list))
    return test_parameter_file_list
def load_cov( cov_file ):
    """! @brief load all information from coverage file

    Parses a tab-separated coverage file whose first column names a
    sequence and whose LAST column is a per-base coverage value, grouping
    CONSECUTIVE rows with the same first column into one list.

    NOTE(review): if a sequence name reappears in a later, separate run of
    rows, the later run silently replaces the earlier one (dict update) --
    confirm input files never interleave sequences.

    @param cov_file  path to the tab-separated coverage file
    @return dict mapping sequence name -> list of float coverage values
    """
    cov = {}
    with open( cov_file, "r" ) as f:
        line = f.readline()
        # seed the current group with the first line's sequence name; the
        # loop below re-reads that same line and appends its value
        header = line.split('\t')[0]
        tmp = []
        while line:
            parts = line.strip().split('\t')
            if parts[0] != header:
                # sequence changed: store the finished group, start anew
                cov.update( { header: tmp } )
                header = parts[0]
                tmp = []
            tmp.append( float( parts[-1] ) )
            line = f.readline()
        # store the final group
        cov.update( { header: tmp } )
    return cov
def checkPointsButtonClicked(game, clickX, clickY):
    """
    Return whether the click at (clickX, clickY) hit the points button.

    Returns False when no points button exists; otherwise defers to the
    button rect's collidepoint test.
    """
    button = game.pointsButtonObj
    if button is None:
        return False
    return button.collidepoint(clickX, clickY)
def seq_to_list(seq):
    """Coerce `seq` to a list: lists/tuples are converted, any other value
    is wrapped in a one-element list."""
    if isinstance(seq, (list, tuple)):
        return list(seq)
    return [seq]
def mock_purge_success(url, request):
    """
    Mock the response of a successful CCU purge request.
    """
    body = {
        "estimatedSeconds": 420,
        "progressUri": "/ccu/v2/purges/1234-456-7890",
        "purgeId": "1234-456-7890",
        "supportId": "123456789",
        "httpStatus": 201,
        "detail": "Request accepted.",
        "pingAfterSeconds": 420,
    }
    response = {
        'status_code': 201,
        'content-type': 'application/json',
        'server': 'Apache',
        'content-location': '/ccu/v2/purges/1234-456-7890',
        'content': body,
    }
    return response
import re
def should_filter(line, num):
    """Decide whether source line `num` (0-based) should be excluded.

    Filters shebang lines (line 0 only) and PEP 263 encoding declaration
    comments (first two lines only).
    """
    shebang = num == 0 and line.startswith('#!')
    encoding = (num < 2 and line.startswith('#')
                and bool(re.search('coding[:=]', line)))
    return shebang or encoding
from typing import Tuple
def decode_seat_of(pass_: list, n_rows: int = 128, n_cols: int = 8) -> Tuple[int, int]:
    """Return the zero-based (row, column) encoded in a boarding pass.

    The first seven characters binary-partition the rows (F keeps the
    lower half, B the upper half); the remaining characters partition the
    columns (L lower, R upper).
    """
    row_lo, row_hi = 1, n_rows
    for letter in pass_[:7]:
        half = (row_hi - row_lo + 1) // 2
        if letter == "F":
            row_hi -= half
        elif letter == "B":
            row_lo += half
    col_lo, col_hi = 1, n_cols
    for letter in pass_[7:]:
        half = (col_hi - col_lo + 1) // 2
        if letter == "L":
            col_hi -= half
        elif letter == "R":
            col_lo += half
    return row_lo - 1, col_lo - 1
def read_rrup_file(rrup_file):
    """Read a rupture-distance CSV file.

    Expects a header line followed by rows of
    station,_,_,rrup,rjb,rx,ry; an rx value of "X" marks rx/ry as unknown.

    Returns:
        dict: station name -> (rrup, rjb, rx, ry) as floats, with None
        for unknown rx/ry.
    """
    distances = dict()
    with open(rrup_file) as handle:
        next(handle)  # skip the header row
        for row in handle:
            station, _c1, _c2, rrup, rjbs, rx, ry = row.rstrip().split(",")
            if rx == "X":
                rx_val = ry_val = None
            else:
                rx_val = float(rx)
                ry_val = float(ry)
            distances[station] = (float(rrup), float(rjbs), rx_val, ry_val)
    return distances
def log_dir():
    """Return the path of an arbitrary log directory."""
    return r'D:\log_dir1'
import os
def build_image_list(path):
    """
    Recursively collect image files (.jpg, .jpeg and .png) under `path`.

    Sub-directories are searched recursively and their results merged into
    a single flat list.  (The original `append`ed the recursive result, so
    sub-directory matches were nested as lists instead of flattened.)

    :param path: the path from which to search for image files.
    :return: flat list of full path names of the image files found.
    """
    image_list = []
    for filename in os.listdir(path):
        full_path = os.path.join(path, filename)
        if os.path.isdir(full_path):
            # extend, not append: keep the result a flat list of paths
            image_list.extend(build_image_list(full_path))
        elif filename.endswith(('.jpg', '.png', '.jpeg')):
            image_list.append(full_path)
    return image_list
def get_country(countries):
    """ Prompt for a country code until a known code or "n" is entered.
    Return the code in upper case, or None when the user types "n".
    :param countries: the list of known country codes (upper case)
    :type countries: list
    :returns: the entered country code, or None
    :rtype: str
    """
    while True:
        choice = input("Select country code which you want to compare yearly?\n").lower()
        if choice == "n":
            return None
        if choice.upper() in countries:
            return choice.upper()
        print("Wrong option!")
def gen_composite_processor(*funcs):
    """Build a processor that threads a metric through `funcs` in order.

    Each function receives (host, name, val, timestamp) and must return
    the tuple handed to the next function; a None result stops the chain
    early and is returned as-is."""
    def composite_processor(host, name, val, timestamp):
        metric = (host, name, val, timestamp)
        for step in funcs:
            metric = step(*metric)
            if metric is None:
                break
        return metric
    return composite_processor
def prepend(v, l):
    """ Prepend value `v` to iterable `l`.

    If `v` is a list, `l` is appended onto `v` IN PLACE and that same
    list is returned; otherwise a generator yielding `v` followed by the
    items of `l` is returned. A non-iterable `l` (e.g. None) is ignored.
    """
    if not isinstance(v, list):
        def chained():  # pragma: no cover
            yield v
            try:  # pragma: no cover
                yield from l
            except TypeError:  # pragma: no cover
                pass
        return chained()
    v.extend(l)
    return v
def e_filter(e):
    """Return True for elements whose tag is unwanted; None otherwise."""
    unwanted = ('sdfield',)
    if e.tag in unwanted:
        return True
    return None
import re
def is_valid_zcs_container_id(zcs_container_id):
    """
    Validates Zadara Container Services (ZCS) container IDs, also known as
    the ZCS container "name". A valid ZCS container name looks like
    container-00000001 - the literal prefix "container-" followed by
    exactly 8 lower-case hexadecimal characters.

    :type zcs_container_id: str
    :param zcs_container_id: The ZCS container name to be validated.
    :rtype: bool
    :return: True or False depending on whether zcs_container_id passes
        validation.
    """
    if zcs_container_id is None:
        return False
    # fullmatch instead of '^...$': the '$' anchor would also accept a
    # trailing newline, which is not a valid container name.
    return re.fullmatch(r'container-[0-9a-f]{8}', zcs_container_id) is not None
import torch
def simple_contrstive_loss(vi_batch, vi_t_batch, mn_arr, temp_parameter=0.1):
    """
    NT-Xent-style contrastive loss between image representations and their
    transformed counterparts, against a memory bank of negatives.

    Returns the mean negative log probability that the feature
    representations for image I and its transform I_t belong to the same
    distribution.

    :param vi_batch: Feature representation for batch of images I
        (assumed 2-D, one row per image -- TODO confirm).
    :param vi_t_batch: Feature representation for batch containing
        transformed versions of I (same shape as vi_batch).
    :param mn_arr: Memory bank of feature representations for negative
        images for current batch.
    :param temp_parameter: The temperature parameter of the softmax.
    :return: scalar tensor with the mean loss over the batch.
    """
    # Define constant eps to ensure training is not impacted if norm of any image rep is zero
    eps = 1e-6
    # L2 normalize vi, vi_t and memory bank representations
    vi_norm_arr = torch.norm(vi_batch, dim=1, keepdim=True)
    vi_t_norm_arr = torch.norm(vi_t_batch, dim=1, keepdim=True)
    mn_norm_arr = torch.norm(mn_arr, dim=1, keepdim=True)
    vi_batch = vi_batch / (vi_norm_arr + eps)
    vi_t_batch = vi_t_batch/ (vi_t_norm_arr + eps)
    mn_arr = mn_arr / (mn_norm_arr + eps)
    # Find cosine similarities (pairwise I <-> I_t, and I_t <-> negatives)
    sim_vi_vi_t_arr = (vi_batch @ vi_t_batch.t()).diagonal()
    sim_vi_t_mn_mat = (vi_t_batch @ mn_arr.t())
    # Find exponentiation of similarity arrays (temperature-scaled)
    exp_sim_vi_vi_t_arr = torch.exp(sim_vi_vi_t_arr / temp_parameter)
    exp_sim_vi_t_mn_mat = torch.exp(sim_vi_t_mn_mat / temp_parameter)
    # Sum exponential similarities of I_t with different images from memory bank of negatives
    sum_exp_sim_vi_t_mn_arr = torch.sum(exp_sim_vi_t_mn_mat, 1)
    # Find batch probabilities arr (positive pair vs positive + negatives)
    batch_prob_arr = exp_sim_vi_vi_t_arr / (exp_sim_vi_vi_t_arr + sum_exp_sim_vi_t_mn_arr + eps)
    neg_log_img_pair_probs = -1 * torch.log(batch_prob_arr)
    loss_i_i_t = torch.sum(neg_log_img_pair_probs) / neg_log_img_pair_probs.size()[0]
    return loss_i_i_t
import struct
def int2vector_unpack(data, unpack = struct.unpack):
    """
    Unpack a serialized int2vector (a type used by the PostgreSQL
    catalog) into a tuple of 32 big-endian signed 16-bit integers.
    """
    fmt = "!32h"
    return unpack(fmt, data)
import numpy
def _binary_image_to_grid_points(binary_image_matrix):
"""Converts binary image to list of grid points.
This method is the inverse of `_grid_points_to_binary_image`.
:param binary_image_matrix: See documentation for
`_grid_points_to_binary_image`.
:return: rows_in_object: Same.
:return: columns_in_object: Same.
"""
return numpy.where(binary_image_matrix) | 1b9c78e2aa0d48e003c399c21bfce22cf33ff641 | 44,679 |
def get_hmt_balance(wallet_addr, token_addr, w3):
    """ Query the HMT (ERC-20) balance of a wallet.
    Args:
        wallet_addr: wallet address
        token_addr: ERC-20 token contract address
        w3: Web3 instance
    Return:
        balance reported by the token contract's balanceOf call
    """
    # Minimal ABI: only the balanceOf view function is needed here.
    balance_of_abi = {
        "constant": True,
        "inputs": [{"name": "_owner", "type": "address"}],
        "name": "balanceOf",
        "outputs": [{"name": "balance", "type": "uint256"}],
        "type": "function",
    }
    token = w3.eth.contract(abi=[balance_of_abi], address=token_addr)
    return token.functions.balanceOf(wallet_addr).call()
from typing import List
def encode_varint(value: int) -> bytes:
    """Serialize a single integer as a protobuf-style varint.

    Negative values are mapped into unsigned 64-bit space (two's
    complement) before encoding, matching protobuf semantics."""
    if value < 0:
        value += 1 << 64
    encoded = bytearray()
    while True:
        chunk = value & 0x7F
        value >>= 7
        if not value:
            encoded.append(chunk)
            return bytes(encoded)
        encoded.append(0x80 | chunk)
def recursive_merge_dicts(a, b):
    """Recursively merge two dictionaries.
    Entries in b override entries in a; nested dicts are merged key by key
    instead of being replaced wholesale. The built-in update function
    cannot be used for hierarchical dicts, see:
    http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth/3233356#3233356
    Parameters
    ----------
    a : dict
        dictionary to be merged
    b : dict
        dictionary to be merged (wins on conflicts)
    Returns
    -------
    c : dict
        merged dict
    Examples
    --------
    >>> from gammapy.utils.scripts import recursive_merge_dicts
    >>> a = dict(a=42, b=dict(c=43, e=44))
    >>> b = dict(d=99, b=dict(c=50, g=98))
    >>> c = recursive_merge_dicts(a, b)
    >>> print(c)
    {'a': 42, 'b': {'c': 50, 'e': 44, 'g': 98}, 'd': 99}
    """
    c = a.copy()
    for k, v in b.items():
        # Recurse only when BOTH sides are dicts. A non-dict value from b
        # simply replaces the dict from a (the original crashed with
        # AttributeError on that case because it only checked c[k]).
        if k in c and isinstance(c[k], dict) and isinstance(v, dict):
            c[k] = recursive_merge_dicts(c[k], v)
        else:
            c[k] = v
    return c
def bk_column_names():
    """Return the column names from the test piecash books that are needed
    when creating several bokeh views."""
    return [
        "Category",
        "MonthYear",
        "Price",
        "Product",
        "Date",
        "Currency",
        "Shop",
    ]
def double_char(s):
    """Return `s` with every character doubled ("abc" -> "aabbcc")."""
    # str.join is linear; the original's repeated += concatenation is
    # quadratic in the worst case.
    return ''.join(ch * 2 for ch in s)
def formatString(s):
    """Pad the text before the first ': ' to 20 characters.

    Strings without a ': ' separator are returned unchanged."""
    head, sep, tail = s.partition(': ')
    if not sep:
        return s
    return '{:<20}'.format(head) + sep + tail
def _is_git_index_exists(repo_ctx, repo_local_cache_path):
    """ Check whether a local git index exists at the repository cache path
    Args:
        repo_ctx: Repository context of the calling rule
        repo_local_cache_path: Path of the repository cache folder
    Returns:
        Boolean indicating if a .git directory exists for the repository
    """
    git_dir = repo_local_cache_path + "/.git"
    return repo_ctx.path(git_dir).exists
def is_path_like(obj, attr=('name', 'is_file', 'is_dir', 'iterdir')):
    """Test whether an object quacks like a pathlib.Path.

    :param obj: object to inspect
    :param attr: attribute names that must all be present
    :return: True if `obj` has every attribute in `attr`, else False
    """
    # all() short-circuits exactly like the original early-return loop.
    return all(hasattr(obj, a) for a in attr)
def read_label_map(path_to_labels):
    """Read a plain-text label map (one display name per line) and return
    a dict mapping class id (int, the line number) to display name (str).

    This function expects a txt file, not a pbtxt file."""
    with open(path_to_labels, 'r') as handle:
        labels = handle.read().split('\n')
    return dict(enumerate(labels))
import argparse
def parser(options=None):
    """ Build the argument parser for VetRedRockGUI and parse arguments.
    Parameters
    ----------
    options : argparse.Namespace, optional
        existing namespace to populate instead of creating a new one
    Returns
    -------
    argparse.Namespace with the parsed arguments
    """
    arg_parser = argparse.ArgumentParser(description='Run the VetRedRockGUI on\
        RedRock output')
    arg_parser.add_argument(
        "-i",
        "--initials",
        type=str,
        help="optional: create new output with your initials")
    if options is None:
        return arg_parser.parse_args()
    return arg_parser.parse_args(namespace=options)
from typing import List
def config_noisy(shots: int, optimization_level: int, transpiler_seed: int,
                 initial_layout: List[int], seed_simulator: int, private_config: dict
                 ) -> dict:
    """Assemble the configuration for the noisy simulation of a qcircuit.

    Args:
        shots: Number of shots used in the noisy simulation.
        optimization_level: Optimisation level for the qiskit transpiler.
        transpiler_seed: Seed for transpiling the quantum circuit.
        initial_layout: Initial qubit layout, used when the simulation
            mimics a specific quantum computer (e.g., IBM Cairo).
        seed_simulator: Seed for the overarching simulation.
        private_config: Dictionary holding (mainly) the ibmq token needed
            to access IBM quantum computers.
    Returns:
        Dictionary with the backend configuration and IBMQ API settings.
    """
    backend_config = {
        "shots": shots,
        "optimization_level": optimization_level,
        "transpiler_seed": transpiler_seed,
        "initial_layout": initial_layout,
        "seed_simulator": seed_simulator,
    }
    return {
        "backend_config": backend_config,
        "ibmq_api": private_config["IBMQ"],
    }
def _draw_leg_bbox(ax):
    """
    Force a draw of the axes' figure and return the bounding box of its
    legend frame.
    """
    legend = ax.get_legend()
    ax.figure.canvas.draw()
    return legend.get_frame().get_bbox()
import torch
def sig(gain: float, offset: float, x: torch.Tensor) -> torch.Tensor:
    """Compute a parameterised sigmoid element-wise.

    Args:
        gain: The sigmoid function gain (steepness).
        offset: The sigmoid function offset.
        x: Input tensor, expected in (0, 1).
    Returns:
        Tensor with the sigmoid evaluated for each element.
    """
    odds = offset * (1 - x) / x
    return 1 / (1 + odds**gain)
import argparse
def get_args():
    """
    Parse and return the command-line arguments of the LCLS2 MPS RTM
    test application.
    """
    arg_parser = argparse.ArgumentParser(
        description='LCLS2 MPS RTM Test Application')
    arg_parser.add_argument(
        '--yaml',
        type=str,
        required=True,
        dest='yaml_file',
        help='Path to the top level YAML file (000TopLevel.yaml)')
    arg_parser.add_argument(
        '--ip-addr',
        type=str,
        required=True,
        dest='ip_addr',
        help='FPGA IP Address')
    arg_parser.add_argument(
        '--root-name',
        type=str,
        default='NetIODev',
        dest='root_name',
        help='RTM CPSW root device name (default = "NetIODev")')
    arg_parser.add_argument(
        '--manual',
        action='store_true',
        help='Use I/O manual test procedure (i.e. without the tester device)')
    return arg_parser.parse_args()
def default_not_in_transformer(row, lhs, rhs):
    """
    Check that `lhs` is NOT contained in `rhs`. Objects providing an
    `is_not_in` method are asked directly; everything else falls back to
    the `not in` operator.

    :param row: The row being checked (not used)
    :param lhs: The left hand side of the operator
    :param rhs: The right hand side of the operator
    :return: True if lhs is not in rhs, False otherwise
    """
    if hasattr(lhs, "is_not_in"):
        return lhs.is_not_in(rhs)
    return lhs not in rhs
def get_list(env, name, default=None):
    """Get a comma-separated list from the environment.

    The value of ``name`` is split on commas. If ``name`` is not found in
    ``env``, ``default`` is returned as-is (so pass it as a list), with a
    missing ``default`` meaning an empty list::

        ALLOWED_HOSTS = get_list(os.environ, 'ALLOWED_HOSTS', ['localhost', ])
    """
    if name in env:
        return env[name].split(',')
    return [] if default is None else default
def find_pages_range(soup):
    """
    :param soup: soup object of a search result page
    :returns: max page number of results (1 when no pager is present)
    """
    pager = soup.find("ul", {'class': "pages_ul_inner"})
    if pager:
        links = pager.find_all('a')
        last_page = links[-1].string
    elif pager is None:
        last_page = 1
    else:
        # a pager element that is present but empty/falsy: unexpected markup
        raise AttributeError
    return int(last_page)
def list_salesforce_api_versions():
    """Helper function returning the supported Salesforce API versions."""
    supported = (
        ("20.0", "Winter '11"),
        ("21.0", "Spring '11"),
        ("26.0", "Winter '13"),
    )
    return [
        {
            "version": version,
            "label": label,
            "url": "/services/data/v" + version,
        }
        for version, label in supported
    ]
def get_env():
    """
    Return the dotted module path of the local environment settings.
    """
    return 'settings.environment.local'
def filter_title(job):
    """Extract the job title and an absolute link to the listing."""
    title = job.find('a', attrs={'class': 'job-link'}).text
    relative_link = job.find('a', href=True).attrs['href']
    absolute_link = 'https://stackoverflow.com{}'.format(relative_link)
    return (absolute_link, title)
def bytes_to_int(bs):
    """Convert a big-endian byte sequence into a single integer.

    :param bs: bytes, bytearray or iterable of ints, most significant first
    :return: the represented non-negative integer (0 for empty input)
    """
    # int.from_bytes is the built-in, C-speed equivalent of the manual
    # positional-weight loop.
    return int.from_bytes(bytes(bs), 'big')
def getcookies():
    """
    Interactively prompt the user for a Douban cookie string and parse it
    into a dict, re-prompting until the input parses cleanly.

    Return:
        cookies(dict): Douban cookie, mapping name -> value
    """
    while True:
        try:
            raw_cookies = input('\n--------------------\
            \n\n请输入你的cookie(最后不要带空格):')
            cookies = {}
            for line in raw_cookies.split(';'):
                # split only on the FIRST '=': cookie values may contain '='
                key,value = line.split('=',1)
                cookies[key] = value
            break
        except Exception as e:
            print('\ncookie输入错误:\n',e)
    return cookies
import subprocess
import shlex
def os_cmd(cmd):
    """
    Wrapper around subprocess
    @param cmd: command to be run (a string, tokenised with shlex)
    @return: exit code of the process and tuple of stdout and stderr of
        the process, each decoded as UTF-8 and split into lines
    """
    proc = subprocess.run(shlex.split(cmd),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    stdout_lines = proc.stdout.decode('utf-8').split('\n')
    stderr_lines = proc.stderr.decode('utf-8').split('\n')
    return proc.returncode, (stdout_lines, stderr_lines)
def zipl(*lists):
    """
    Equivalent to python2 zip: return a concrete list instead of the
    lazy iterator that python3's zip yields.
    """
    zipped = zip(*lists)
    return list(zipped)
from datetime import datetime
def get_FY_short(the_date=None):
    """Return the 2-digit fiscal year as an integer.

    The fiscal year rolls over in July: dates from July onward count as
    the next fiscal year.

    :param the_date: datetime to evaluate; defaults to *now* at call time.
        (The original used ``datetime.now()`` as the default, which is
        evaluated ONCE at import time and then frozen.)
    """
    if the_date is None:
        the_date = datetime.now()
    if the_date.month > 6:
        return the_date.year - 2000
    return the_date.year - 2001
def to_download_url(path):
    """Build the URL of the filebrowser views.py download function for
    the given resource path."""
    return '/filebrowser/option?name=download_resource&path={}'.format(path)
from typing import List
def half_range(data: List[float]) -> float:
    """
    Half the spread of the data: the absolute uncertainty associated with
    a set of measurements.

    Parameters
    ----------
    data
        List/tuple of the data
    """
    spread = max(data) - min(data)
    return spread / 2
def get_available_summary_methods():
    """List the metrics available for summarising output scores."""
    methods = ["mean", "median"]
    return methods
def _isnan(num):
    """Determine whether a string or float represents a NaN.

    (1, sign, diagnostic info as string) => NaN
    (2, sign, diagnostic info as string) => sNaN
    0 => not a NaN
    """
    text = str(num).lower()
    if not text:
        return 0
    # strip a single leading sign; remember whether it was negative
    sign = 0
    if text.startswith('+'):
        text = text[1:]
    elif text.startswith('-'):
        text = text[1:]
        sign = 1
    for prefix, code in (('nan', 1), ('snan', 2)):
        if text.startswith(prefix):
            payload = text[len(prefix):]
            # any non-numeric diagnostic payload invalidates the NaN
            if payload and not payload.isdigit():
                return 0
            return (code, sign, payload.lstrip('0'))
    return 0
import csv
def read_employees(csv_file_location):
    """
    Register a strict, space-tolerant CSV dialect, read the given CSV
    file and return its rows as a list of employee dicts.
    """
    csv.register_dialect('empDialect', skipinitialspace=True, strict=True)
    with open(csv_file_location) as handle:
        reader = csv.DictReader(handle, dialect='empDialect')
        return [row for row in reader]
def generate_all_ev_combinations():
    """Generate all possible stat investment combinations.

    Returns:
        dict mapping each stat name to a list of combination dicts.
        Offensive ("atk", "spa") and defensive ("def", "spd") stats
        enumerate every (max_evs, positive_nature) pair; "hp" only has a
        max_evs flag.

    Note: every entry is a DISTINCT dict. The original appended the same
    dict object to both "atk"/"spa" (and "def"/"spd"), so mutating a
    combination for one stat silently mutated the other.
    """
    flag_pairs = ((False, False), (True, False), (False, True), (True, True))
    combinations = {stat: [] for stat in ("atk", "spa", "hp", "def", "spd")}
    for stat in ("atk", "spa", "def", "spd"):
        for max_evs, positive_nature in flag_pairs:
            combinations[stat].append(
                {"max_evs": max_evs, "positive_nature": positive_nature}
            )
    combinations["hp"] = [{"max_evs": True}, {"max_evs": False}]
    return combinations
def parsesummaryfilecontents(info):
    """
    parse info from contents of summary file
    input: info struct with a `path` attribute
    output: info struct with `lastfivelines` attached (the last five
        lines of the file, which includes a possible final empty line)
    """
    # for a start, just take the last five lines of the summary file;
    # a context manager closes the handle (the original leaked it)
    with open(info.path, 'rt') as handle:
        data = handle.readlines()
    info.lastfivelines = data[-5:]
    return info
    # end parsesummaryfilecontents()
def get_attribute(class_name, attribute):
    """
    Magic fallback used by the metaclass created for Itop objects:
    instead of raising when a requested attribute does not exist, always
    report None.

    Parameters
    ----------
    class_name: str
        Name of the class the attribute was requested on.
    attribute: str
        The attribute whose value was requested.
    """
    return None
def coll_types(coll):
    """
    Return an iterator over the types contained in the collection.
    """
    types_iter = coll.types()
    return types_iter
import os
def read_sequence(filepath):
    """
    Read a single sequence from a FASTA (.fa) file.

    :param filepath: path to a FASTA file containing one header line
        followed by sequence lines.
    :return: (seq, name) where seq is the concatenated sequence and name
        is the file name without its extension.
    """
    name = os.path.splitext(os.path.basename(filepath))[0]
    with open(filepath, "r") as handle:
        lines = handle.readlines()
    # skip the ">" header line; str.join avoids quadratic concatenation
    seq = ''.join(line.strip() for line in lines[1:])
    return seq, name
def get_words_from_dictionary(lemmas):
    """
    Collect the original set of words used for analysis.

    :param lemmas: A dictionary where keys are lemmas and values are sets
    or lists of words corresponding to that lemma.
    :type lemmas: dict(str): list(str)
    :return: Set of words that exist as values in the dictionary
    :rtype: set(str)
    """
    return {word for word_group in lemmas.values() for word in word_group}
import random
def get_random_lat_long():
    """
    Return a random (latitude, longitude) pair.

    :return tuple(float, float): latitude in [-90, 90], longitude in [-90, 90]
    """
    latitude = random.uniform(-90.0, 90.0)
    longitude = random.uniform(-90.0, 90.0)
    return (latitude, longitude)
import hashlib
def hash_list(argv, size=8):
    """
    Propose a short, deterministic hash for a list of arguments.

    @param argv list of arguments on the command line.
    @param size maximum length of the returned hash (was previously
                ignored — truncation was hard-coded to 8 characters)
    @return string of at most *size* hex characters
    """
    joined = "--".join(map(str, argv))
    digest = hashlib.md5(joined.encode("utf-8")).hexdigest()
    # An MD5 hexdigest is always 32 chars, so this truncates to *size*.
    return digest[:size]
def get_first_timestamp(busses: list[int]) -> int:
    """Get the first timestamp.

    Placeholder: the computation has not been written yet, so the
    ``NotImplemented`` sentinel is returned for every input.
    """
    return NotImplemented
def get_table_1():
    """Table 1: specific fan power (f_SFP) of the air-conveyance fan.

    Returns:
        tuple: the two f_SFP values from Table 1
    """
    return (0.4, 0.2)
from typing import Optional
import re
import time
def _generate_default_job_name(image_uri: Optional[str]) -> str:
"""
Generate default job name using the image uri and a timestamp
Args:
image_uri (str, optional): URI for the image container.
Returns:
str: Job name.
"""
if not image_uri:
job_type = "-default"
else:
job_type_match = re.search("/amazon-braket-(.*)-jobs:", image_uri) or re.search(
"/amazon-braket-([^:/]*)", image_uri
)
job_type = f"-{job_type_match.groups()[0]}" if job_type_match else ""
return f"braket-job{job_type}-{time.time() * 1000:.0f}" | 06d983299008652a503ba9dbf943502290016394 | 44,733 |
import json
def _get_example(line):
"""Extract relevant fields from json.
Args:
line: String for json line.
Returns:
example_id: integer.
question: string.
contexts: List of strings.
context_indices: List of (int32, int32) tuples.
"""
json_example = json.loads(line)
example_id = json_example["example_id"]
long_answer_candidates = json_example["long_answer_candidates"]
contexts = []
context_indices = []
for candidate in long_answer_candidates:
if candidate["top_level"]:
# Get candidate start and end indices.
start_index = candidate["start_token"]
end_index = candidate["end_token"]
context_indices.append((start_index, end_index))
# Get candidate contexts.
candidate_tokens = json_example["document_tokens"][start_index:end_index]
candidate_tokens = [t["token"] for t in candidate_tokens]
candidate_str = u" ".join(candidate_tokens)
candidate_str = candidate_str.lower()
contexts.append(candidate_str)
# Get question.
question = u" ".join(json_example["question_tokens"])
question = question.lower()
return example_id, question, contexts, context_indices | f57cf475c55566bf5a4f19699aab63d54839535d | 44,735 |
from typing import List
from typing import Union
def make_it_fit(cell_list: List[str], limit: int) -> Union[None, List[str]]:
    """
    Try to shrink *cell_list* to *limit* elements by removing empty-string
    elements from left to right (the list is mutated in place).

    Returns the list once its length reaches *limit*, the list unchanged if
    it already fits, or None when removing empty strings cannot make it fit.
    """
    if len(cell_list) <= limit:
        return cell_list
    # Keep removing the left-most empty string until the list fits or no
    # empty strings remain. (Previously the function gave up after a single
    # removal, returning None even when further removals would have fit.)
    while "" in cell_list:
        cell_list.remove("")
        if len(cell_list) == limit:
            return cell_list
    return None
import time
def _generate_container_name() -> str:
"""Generate a Docker container name for use in DockerPolicy."""
return 'ros2launch-sandboxed-node-{}'.format(time.strftime('%H%M%S')) | 28eb30dd5c4a49e4342feeec94d09dda9f3f4c53 | 44,737 |
def getSigGenIDN(sg):
    """
    Query the signal generator for its identification string.

    Parameters:
        sg : socket connection to the signal generator
    Returns:
        IDN : identification response, decoded as a UTF-8 string
    """
    # *IDN? is the standard SCPI identification query.
    sg.sendall(b'*IDN?\r\n')
    reply = sg.recv(1024)
    return reply.decode('utf8')
def get_ds(dc, name):
    """
    Pick a datastore by its name.

    :param dc: datacenter object exposing ``datastore`` (iterable) and ``name``
    :param name: name of the datastore to locate
    :return: the matching datastore object
    :raises Exception: when no datastore with that name is found
    """
    for ds in dc.datastore:
        try:
            if ds.name == name:
                return ds
        except Exception:
            # Skip datastores whose name cannot be read. A bare `except:`
            # here also swallowed SystemExit/KeyboardInterrupt, which made
            # the process hard to interrupt.
            continue
    raise Exception("Failed to find %s on datacenter %s" % (name, dc.name))
import string
def plate2robot_loc(row_val, col_val, plate_type='96'):
    """Convert a (row letter, column number) plate position into the
    column-wise numeric well index required for TECAN robot positioning.

    [args]
    row_val: string; row letter (A, B, ...)
    col_val: int; 1-based column number
    plate_type: string; plate type ('96' or '384'), sets the index range
    """
    # Map row letters to 1-based numeric positions (A -> 1, B -> 2, ...).
    letter_to_num = {letter: i + 1 for i, letter in enumerate(string.ascii_uppercase)}
    row_num = letter_to_num[row_val]
    if plate_type == '96':
        # 8 rows per column on a 96-well plate.
        loc = (col_val - 1) * 8 + row_num
        assert 0 < loc <= 96, 'Destination location "{}" is out of range'.format(loc)
    elif plate_type == '384':
        # 16 rows per column on a 384-well plate.
        loc = (col_val - 1) * 16 + row_num
        assert 0 < loc <= 384, 'Destination location "{}" is out of range'.format(loc)
    else:
        raise ValueError('Labware type "{}" not recognized'.format(plate_type))
    return loc
def server_url(http_server):
    """Base URL at which *http_server* can be reached."""
    host, port = http_server.server_address
    return "http://{}:{}".format(host, port)
def bins(hist):
    """
    Expose the bins of a histogram.

    The returned object (whatever ``hist.__bins__()`` provides) supports
    indexing and iteration; elements carry the appropriate dimensionality.
    """
    return hist.__bins__()
from typing import Dict
def dauphin_info(dauphin_county_dict) -> Dict[str, str]:
    """
    Look up the basic-info dict for Dauphin County (name, mailing list ID,
    etc.) from the supplied county dictionary.
    """
    # "42043" is the FIPS county code for Dauphin County, PA.
    return dauphin_county_dict["42043"]
from typing import Callable
from typing import Type
from typing import Optional
from typing import Tuple
def make_metaclass(metaclass_callback: Callable[[Type], None], exclude: Optional[Tuple[str, ...]] = None) -> Type:
    """
    Build a metaclass that invokes a callback for every class created with it.

    :param metaclass_callback: called with each newly created class
    :param exclude: class names that should NOT trigger the callback
    :return: the metaclass
    """
    skip_names = exclude if exclude is not None else ()

    class Metaclass(type):
        def __new__(mcs, name, bases, namespace):
            new_class = super().__new__(mcs, name, bases, namespace)
            if name not in skip_names:
                metaclass_callback(new_class)
            return new_class

    return Metaclass
def select_rep(df):
    """Select a representative clv value for a cluster."""
    modes = df.clv.mode()
    if len(modes) > 0:
        # One or more modes found: represent the cluster by their median.
        return modes.median()
    # No mode at all (e.g. empty column): fall back to the plain median.
    return df.clv.median()
def layer_rows(layer):
    """
    Given a layer (ring) of a number spiral, return its four sides as lists.

    Layer *n* covers the values (2(n-1)-1)^2 + 1 .. (2n-1)^2, split into
    four equal rows in order.

    Note: the original used `len(els) / 4`, which is a float in Python 3
    and made every slice raise TypeError; integer division fixes that.
    """
    start = (2 * (layer - 1) - 1) ** 2 + 1
    stop = (2 * layer - 1) ** 2 + 1
    els = list(range(start, stop))
    side = len(els) // 4
    return [els[:side], els[side:2 * side], els[2 * side:3 * side], els[3 * side:]]
import random
def get_random_speechcon():
    """Return SSML markup for a randomly chosen speechcon interjection."""
    interjections = ['hmm', 'awesome', 'ah']
    template = ("<say-as interpret-as='interjection'>{} !"
                "</say-as><break strength='strong'/>")
    return template.format(random.choice(interjections))
def grad_sp_solve_nl_parameters(x, parameters, a_indices, b, fn_nl):
    """
    We are finding the solution (x) to the nonlinear function:
    f = A(x, p) @ x - b = 0
    And need to define the vjp of the solution (x) with respect to the parameters (p)
    vjp(v) = (dx / dp)^T @ v
    To do this (see Eq. 5 of https://pubs-acs-org.stanford.idm.oclc.org/doi/pdf/10.1021/acsphotonics.8b01522)
    we need to solve the following linear system:
    [ df / dx, df / dx*] [ dx / dp ] = -[ df / dp]
    [ df* / dx, df* / dx*] [ dx* / dp ] [ df* / dp]
    Note that we need to explicitly make A a function of x and x* for complex x
    In our case:
    (df / dx) = (dA / dx) @ x + A
    (df / dx*) = (dA / dx*) @ x
    (df / dp) = (dA / dp) @ x
    How do we put this into code? Let
    A(x, p) @ x -> Ax = sp_mult(entries_a(x, p), indices_a, x)
    Since we already defined the primitive of sp_mult, we can just do:
    (dA / dx) @ x -> ag.jacobian(Ax, 0)
    Now how about the source term?
    (dA / dp) @ x -> ag.jacobian(Ax, 1)
    Note that this is a matrix, not a vector.
    We'll have to handle dA/dx* but this can probably be done, maybe with autograd directly.
    Other than this, assuming entries_a(x, p) is fully autograd compatible, we can get these terms no problem!
    Coming back to our problem, we actually need to compute:
    (dx / dp)^T @ v
    Because
    (dx / dp) = -(df / dx)^{-1} @ (df / dp)
    (ignoring the complex conjugate terms). We can write this vjp as
    (df / dp)^T @ (df / dx)^{-T} @ v
    Since df / dp is a matrix, not a vector, its more efficient to do the mat_mul on the right first.
    So we first solve
    adjoint(v) = -(df / dx)^{-T} @ v
    => sp_solve(entries_a_big, transpose(indices_a_big), -v)
    and then it's a simple matter of doing the matrix multiplication
    vjp(v) = (df / dp)^T @ adjoint(v)
    => sp_mult(entries_dfdp, transpose(indices_dfdp), adjoint)
    and then return the result, making sure to strip the complex conjugate.
    return vjp[:N]
    """
    # NOTE(review): the adjoint solve described in the docstring above has
    # not been implemented yet — the returned vjp is a stub that always
    # raises. The parameters (x, parameters, a_indices, b, fn_nl) are
    # currently unused; presumably they will feed the adjoint system once
    # implemented — confirm against the derivation when filling this in.
    def vjp(v):
        raise NotImplementedError
    return vjp
def sample_mocked_geolocation(**params):
    """Create and return a sample mocked geolocation dict.

    Keyword arguments override (or extend) the default fields.
    """
    defaults = {
        'ip': '10.0.0.1',
        'country_name': 'Mars',
        'region_code': 'SOL512',
        'city': 'RedSand',
        'latitude': 49.02342,
        'longitude': 40.34342,
        'zip': '1052',
    }
    return {**defaults, **params}
import torch
def dhinge_dz(z, t, margin=1.0, trunc_thresh=float('inf'), norm_by_size=True):
    """
    Compute the derivative of the (truncated) hinge loss w.r.t. the input z.

    The gradient is -t wherever margin - trunc_thresh < z*t <= margin and
    zero elsewhere, optionally scaled by 1/len (first dimension of z*t).
    """
    tz = z * t
    # Gradient is active only inside the (possibly truncated) hinge window.
    active = (torch.gt(tz, margin - trunc_thresh) * torch.le(tz, margin)).float()
    grad = active * -t
    if norm_by_size:
        grad = grad * (1.0 / tz.size()[0])
    return grad
def community2name(community):
    """Turn a community — a list of concept words — into a single
    space-separated name string."""
    separator = " "
    return separator.join(community)
def readFile(filepath):
    """Read the file at *filepath* and return its non-blank lines.

    Prints the error and returns None when the file cannot be opened;
    prints "Empty file" and exits the program when no non-blank lines exist.

    Parameters
    ----------
    filepath : str
        The path of the filename to be processed

    Returns
    -------
    lines : list or None
        The stripped, non-empty lines of the file
    """
    try:
        with open(filepath) as handle:
            lines = [text for text in (raw.strip() for raw in handle) if text]
    except IOError as err:
        print(err)
        return None
    if not lines:
        print("Empty file")
        exit(1)  # NOTE: terminates the whole program on an empty file
    return lines
def drop_users_to_ignore(ignore, license_lists):
    """Drop ignored users from each license-type DataFrame before comparison.

    Parameters
    ----------
    ignore (DataFrame) : Users to ignore; must have an 'email' column
    license_lists (dict) : dictionary of DataFrames, one per license type

    Returns
    -------
    license_lists (dict) : same dict, with rows removed wherever
        'User principal name' matched an email in *ignore*
    """
    ignored_emails = ignore['email']
    for license_type, users in license_lists.items():
        keep_mask = ~users['User principal name'].isin(ignored_emails)
        license_lists[license_type] = users[keep_mask]
    return license_lists
def roundAndFormat1(xxx):
    """Format a number as a clean integer-percentage string for a legend.

    Hack: first reduce to 2 significant digits, then round to the nearest
    integer, then strip trailing zeros/decimal point — so 65.241 -> "65"
    and 6.7 -> "7".
    """
    two_sig = '{:0.2}'.format(xxx)              # keep 2 significant digits
    rounded = round(float(two_sig), 0)          # nearest integer (as float)
    as_fixed = '%f' % float(str(rounded))       # e.g. "65.000000"
    formatted = as_fixed.rstrip('0').rstrip('.')
    return str(formatted)
import pytz
from datetime import datetime
def generateDateTimeBasedKey():
    """Generate an integer key from the current date and time in Madrid.

    The key is the concatenation DDMMYYYY + HHMMSS + mmm (milliseconds,
    zero-padded to 3 digits) parsed as an int, so a leading zero in the
    day is dropped from the numeric value.
    """
    madrid = pytz.timezone("Europe/Madrid")
    now = datetime.now(madrid)
    # Zero-pad milliseconds to 3 digits (replaces a manual while-loop pad);
    # also removes the unused dt/tm/mdate/mtime duplicates of now.
    millis = '{:03d}'.format(int(now.microsecond / 1000))
    return int(now.strftime("%d%m%Y") + now.strftime("%H%M%S") + millis)
def delete_cookie(cookieName: str, url: str) -> dict:
    """Build the command that deletes a browser cookie with the given name,
    domain and path.

    Parameters
    ----------
    cookieName: str
        Name of the cookie to remove.
    url: str
        URL to match cookie domain and path.
    **Experimental**
    """
    params = {"cookieName": cookieName, "url": url}
    return {"method": "Page.deleteCookie", "params": params}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.