content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import random
import string


def random_alphanumeric_string(length=64):
    """
    Generate a random alphanumeric string of the specified length.

    NOTE: this uses the ``random`` module and is therefore NOT suitable
    for secrets; use the ``secrets`` module for security-sensitive tokens.

    :param length: Length of the random string
    :return: A random string of ``length`` ASCII letters and digits
    """
    # string.ascii_letters covers both cases; random.choices samples with
    # replacement in one C-level call.
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
import torch
from typing import Optional
from typing import Tuple
def assign_labels(
    spikes: torch.Tensor,
    labels: torch.Tensor,
    n_labels: int,
    rates: Optional[torch.Tensor] = None,
    alpha: float = 1.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    # language=rst
    """
    Assign labels to the neurons based on highest average spiking activity.

    :param spikes: Binary tensor of shape ``(n_samples, time, n_neurons)`` of a single
        layer's spiking activity.
    :param labels: Vector of shape ``(n_samples,)`` with data labels corresponding to
        spiking activity.
    :param n_labels: The number of target labels in the data.
    :param rates: If passed, these represent spike rates from a previous
        ``assign_labels()`` call.  NOTE: this tensor is updated in place.
    :param alpha: Rate of decay of label assignments.
    :return: Tuple of class assignments, per-class spike proportions, and per-class
        firing rates.
    """
    n_neurons = spikes.size(2)
    if rates is None:
        rates = torch.zeros((n_neurons, n_labels), device=spikes.device)
    # Sum over time dimension (spike ordering doesn't matter).
    spikes = spikes.sum(1)
    for i in range(n_labels):
        # Count the number of samples with this label.
        n_labeled = torch.sum(labels == i).float()
        if n_labeled > 0:
            # Get indices of samples with this label.
            indices = torch.nonzero(labels == i).view(-1)
            # Decay the previous rate by alpha and add this batch's average
            # firing rates for the label (writes into `rates` in place).
            rates[:, i] = alpha * rates[:, i] + (
                torch.sum(spikes[indices], 0) / n_labeled
            )
    # Normalise each neuron's rates into per-class proportions; a neuron that
    # never fired yields 0/0 = NaN, detected below via NaN != NaN.
    proportions = rates / rates.sum(1, keepdim=True)
    proportions[proportions != proportions] = 0  # Set NaNs to 0
    # Neuron assignments are the labels they fire most for.
    assignments = torch.max(proportions, 1)[1]
    return assignments, proportions, rates
from datetime import datetime


def meso_api_dates(start_date, end_date):
    """
    Return string-formatted start and end dates for the MesoPy api.

    :param start_date: datetime marking the start of the window
    :param end_date: datetime marking the end of the window
    :return: ('YYYYMMDDHHMM', 'YYYYMMDDHHMM') tuple of strings
    """
    # strftime already returns str, so the old str() wrapping was redundant.
    start = start_date.strftime('%Y%m%d%H%M')
    end = end_date.strftime('%Y%m%d%H%M')
    return start, end
def GetInterconnectAttachmentRef(resources, name, region, project):
    """Generates an interconnect attachment reference from the specified name, region and project."""
    # Delegate to the resource registry with the fixed attachment collection.
    params = {'project': project, 'region': region}
    return resources.Parse(
        name,
        collection='compute.interconnectAttachments',
        params=params)
import json


def read_json(json_path: str):
    """Read a json file.

    :param json_path: path to json file
    :return: parsed json content
    """
    # Explicit encoding makes behaviour platform-independent.
    with open(json_path, encoding='utf-8') as f:
        return json.load(f)
def interp_dt(dt0, N, M):
    """
    Calculate new dt based on Vickers and Mahrt, 2003 eq. 8.
    """
    denominator = (2 ** M) - 1
    return ((N - 1) * dt0) / denominator
import six


def get_from_dict_if_exists(key, dictionary, convert_key_to_binary=True):
    """
    Get the entry from the dictionary if it exists.

    Args:
        key: key to lookup
        dictionary: dictionary to look in
        convert_key_to_binary: convert the key from string to binary if true
    Returns:
        the value of dictionary[key] or None
    """
    if convert_key_to_binary:
        key = six.b(key)
    # dict.get already returns None for missing keys — a single lookup
    # instead of the old `in` test followed by indexing.
    return dictionary.get(key)
def _check_callable(func, value):
    """Return true if func(value) returns is true or if *func* is
    *value*.
    """
    # Identity short-circuits before the callable is ever invoked.
    if value is func:
        return True
    return func(value)
from typing import Dict
from typing import Any
import requests


def get_aws_service_region_mapping_json(services_regions_json_url: str) -> Dict[str, Any]:
    """Fetch the AWS service/region mapping json and return it as a dict."""
    response = requests.get(services_regions_json_url)
    return response.json()
def read_links(links_filename):
    """Read a links CSV file and return the first-column ids as ints.

    The file has one header row (skipped) followed by data rows; only the
    first comma-separated column of each data row is kept.

    :param links_filename: path to the links CSV file
    :return: list of int link ids
    """
    with open(links_filename) as in_f:
        next(in_f, None)  # skip the header row (no-op on an empty file)
        return [int(line.strip().split(',')[0]) for line in in_f]
def update_sequence_viewpoint(seq, vp_s, vp_e):
    """
    Update sequence viewpoint, i.e. region marked by vp_s (start) and vp_e
    (end), converting viewpoint to uppercase, and rest to lowercase.
    NOTE that vp_s and vp_e are expected to be 1-based index.

    >>> seq = "acgtACGTacgt"
    >>> update_sequence_viewpoint(seq, 4, 9)
    'acgTACGTAcgt'
    >>> seq = "acgtacgtACGTac"
    >>> update_sequence_viewpoint(seq, 5, 16)
    'acgtACGTACGTAC'
    """
    assert seq, "seq empty"
    assert vp_s <= vp_e, "vp_s > vp_e"
    # Convert the 1-based start to a 0-based slice index; vp_e is already a
    # valid exclusive slice bound.
    start = vp_s - 1
    return seq[:start].lower() + seq[start:vp_e].upper() + seq[vp_e:].lower()
def _compare_across(collections, key):
    """Return whether all the collections return equal values when called with
    `key`."""
    if len(collections) < 2:
        # Zero or one collection is trivially consistent.
        return True
    reference = key(collections[0])
    # all() short-circuits, so `key` is not called past the first mismatch.
    return all(reference == key(other) for other in collections[1:])
def format_exif_timezone_offset(offset: int) -> str:
    """
    Serializes a number of minutes to a EXIF time offset ("±HH:MM") string.

    :param offset: signed offset from UTC in minutes
    :return: "+HH:MM" or "-HH:MM" (zero maps to "+00:00")
    """
    sign_str = "+" if offset >= 0 else "-"
    # divmod on the magnitude replaces the manual sign/negate/zfill dance.
    hours, minutes = divmod(abs(offset), 60)
    return f"{sign_str}{hours:02d}:{minutes:02d}"
import math


def tan_radian(angle):
    """
    Return the tangent of the angle.

    Arguments:
        angle (float): An angle measured in radians
    """
    return math.tan(angle)
import collections


def array_to_points(points):
    """Transform a flat array of floats into a list of (lat, long) 2-tuples.

    Consecutive values are consumed in pairs; an odd-length input raises
    IndexError when the final unpaired value is reached.
    """
    queue = collections.deque(points)
    point_list = []
    while queue:
        # Tuple elements evaluate left-to-right: latitude first, then longitude.
        point_list.append((queue.popleft(), queue.popleft()))
    return point_list
def pattern_from_url(url_pattern):
    """
    Finds the internal stringified pattern for a URL across
    Django versions.
    Newer versions of Django use URLPattern, as opposed to
    RegexURLPattern.
    """
    if hasattr(url_pattern, 'pattern'):
        # Django >= 2.0: URLPattern exposes a .pattern object.
        pattern = str(url_pattern.pattern)
    elif hasattr(url_pattern._regex, 'pattern'):
        # NOTE(review): the guard inspects `_regex` but the access goes
        # through `regex` — confirm both resolve to the compiled pattern on
        # the Django versions this supports.
        pattern = str(url_pattern.regex.pattern)
    else:
        # Oldest fallback: `_regex` is taken to already be the raw pattern.
        pattern = url_pattern._regex
    return pattern
def is_subset(smaller, larger):
    """
    checks if a switch event is a subset of a larger one
    :param smaller: a (int, int, frozenset(str))
    :param larger: a (int, int, frozenset(str))
    :return: True if smaller in larger else False
    """
    if smaller[2] != larger[2]:
        return False
    # Both endpoints of `smaller` must fall inside the inclusive
    # [larger[0], larger[1]] interval; return the test directly instead of
    # the old if/else-True/False.
    larger_range = range(larger[0], larger[1] + 1)
    return smaller[0] in larger_range and smaller[1] in larger_range
def week(theweek, width):
    """Returns a single week in a string (no newline)."""
    def render(day):
        # Zero marks a day outside this month; render it as blanks.
        text = '' if day == 0 else '%2i' % day
        return text.center(width)

    return ' '.join(render(day) for day in theweek)
def get_student_id(request):
    """Get the student_id from the given request, defaulting to 'student_1'."""
    return request.GET.get('student', 'student_1')
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
    """Return a list of all build files included into build_file_path.

    The returned list will contain build_file_path as well as all other files
    that it included, either directly or indirectly. Note that the list may
    contain files that were included into a conditional section that evaluated
    to false and was not merged into build_file_path's dict.

    aux_data is a dict containing a key for each build file or included build
    file. Those keys provide access to dicts whose "included" keys contain
    lists of all other files included by the build file.

    included should be left at its default None value by external callers. It
    is used for recursion.

    The returned list will not contain any duplicate entries. Each build file
    in the list will be relative to the current directory.
    """
    if included is None:  # `is None`, not `== None` (identity, not equality)
        included = []
    if build_file_path in included:
        # Already visited — also breaks include cycles.
        return included
    included.append(build_file_path)
    for included_build_file in aux_data[build_file_path].get('included', []):
        GetIncludedBuildFiles(included_build_file, aux_data, included)
    return included
import hashlib


def generate_checksum(check_strings):
    """Create a md5 checksum using each string in the list.

    :param check_strings: resource name
    :type check_strings: List[str]
    :returns: Hash generated from strings.
    :rtype: str
    """
    digest = hashlib.md5()
    for text in check_strings:
        digest.update(text.encode('utf-8'))
    return digest.hexdigest()
def normalize_path_elms(path):
    """Replace each space with a double underbar and each dash with an
    underbar, returning the result."""
    # One translate pass instead of chained .replace() calls.
    return path.translate(str.maketrans({' ': '__', '-': '_'}))
def calculate_bin(memory):
    """Calculates the memory bin (EC2 Instance type) according to the amount
    of memory in gigabytes needed to process the job."""
    if memory < 1.792:
        return 0
    if memory < 7.168:
        return 1
    if memory < 14.336:
        return 2
    if memory >= 14.336:
        return 3
    # Reached only for NaN input, which fails every comparison above.
    return "nan"
def convert_number(num_str, cls=int):
    """Convert a comma-grouped numeric string into a numeric value.

    :params num_str: numeric string (may contain "," group separators)
    :params cls: numeric type to construct, defaults to `int`
    :returns: value of type cls
    """
    plain = num_str.replace(",", "")
    return cls(plain)
def generate_cert_override(overrides):
    """
    Generate the contents for a cert_override.txt file with entries for
    the given CertOverrideEntry objects.
    """
    header = (
        '# PSM Certificate Override Settings file\n'
        '# This is a generated file! Do not edit.\n'
    )
    # Build each entry line once and join, instead of quadratic `+=` on str.
    lines = [
        '\t'.join([
            override.host + ':' + override.port,
            override.fingerprint_algorithm,
            override.fingerprint,
            str(override.mask),
            override.db_key,
        ]) + '\n'
        for override in overrides
    ]
    return header + ''.join(lines)
def calculate_score(imp: list, dats: list) -> float:
    """Calculates the score of a tweet given the importance of each topic
    and the topic distribution of that tweet.

    Args:
        imp (list): importance of each topic
        dats (list): topic distribution in a tweet
    Returns:
        float: score of the tweet with such topic distribution
    """
    # Dot product via zip replaces the manual index loop; zip also tolerates
    # unequal lengths by truncating to the shorter list.
    return sum(weight * share for weight, share in zip(imp, dats))
def split_line(line):
    """Split a line read from file into a name/sequence tuple.

    Arguments:
        line: line of text with name and sequence separated by tab.
    Returns:
        (name, sequence) tuple; when no tab is present both elements are the
        whole stripped line.
    """
    # Strip/split once instead of doing the identical work twice.
    fields = line.strip('\n').split('\t')
    return (fields[0], fields[-1])
import tqdm


def progress_bar_wrapper(function, pb_total, pb_description):
    """Enclose given function with a progress bar.

    Args:
        function (function):
            The function to execute; receives the progress bar as its
            only argument.
        pb_total (int):
            The total to use in the progress bar.
        pb_description (str):
            The description of the progress bar.
    Returns:
        The function return value.
    """
    with tqdm.tqdm(total=pb_total) as bar:
        bar.set_description(pb_description)
        return function(bar)
def set_op1_str(nvols):
    """
    Build the operand string used by the workflow nodes.

    Parameters
    ----------
    nvols : int

    Returns
    -------
    strs : string
        operand string
    """
    return f'-Tmean -mul {int(nvols)} -div 2'
def sqrt(x):
    """Calculate x**0.5, with x a float scalar or vector."""
    # pow() with exponent 0.5 is equivalent to x ** 0.5 for scalars and
    # array-likes that implement __pow__.
    return pow(x, 0.5)
def get_job_type_word_form(job_count):
    """
    Get the singular / plural word form of a job type.
    While this function only returns "package" or "packages" it allows external
    code to replace the function with custom logic.

    :param job_count: The number of jobs
    :rtype: str
    """
    if job_count == 1:
        return 'package'
    return 'packages'
import logging


def parse_submodules_desc_section(section_items, file_path):
    """Find the path and url for this submodule description.

    :param section_items: iterable of (name, value) pairs from one
        submodule description section
    :param file_path: source file name, used only in warning messages
    :return: (path, url) tuple; either element is None when absent
    """
    path = None
    url = None
    for item in section_items:
        name = item[0].strip().lower()
        if name == 'path':
            path = item[1].strip()
        elif name == 'url':
            url = item[1].strip()
        elif name == 'branch':
            # We do not care about branch since we have a hash - silently ignore
            pass
        else:
            # Lazy %-style args defer formatting until the record is emitted,
            # removing the old two-step format dance.
            logging.warning(
                'WARNING: Ignoring unknown %s property, in %s',
                item[0], file_path)
    return path, url
def default_hook(entries):
    """Identity hook applied to each batch of entries; returns them unchanged."""
    return entries
def __inclst(intlst, maxval):
    """
    intlst:list
        array representing a number (each item is a digit, least-significant
        first); incremented in place
    maxval: int
        max. allowed value for each item in the list, intlst
    Returns
        intlst incremented by 1, or None when every digit overflowed.
    """
    for pos in range(len(intlst)):
        if intlst[pos] < maxval:
            intlst[pos] += 1
            return intlst
        # Digit overflows: reset it and carry into the next position.
        intlst[pos] = 0
    return None
import torch


def intersect(box_a, box_b):
    """
    Compute the pairwise intersection areas of two sets of axis-aligned boxes.

    Args:
        box_a: (tensor) Shape(num_objects, 4) with tl_x, tl_y, br_x, br_y
            tl = topleft
            br = bottomright
        box_b: (tensor) Shape(num_priors, 4) with tl_x, tl_y, br_x, br_y
    Return:
        intersection: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    n_a, n_b = box_a.shape[0], box_b.shape[0]
    # Pair every a-box with every b-box: the overlap's bottom-right corner is
    # the elementwise min of the corners, the top-left the elementwise max.
    lower_right = torch.min(box_a[:, 2:].unsqueeze(1).expand(n_a, n_b, 2),
                            box_b[:, 2:].unsqueeze(0).expand(n_a, n_b, 2))
    upper_left = torch.max(box_a[:, :2].unsqueeze(1).expand(n_a, n_b, 2),
                           box_b[:, :2].unsqueeze(0).expand(n_a, n_b, 2))
    # Negative extents mean "no overlap"; clamp them to zero before the area.
    extent = torch.clamp(lower_right - upper_left, min=0)
    return extent[:, :, 0] * extent[:, :, 1]
async def read_root():
    """Index route used for testing the covid-19 chatbot back-end."""
    payload = {"result": "Welcome to the covid-19 Chatbot Back-end"}
    return payload
import importlib


def mod_from_name(name):
    """Import and return a module given its dotted name, e.g.
    the newsltd_etl module."""
    return importlib.import_module(name)
def convert_ptt_units(ptt_list, tip_factor, tilt_factor, starting_units, ending_units):
    """
    Convert a list of per-segment (piston, tip, tilt) tuples between the
    Poppy unit convention and the segmented DM (IrisAO) unit convention.
    Note that tip and tilt are swapped relative to what Poppy designates.

    Poppy to segmented DM: tip_factor = -1, tilt_factor = 1,
        starting_units = (u.m, u.rad, u.rad),
        ending_units = dm_ptt_units from the config.ini file.
    Segmented DM to Poppy: tip_factor = 1, tilt_factor = -1,
        starting_units = dm_ptt_units from the config.ini file,
        ending_units = (u.m, u.rad, u.rad).

    :param ptt_list: list of (piston, tip, tilt) tuples, one per segment,
        expressed in starting_units
    :param tip_factor: int, either -1 or 1 (see above)
    :param tilt_factor: int, either -1 or 1 (see above)
    :param starting_units: tuple/list of units for piston, tip, tilt of the input
    :param ending_units: tuple/list of units for piston, tip, tilt of the output
    :return: list of (piston, tip, tilt) tuples in ending_units
    """
    converted = []
    for piston, tip, tilt in ptt_list:
        # Indices 1 and 2 are deliberately swapped: the output "tip" comes
        # from the input tilt and vice versa.
        converted.append((
            piston * (starting_units[0]).to(ending_units[0]),
            tip_factor * tilt * (starting_units[2]).to(ending_units[2]),
            tilt_factor * tip * (starting_units[1]).to(ending_units[1]),
        ))
    return converted
def group_by_attr(attr, items, getattr_fn=getattr):
    """
    Group a sequence of items by a shared attribute.
    For example, let's say you have an object "Struct":
    >>> from pprint import pprint
    >>> from collections import namedtuple
    >>> Struct = namedtuple('Struct', ('x', 'y', 'z'))
    And you have a few Struct instances:
    >>> a, b, c = (
    ...     Struct(x=1, y=1, z=1),
    ...     Struct(x=1, y=2, z=2),
    ...     Struct(x=1, y=1, z=3))
    If we were to group these instances by the 'x' attribute, we should
    expect a single group containing all three items:
    >>> pprint(group_by_attr(attr='x', items=(a, b, c)))
    {1: (Struct(x=1, y=1, z=1),
         Struct(x=1, y=2, z=2),
         Struct(x=1, y=1, z=3))}
    If, instead, we were to group by 'y', we should expect a different
    grouping:
    >>> pprint(group_by_attr(attr='y', items=(a, b, c)))
    {1: (Struct(x=1, y=1, z=1),
         Struct(x=1, y=1, z=3)),
     2: (Struct(x=1, y=2, z=2),)}
    Finally, grouping by 'z' will result in three separate groups:
    >>> pprint(group_by_attr(attr='z', items=(a, b, c)))
    {1: (Struct(x=1, y=1, z=1),),
     2: (Struct(x=1, y=2, z=2),),
     3: (Struct(x=1, y=1, z=3),)}
    This function can also use an alternate getattr, as long as it implements
    the same interface (taking an item and an attribute name as arguments).
    For example, you could group dictionaries:
    >>> pprint(group_by_attr(
    ...     attr='x',
    ...     items=(
    ...         {'x': 1, 'y': 'a'},
    ...         {'x': 2, 'y': 'b'},
    ...         {'x': 1, 'y': 'c'}),
    ...     getattr_fn=dict.__getitem__))
    {1: ({'x': 1, 'y': 'a'},
         {'x': 1, 'y': 'c'}),
     2: ({'x': 2, 'y': 'b'},)}
    """
    # Accumulate into lists (amortized O(1) append) rather than repeatedly
    # concatenating tuples (O(group size) per item), then freeze each group
    # to a tuple so the returned shape matches the documented doctests.
    buckets = {}
    for item in items:
        buckets.setdefault(getattr_fn(item, attr), []).append(item)
    return {key: tuple(group) for key, group in buckets.items()}
from typing import List
from typing import Dict
from typing import Any


def convert_timeline_to_diary(timeline: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Converts a list of logged data into a Dict[str, Any] containing lists of
    data instead of singular values, or in other words a diary.

    :param timeline:
        A List[Dict[str, Any]] containing the logged metrics you wish to
        flatten into a diary dictionary.
    :return:
        A Dict[str, Any] representing the diary dictionary.
    """
    diary: Dict[str, Any] = {}
    for time_stamp in timeline:
        for metric, value in time_stamp.items():
            # setdefault creates the per-metric list on first sight.
            diary.setdefault(metric, []).append(value)
    return diary
from typing import List


def NormalizeCorpus(corpus: List[str]) -> List[str]:
    """Normalize words in the corpus by lower-casing each one."""
    return list(map(str.lower, corpus))
def isnormaldataitem(dataitem):
    """
    Detects if the data item is in standard form, i.e. whether it contains
    the 'Table', 'Action' and 'Data' members.
    """
    return all(member in dataitem for member in ('Table', 'Action', 'Data'))
def clean_list(pre_list):
    """Delete blank (zero-length) lines from a list.

    :type pre_list: List
    :param pre_list: A list made from a file
    :rtype: List
    :returns: A Cleaned List
    """
    # Keep the explicit len() > 0 test (not truthiness) so the semantics are
    # identical for any sized element.
    return [line for line in pre_list if len(line) > 0]
def read_animals(filename):
    """
    Reads animal count file agreed upon at May 30, meeting.
    Must have 4 columns, and no header row.
    Columns are date, time, animal name, number seen.

    :param filename: path to the whitespace-separated counts file
    :return: (date, time, animal, number) parallel lists; number holds ints
    """
    date = []
    time = []
    animal = []
    number = []
    # `with` guarantees the handle is closed — the original leaked it.
    with open(filename, 'r') as f:
        # iterate over the file one line at a time
        for line in f:
            d, t, a, n = line.split()
            date.append(d)
            time.append(t)
            animal.append(a)
            number.append(int(n))
    return date, time, animal, number
import re
import string

# Trailing "[...]"/"(...)" groups, e.g. "Song (Live) [Remaster]" — compiled
# once at import time instead of on every call.
_TRAILING_BRACKETS_RE = re.compile(r"((\s*\[[^]]*\])|(\s*\([^)]*\)))+$")
# Translation table that deletes every ASCII punctuation character.
_PUNCTUATION_TABLE = str.maketrans("", "", string.punctuation)


def uniformize_song_title(song_title: str) -> str:
    """Apply basic manual title uniformization transformations.

    .. note::
        This function is meant to be used in conjunction with
        :meth:`fuzzy_score` and can serve for a rough first filtering
        step.

    Parameters
    ----------
    song_title
        Song title.

    Returns
    -------
    :code:`str`
        Uniformized song title.
    """
    # Make everything lower case:
    uniformized = song_title.lower()
    # Remove bracket/brace content from the end of titles:
    uniformized = _TRAILING_BRACKETS_RE.sub("", uniformized)
    # Remove punctuation, then strip surrounding white-space:
    return uniformized.translate(_PUNCTUATION_TABLE).strip()
def firstObjWith(pmgr, field, val):
    """ Returns the first objID found where field is value """
    pmgr.updateTables()
    # next() with a default of None mirrors the original "fall off the loop"
    # behaviour while stopping at the first match.
    return next(
        (obj_id for obj_id, obj in pmgr.objs.items() if obj[field] == val),
        None,
    )
def getfields(jsondict: dict, types: bool = False):
    """Return list of field names or a name:type dict if types=True"""
    fields = jsondict["fields"]
    if not types:
        return [field["name"] for field in fields]
    # Drop the "esriFieldType" prefix so only the bare type name remains.
    return {
        field["name"]: field["type"].replace("esriFieldType", "")
        for field in fields
    }
def get_cols(tname):
    """ Takes the name of a neuropsych test and returns a tupple of two lists.
    The first list has the headers of the metrics of
    interest (columns in the test's score table).
    The second list has the names to be given to these metrics
    in the final table of neuropsych scores outputed by this script.

    Parameter:
    ----------
    tname: string (name of a neuropsych test)

    Return:
    ----------
    a tupple associated with the test's name (key in the switcher dictionnary);
    the tupple contains two lists of strings of equal length
    """
    # Maps test name -> (source column headers, output column names).
    # NOTE(review): some source headers start with a stray leading space
    # (' d.70664_score', ' 18057_score_reponse_correcte') — presumably they
    # match the raw table headers verbatim; confirm against the data files.
    switcher = {
        'alpha_span': (['71233_rappel_alpha_item_reussis', '71233_rappel_alpha_pourcentage'], ['aspan_recall_correct_items', 'aspan_recall_percentage']),
        'boston_naming_test': (['57463_boston_score_correcte_spontanee', '57463_boston_score_total'],['boston_correct_spontaneous', 'boston_total']),
        'easy_object_decision': (['45463_score'], ['easy_object_decision_score']),
        'echelle_depression_geriatrique': ([' d.70664_score'], ['gds_score']),
        'echelle_hachinski': (['86588_score'],['hachinski_score']),
        'evaluation_demence_clinique': (['34013_cdr_sb'], ['cdr_sb']),
        'fluence_verbale_animaux': ([' 18057_score_reponse_correcte'], ['verb_flu_correct_responses']),
        'histoire_logique_wechsler_rappel_immediat': (['24918_score_hist_rappel_immediat'],['log_story_immediate_recall']),
        'histoire_logique_wechsler_rappel_differe': (['40801_score_hist_rappel_differe'],['log_story_delayed_recall']),
        'memoria': (['18087_score_libre_correcte', '18087_score_indice_correcte'],['memoria_free_correct', 'memoria_indice_correct']),
        'moca': (['12783_score', '12783_score_scolarite'], ['moca_score', 'moca_score_schooling']),
        'prenom_visage': (['33288_score_rappel_immediat', '33288_score_rappel_differe'], ['name_face_immediate_recall', 'name_face_delayed_recall']),
        'ravlt': (['86932_mots_justes_essai_1', '86932_mots_justes_essai_total1', '86932_mots_justes_rappel_diff_a', '86932_score_total_reconnaissance'], ['RAVLT_trial1', 'RAVLT_total', 'RAVLT_delRecall', 'RAVLT_recognition']),
        'test_enveloppe': (['75344_score_memoire_prospective', '75344_score_memoire_retrospective'], ['env_prospective_memory', 'env_retrospective_memory']),
        'tmmse': (['80604_score_total'],['mmse_total']),
        'trail_making_test': (['44695_temps_trailA', '44695_temps_trailB', '44695_ratio_trailB_trailA'],['trailA_time', 'trailB_time', 'trailB_trailA_ratio']),
        'stroop': (['77180_cond3_temps_total', '77180_cond3_total_erreurs_corrigees', '77180_cond3_total_erreurs_non_corrigees', '77180_cond4_temps_total', '77180_cond4_total_erreurs_corrigees', '77180_cond4_total_erreurs_non_corrigees'],['Stroop_cond3_time', 'Stroop_cond3_corr_errors', 'Stroop_cond3_nonCorr_errors', 'Stroop_cond4_time', 'Stroop_cond4_corr_errors', 'Stroop_cond4_nonCorr_errors']),
        'vocabulaire': (['87625_score'],['WAIS_vocabulary']),
        'wais_digit_symbol':(['12321_resultat_brut'],['WAIS_digit_symbol_total'])
    }
    # Unknown test names fall back to a pair of empty lists.
    return switcher.get(tname, ([], []))
def update(variable, orig_img, mode):
    """Update a variable based on mode and its difference with orig_img.

    Elementwise shrinkage-thresholding: in "PP" mode keep `variable` where it
    does not exceed `orig_img` and clamp to `orig_img` elsewhere; any other
    mode makes the opposite selection.
    """
    diff = variable - orig_img
    above = diff > 0
    below = diff <= 0
    if mode == "PP":
        return below * variable + above * orig_img
    return above * variable + below * orig_img
from typing import Dict
from typing import Any
from typing import List


def _get_conversion_events(
    count: int, event: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Creates a list of 'count' number of events with unique ordinal Ids.

    Args:
        count: Number of events required in the list.
        event: Payload event to be replicated in the list.
    Returns:
        List containing 'count' number of events.
    """
    # {**event, ...} produces the same shallow copy + key assignment the
    # original dict()/item-set pair did.
    return [{**event, 'ordinal': f'ordinal{i}'} for i in range(count)]
def get_kmer_fraction(row):
    """Calculate the fractional abundance of a given position.

    Args:
        row: BioHansel k-mer frequency pandas df row
    Returns:
        float abundance fraction; 0.0 when the total frequency is not positive
    """
    denominator = row.total_refposition_kmer_frequency
    if denominator > 0:
        return row.freq / denominator
    return 0.0
import json


def json_from_file(f: str) -> dict:
    """ Read a json file into a dict """
    # The old pre-initialised `_dict = {}` was dead code; parse and return
    # directly. Explicit encoding keeps reads platform-independent.
    with open(f, "r", encoding="utf-8") as fh:
        return json.load(fh)
def uniform_prior(p1, p2):
    """Constant (uniform, unnormalised) prior: always return 1."""
    # Parameters are accepted only for interface compatibility.
    return 1
import re


def _split_by_task(devices, values):
    """Partition devices and values by common task.

    Args:
        devices: list of device name strings
        values: list of T @{tf.tensor} of same length as devices.
    Returns:
        (per_task_devices, per_task_values) where both values are
        lists of lists with isomorphic structure: the outer list is
        indexed by task, and the inner list has length of the number
        of values belonging to that task. per_task_devices contains
        the specific devices to which the values are local, and
        per_task_values contains the corresponding values.
    Raises:
        ValueError: devices must be same length as values, and every
        device name must contain a parsable "/task:N/" component.
    """
    if len(devices) != len(values):
        raise ValueError("len(devices) must equal len(values)")
    pattern = re.compile(r"/task:(\d+)/")
    per_task_devices = []
    per_task_values = []
    for device, value in zip(devices, values):
        match = pattern.search(device)
        if match is None:
            # Was `assert False`, which `python -O` strips; raise instead.
            raise ValueError("failed to parse device %s" % device)
        index = int(match.group(1))
        # Grow the outer lists until the task index is addressable.
        while index >= len(per_task_devices):
            per_task_devices.append([])
            per_task_values.append([])
        per_task_devices[index].append(device)
        per_task_values[index].append(value)
    return (per_task_devices, per_task_values)
def stopping_criteria_met(eval_metrics, mask_min_ap, box_min_ap):
    """Returns true if both of the min precision criteria are met in the given
    evaluation metrics.

    Args:
        eval_metrics: dict of metrics names as keys and their corresponding
            values, containing "DetectionMasks_Precision/mAP" and
            "DetectionBoxes_Precision/mAP" fields.
        mask_min_ap: minimum desired mask average precision, ignored if -1
        box_min_ap: minimum desired box average precision, ignored if -1
    Returns:
        True if all non -1 criteria are met and at least one criterion is
        active, False otherwise.
    """
    assert mask_min_ap == -1 or 0 < mask_min_ap < 1
    assert box_min_ap == -1 or 0 < box_min_ap < 1
    try:
        mask_map = eval_metrics['DetectionMasks_Precision/mAP']
        box_map = eval_metrics['DetectionBoxes_Precision/mAP']
    except KeyError as err:
        raise Exception('eval_metrics dict does not contain the mAP field') from err
    mask_ok = mask_min_ap == -1 or mask_map > mask_min_ap
    box_ok = box_min_ap == -1 or box_map > box_min_ap
    # At least one threshold must actually be enabled.
    any_active = mask_min_ap != -1 or box_min_ap != -1
    return mask_ok and box_ok and any_active
def denormalize_data(batch_data, batch_std, batch_mean):
    """
    Denormalize data given the standard deviation and mean.
    """
    # Invert the standard (x - mean) / std normalisation.
    return (batch_data * batch_std) + batch_mean
def map_lookup(vers, lookups, injective=False):
    """
    Creates a Map lookup request.

    vers: str
        An arbitrary string assigned by the user. When making updates to an
        existing lookup the value must be lexicographically greater than
        the existing version.
    lookups: dict
        A dictionary of key/value pairs, both of which must be strings.
    injective: bool, default = False
        Marks the lookup as injective, per this reference:
        https://druid.apache.org/docs/latest/querying/lookups.html#query-execution
    """
    extractor_factory = {
        "type": "map",
        "injective": injective,
        "map": lookups,
    }
    return {"version": vers, "lookupExtractorFactory": extractor_factory}
import base64


def fig2inlinehtml(fig):
    """Convert a figure's raw PNG bytes into an inline HTML <img> tag.

    Parameters
    ----------
    fig: object exposing the raw image bytes on its ``data`` attribute

    Returns
    -------
    imgstr: HTML string embedding the image as base64
    """
    encoded = base64.b64encode(fig.data).decode()
    return f'<img src="data:image/png;base64,{encoded}" />'
def get_node(i, j, ncol):
    """Assign a unique, consecutive number for each i, j location
    (row-major numbering with ncol columns per row)."""
    row_offset = i * ncol
    return row_offset + j
import hashlib


def basefn(cc, test_name, inp):
    """Generate opaque filesystem-safe filename.

    inp can be "" or None (returning different hashes)
    """
    payload = f"{cc}:{test_name}:{inp}"
    return hashlib.shake_128(payload.encode()).hexdigest(16)
import torch


def generate_coordinates(h, w):
    """
    Generate (row, col) coordinates for an h x w grid in row-major order.

    Args:
        h: height
        w: width
    Returns: [h*w, 2] FloatTensor
    """
    # Row index = flat index // w (via floor of a true division);
    # column index cycles 0..w-1 for each of the h rows.
    rows = torch.floor((torch.arange(0, w * h) / w).float())
    cols = torch.arange(0, w).repeat(h).float()
    return torch.stack([rows, cols], dim=1)
def _sample_data(ice_lines, frac_to_plot):
    """Get sample ice lines to plot.

    Notes
    -----
    If frac_to_plot == 1, will plot all lines instead of sampling one line.
    A fraction below 1 samples that share of rows; an integer above 1 samples
    that many rows.
    """
    if frac_to_plot < 1.:
        sampled = ice_lines.sample(int(ice_lines.shape[0] * frac_to_plot))
    elif frac_to_plot > 1:
        sampled = ice_lines.sample(frac_to_plot)
    else:
        sampled = ice_lines.copy()
    return sampled.reset_index(drop=True)
def filter_data(src, trg, min_len=0, max_len=float("inf")):
    """Filters parallel data on source and target lengths.

    Args:
        src (list): Tokenized source data
        trg (list): Tokenized target data
        min_len (int): Minimum length (inclusive)
        max_len (int): Maximum length (inclusive)
    Returns:
        (list, list): The filtered (source, target) data (tokenized)
    """
    filtered_src = []
    filtered_trg = []
    # Distinct loop names: the original rebound `src`/`trg` inside the loop,
    # shadowing the parameters.
    for src_sent, trg_sent in zip(src, trg):
        if min_len <= len(src_sent) <= max_len and min_len <= len(trg_sent) <= max_len:
            filtered_src.append(src_sent)
            filtered_trg.append(trg_sent)
    return filtered_src, filtered_trg
from typing import List
from typing import Dict
import copy
def process_record_merge_summary(record_list: List[Dict]) -> List[Dict]:
    """
    Merge records that share the same step into a single record.

    Summaries of records with equal ``record["step"]`` are combined; the
    merged record otherwise keeps the fields (e.g. the newer wall_time) of
    the latest record for that step, since ``record_list`` is ordered.
    Input records are never mutated.

    :param record_list: list of record dicts with "step" and "summary" keys
    :return: list of merged records, sorted by step
    :raises ValueError: if two records for one step share a summary key
    """
    record_dict = {}  # step -> merged record
    for record in record_list:
        step = record["step"]
        if step not in record_dict:
            record_dict[step] = record
        else:
            old_record = record_dict[step]
            old_keys = set(old_record["summary"].keys())
            new_keys = set(record["summary"].keys())
            if old_keys & new_keys:
                raise ValueError("{}, {} have same keys".format(old_record, record))
            # Deep-copy the newer record and merge into the *copy*'s summary;
            # the original implementation updated record["summary"] in place,
            # mutating the caller's input.
            new_record = copy.deepcopy(record)
            new_record["summary"].update(old_record["summary"])
            record_dict[step] = new_record
    return list(sorted(record_dict.values(), key=lambda x: x["step"]))
def check_blocks(dag, block_ids):
    """Return True iff every id in ``block_ids`` exists in the DAG."""
    return set(block_ids).issubset(dag.block_ids())
def pick_share(share, ii, jj, axs_):
    """
    Helper for choosing which Axes to link; call once for sharex and once
    for sharey.

    :param share: string (or True)
        What type of sharing is going on? ('all'/True, 'col', 'row';
        anything else means no sharing)
    :param ii, jj: ints
        Coordinates of the current plot in the plot grid
    :param axs_: 2D array of Axes instances
        The plot Axes
    :return: Axes instance or None
        The Axes instance to be linked, or None when no link is needed.
    """
    if share in ['all', True]:
        if ii > 0 or jj > 0:
            return axs_[0, 0]
    elif share == 'col':
        if ii > 0:
            return axs_[0, jj]
    elif share == 'row':
        if jj > 0:
            return axs_[ii, 0]
    return None
def render_build_args(options, ns):
    """Get docker build args dict, rendering any templated args.

    Args:
        options (dict):
            The dictionary for a given image from chartpress.yaml.
            Fields in `options['buildArgs']` will be rendered and returned,
            if defined.
        ns (dict): the namespace used when rendering templated arguments

    Returns:
        dict: a *new* dict of rendered build args; ``options`` is left
        untouched (the original implementation wrote the rendered values
        back into ``options['buildArgs']``, mutating the caller's config).
    """
    return {
        key: value.format(**ns)
        for key, value in options.get('buildArgs', {}).items()
    }
def seq_version(_):
    """
    Compute a seq_version for GtRNAdb data. Currently this always returns
    the constant string "1"; the argument is ignored.
    """
    return "1"
def variance(values, mean):
    """Calculate the population variance of ``values`` about ``mean``.

    Note: this divides by ``len(values)`` — i.e. the biased/population
    variance — although the original docstring said "sample variance"
    (which would divide by ``len(values) - 1``). The computation is kept
    as-is; only the documentation is corrected.

    :raises ZeroDivisionError: if ``values`` is empty.
    """
    return sum((v - mean) ** 2 for v in values) / len(values)
def get_string_size(string_value):
    """Return the size in bytes of ``string_value`` once UTF-8 encoded."""
    encoded = string_value.encode('utf-8')
    return len(encoded)
import re
def rm_dup_spaces(t: str) -> str:
    """
    Collapse every run of two or more spaces in ``t`` into one space.

    Only ASCII space characters are affected; tabs/newlines are untouched.
    """
    return re.sub(r' {2,}', ' ', t)
from typing import Tuple
def get_mc_weights(n_samples: int = 100) -> Tuple[float, float]:
    """Generate normalizers for MCMC samples.

    Returns:
        (mean_weight, cov_weight): ``1/n`` for the mean and ``1/(n-1)``
        for the covariance. ``n_samples`` must be greater than 1.
    """
    return 1.0 / n_samples, 1.0 / (n_samples - 1)
def postprocess_text(preds, labels):
    """Postprocess generations and labels before BLEU computation.

    Whitespace is stripped from every string; each label is wrapped in a
    singleton list, since BLEU expects a list of references per prediction.
    """
    stripped_preds = [text.strip() for text in preds]
    wrapped_labels = [[text.strip()] for text in labels]
    return stripped_preds, wrapped_labels
def split_by_equals(word, unused_lines, iline):
    """
    Splits 'x = 42' into 'x' and '42'.

    Splits on the *first* '=' only, so values that themselves contain '='
    (e.g. 'x = a=b') are kept intact; the original two-way unpacking of
    ``word.split('=')`` raised ValueError on such input.

    :param word: the 'key=value' string to split
    :param unused_lines: unused (kept for interface compatibility)
    :param iline: line number, used in the error message
    :raises RuntimeError: if ``word`` contains no equals sign
    """
    if '=' not in word:
        msg = 'line %i: %r cannot be split by an equals sign (=)' % (iline, word)
        raise RuntimeError(msg)
    word_out, value = word.split('=', 1)
    return word_out, value
def xpath_lower_case(context, values):
    """Lower-case each value; XPath extension helper (``context`` unused)."""
    lowered = []
    for value in values:
        lowered.append(value.lower())
    return lowered
def get_session_length(row):
    """Return the length of a session in seconds.

    ``row`` must provide datetime-like 'session_start' and 'session_end'
    entries whose difference supports ``total_seconds()``.
    """
    delta = row['session_end'] - row['session_start']
    return delta.total_seconds()
import threading
def background(fun):
    """ Decorator to run a function in the background.
    Based on the implementation at https://amalgjose.com/2018/07/18/run-a-background-function-in-python/

    Improvements over the original: ``functools.wraps`` preserves the
    wrapped function's metadata, and the wrapper returns the started
    ``threading.Thread`` so callers can ``join()`` it (backward-compatible:
    the original returned None, which no caller could use).
    """
    import functools

    @functools.wraps(fun)
    def background_func(*args, **kwargs):
        thread = threading.Thread(target=fun, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return background_func
def root_to_pathsnames(gamelist):
    """ Extract path and name lists from a gameList root object.
    Parameters
    ----------
    gamelist : ElementTree.Element
        An ElementTree object with a gameList root and game-elements.
        Expects an ET object read from a gamelist.xml file.
    Returns
    -------
    list
        List of all text from path-tag for every game.
    list
        List of all text from name-tag for every game.
    """
    paths = []
    names = []
    if gamelist is not None:
        for game in gamelist.findall('game'):
            paths.append(game.findtext('path', ''))
            # Bug fix: the original appended the name text to `paths`,
            # so `names` was always returned empty.
            names.append(game.findtext('name', ''))
    return paths, names
def rename_and_subset_cols(df, dict_rename, list_cols, include=True):
    """
    Rename columns of a DataFrame, then keep or drop a subset of them.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame with several columns
    dict_rename : dict(str:str)
        Maps original column names to new column names.
    list_cols : list(str)
        Columns (post-rename) to keep or drop.
    include : bool
        If True (default) keep exactly ``list_cols``; otherwise drop them.

    Returns
    -------
    pd.DataFrame
    """
    renamed = df.rename(columns=dict_rename)
    if include:
        return renamed[list_cols]
    return renamed.drop(list_cols, axis=1)
def relax_w(min_w, relax_factor, base=2):
    """
    Scale ``min_w`` by ``relax_factor`` and round the result to the
    nearest multiple of ``base`` (ties follow Python's banker's rounding).
    """
    scaled = min_w * relax_factor / base
    return int(base * round(scaled))
def asdicts(rows):
    """
    Convert a list of named tuples into a list of dicts.
    """
    converted = []
    for record in rows:
        converted.append(record._asdict())
    return converted
from typing import Sequence
from typing import List
def get_evenly_spaced_elements(nr_elements: int, input_sequence: Sequence) -> List:
    """
    Select ``nr_elements`` evenly spaced items from ``input_sequence``
    using Bresenham's line algorithm.
    Sequences are the general term for ordered sets, e.g. lists or Django
    QuerySets.
    Definition: https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
    Implementation: https://stackoverflow.com/a/9873804
    If more elements are requested than available, the whole sequence is
    returned.
    """
    total = len(input_sequence)
    count = min(nr_elements, total)
    return [
        input_sequence[i * total // count + total // (2 * count)]
        for i in range(count)
    ]
def return_percent(num, pop_size):
    """
    Return ``num`` percent of the population size, as a rounded int.

    :param num: A desired percentage of the population.
    :param pop_size: A given population size.
    :return: [num] percent of the population size.
    """
    fraction = num * pop_size / 100
    return int(round(fraction))
def do_something3(n):
    """
    Add 3 to ``n``.

    :param n: number
    :return: n + 3
    """
    return 3 + n
from typing import List
def super_packages(id: str) -> List[str]:
    """Return the surrounding packages of a module, e.g. ['os'] for os.path."""
    parts = id.split('.')
    return ['.'.join(parts[:i]) for i in range(1, len(parts))]
def read_file(file_path):
    """
    Read a text file, swallowing errors instead of raising.

    :param file_path: Path of the file
    :return: The content of the text file, or None (with a message
        printed) when the file is missing or any other error occurs.
    """
    try:
        with open(file_path, 'r') as handle:
            content = handle.read()
        return content
    except FileNotFoundError:
        print('Could not find file in the path')
    except Exception as e:
        print(e)
def find_in_dict(string, dictionary):
    """
    Return True if ``string`` is a key of ``dictionary``, False otherwise.
    """
    # `in` already yields a bool; the original if/return True/return False
    # was redundant.
    return string in dictionary
import struct
def cbt_intvalue(value):
    """Decode a big-endian 64-bit integer.

    Cloud Bigtable stores integers as 8-byte big-endian values and
    performs this translation when integers are being set, but values
    need to be decoded when read.

    Note: the '>q' format decodes a *signed* int64 (the original
    docstring said uint64); negative encoded values decode as negative
    Python ints.
    """
    return int(struct.unpack('>q', value)[0])
def create_configuration(model, options):
    """
    Create the configuration dict to pass to the run method.

    Parameters
    ----------
    model: pysd.model object
        Only consulted for its current time when ``options.initial_time``
        is not given.
    options: argparse.Namespace

    Returns
    -------
    conf_dict: dict
    """
    start_time = options.initial_time or model.time()
    # An import file, when provided, replaces the (time, values) pair.
    initial_condition = options.import_file or (
        start_time,
        options.new_values['initial'],
    )
    return {
        "progress": options.progress,
        "params": options.new_values['param'],
        "initial_condition": initial_condition,
        "return_columns": options.return_columns,
        "final_time": options.final_time,
        "time_step": options.time_step,
        "saveper": options.saveper,
        "flatten_output": True,  # need to return totally flat DF
        "return_timestamps": options.return_timestamps,  # given or None
    }
def get_linear_intersection(m1, c1, m2, c2):
    """Return (x, y) where the lines y = m1*x + c1 and y = m2*x + c2 cross.

    Raises ZeroDivisionError when the lines are parallel (m1 == m2).
    """
    x = (c1 - c2) / (m2 - m1)
    y = x * m1 + c1
    return x, y
def is_variable_name(test_str: str):
    """Check whether the test string is a syntactically valid identifier.

    Note: reserved keywords such as 'for' also pass ``str.isidentifier``.

    Arguments:
        test_str (str): Test string
    Returns:
        bool: True when the string could name a variable
    """
    return test_str.isidentifier()
def reveal(ch, answer):
    """
    Reveal the positions in ``answer`` of every letter present in ``ch``.

    :param ch: str, the characters guessed so far
    :param answer: str, the word to be guessed
    :return: str, ``answer`` with unguessed letters shown as '_'

    Fix: the original double loop appended a matched letter once per
    occurrence of that letter in ``ch``, corrupting the output when the
    same character was guessed twice. Set membership handles duplicates.
    """
    guessed = set(ch)
    return ''.join(letter if letter in guessed else '_' for letter in answer)
def convert_to_index(word2idx, sent):
    """Converts the words in a sentence to the corresponding indices.

    Args:
        word2idx (Dict): Mapping of words to indices for the dataset vocab.
        sent (str): The sentence whose indexed representation we want.
    Returns:
        List[int]: Index for each whitespace-separated token; tokens not
        in the vocabulary map to 1 (UNK).
    """
    indices = []
    for token in sent.split():
        indices.append(word2idx.get(str(token), 1))
    return indices
def extract_data(psql_conn):
    """Given a connection, extract data from the PostgreSQL movielens
    dataset and return all the tables with their data.

    Returns the tuple (genres, movie_genres, ratings, users, movies).
    """
    # Query order matches the original: genres, genres_movies, movies,
    # ratings, users.
    genres, movie_genres, movies, ratings, users = (
        psql_conn.query_all(name)
        for name in ("genres", "genres_movies", "movies", "ratings", "users")
    )
    return genres, movie_genres, ratings, users, movies
def get_l2_distance(x1, x2, y1, y2):
    """
    Compute the Euclidean (L2) distance between (x1, y1) and (x2, y2).
    """
    dx = x1 - x2
    dy = y1 - y2
    return (dx ** 2 + dy ** 2) ** 0.5
def isstr(obj):
    """Return True when ``obj`` is a str instance (or subclass thereof)."""
    return isinstance(obj, str)
def action2motion(action, speed=0.25):
    """Translate a discrete action into (vx, vy, vz) drone velocities.

    Assumes a discrete action space encoded as an integer: 0 hovers,
    1-4 move at ``speed`` along +x, +y, -x, -y respectively.
    """
    motions = {
        0: (0, 0, 0),
        1: (speed, 0, 0),
        2: (0, speed, 0),
        3: (-speed, 0, 0),
        4: (0, -speed, 0),
    }
    if action not in motions:
        raise RuntimeError("Could not convert discrete action into movement.")
    return motions[action]
def display_chr_value(num: int):
    """Return the glyph (character) for a given Unicode code point.

    Examples:
        >>> display_chr_value(128013)
        '🐍'
        >>> display_chr_value(77)
        'M'
        >>> display_chr_value(700)
        'ʼ'
    """
    # Note: the third example originally lacked its '>>>' prompt, so
    # doctest silently skipped it.
    return chr(num)
def _get_alert(lb, ub, price_old, price_new):
"""
Filter function to determine whether bounds were crossed.
:param lb: Lower bound for alert.
:param ub: Upper bound for alert.
:param price_old: Old stock price.
:param price_new: New stock price.
:return: tuple of data if bounds were crossed. Otherwise None.
"""
if (price_old <= ub < price_new) or (price_new < lb <= price_old):
return lb, ub, price_old, price_new | df6ebc86336cde581693b33e9371fe03ea40daba | 96,114 |
import functools
import warnings


def deprecated(function):
    """Decorator that emits a DeprecationWarning whenever the wrapped
    function is called.

    ``functools.wraps`` copies the wrapped function's metadata (the
    original copied __name__/__doc__/__dict__ by hand), and
    ``stacklevel=2`` makes the warning point at the caller's line rather
    than at this wrapper.
    """
    @functools.wraps(function)
    def new_function(*args, **kwargs):
        warnings.warn(
            "call to deprecated function {}".format(function.__name__),
            category=DeprecationWarning,
            stacklevel=2,
        )
        return function(*args, **kwargs)
    return new_function
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.