content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def cosine(seen_doc, unseen_doc):
    """Compute the (unnormalised) cosine similarity of two sparse vectors.

    Both arguments are dicts mapping term -> BM25 weight; only terms that
    appear in both vectors contribute to the dot product.

    :param seen_doc: BM25-weighted document term vector (dict)
    :param unseen_doc: BM25-weighted document term vector (dict)
    :return: float similarity score
    """
    return sum(weight * unseen_doc[term]
               for term, weight in seen_doc.items()
               if term in unseen_doc)
def inconfmode(text):
    """
    Check whether the device prompt text indicates config mode.

    The ``in`` operator already yields a bool, so the original redundant
    ``bool()`` wrapper has been dropped.

    :param text: prompt/output text to inspect
    :return: True if '(config)' occurs in text, False otherwise
    """
    return '(config)' in text
def geometric_mean(values):
    """Geometric mean of a numpy array.

    Raising each element to 1/n before multiplying avoids numerical
    over- and underflow, at the cost of extra compute.

    Args:
        values (ndarray)
    Returns:
        float
    """
    exponent = 1.0 / values.size
    scaled = values ** exponent
    return scaled.prod()
def get_batch_utilization(batch_field, pad_idx=0):
    """Fraction of batch elements that are NOT padding.

    The batch should be a field, i.e. a dictionary of inputs; only the
    "elmo" representation is currently supported.

    :param batch_field: dict of input tensors
    :param pad_idx: index value used for padding
    :return: 1 - (padding ratio)
    """
    if "elmo" not in batch_field:
        raise NotImplementedError
    idxs = batch_field["elmo"]
    n_padding = idxs.eq(pad_idx).sum().item()
    return 1 - n_padding / idxs.nelement()
def format_internal_exception_output(result):
    """
    Format any internal error Ansible ran into while executing.

    :param result: result dict to inspect
    :return: formatted output message, or '' when no 'exception' key exists
    """
    if 'exception' not in result:
        return ''
    return 'An internal exception occurred:\n{}'.format(result['exception'])
def get_cover_image(beatmapset_id: int):
    """Build the osu! cover image URL for the given beatmapset id."""
    base = "https://assets.ppy.sh/beatmaps"
    return f"{base}/{beatmapset_id}/covers/cover.jpg"
def read_clusters(file_name):
    """
    Return a unique listing of the clusters and the mapping of sample
    names to cluster id.

    The cluster file format is one record per line:
        sample_name<tab>cluster_id

    Fixes: the file handle is now managed by a context manager (the
    original leaked it on a parse error), the deprecated "rU" mode is
    dropped (universal newlines are the Python 3 default), and the
    unique id collection uses a set instead of a redundant dict.

    @return: tuple (list of cluster ids, dict of sample name -> cluster id)
    """
    clusters = set()
    imap = {}
    with open(file_name) as handle:
        for line in handle:
            fields = line.strip().split('\t')
            clusters.add(fields[1])
            imap[fields[0]] = fields[1]
    return (list(clusters), imap)
def is_private(key):
    """
    Report whether the serialised key corresponds to a private bip32
    node, i.e. starts with the mainnet "xprv" or testnet "tprv" prefix.
    """
    prefix = key[:4]
    return prefix == "xprv" or prefix == "tprv"
def drop_null(dataframe, inplace=True):
    """Drop tickets whose ticket_number is null.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    inplace : bool

    Returns
    -------
    If `inplace` is False, a copy of the input with null citations removed;
    otherwise the input is modified in place and nothing is returned.
    """
    target = dataframe if inplace else dataframe.copy()
    null_mask = target.ticket_number.isnull()
    target.drop(index=target.index[null_mask], inplace=True)
    if not inplace:
        return target
def oprScorePred(alliance, opr, oprType='opr'):
    """
    Given an alliance of three teams, an OPR DataFrame, and which kind of
    OPR to use, compute a predicted score for that alliance in a match.

    If a team is not found in the OPR DataFrame, its contribution is zero.
    This error checking was needed for event 2020waspo, where the missing
    playoff alliance was populated in the FMS results with team numbers
    that were not competing at the event.

    Fixes: the bare ``except`` clauses (which would also swallow e.g.
    KeyboardInterrupt) are narrowed to ``KeyError``, and the triplicated
    lookup logic is collapsed into a helper.

    Parameters
    ----------
    alliance : list of strings
        Team numbers as strings (e.g. '4020') making up the alliance.
    opr : DataFrame
        OPR metrics by team for an event (as produced by calcOPR).
    oprType : string
        Column name in the OPR DataFrame for the type of OPR to use.
        Defaults to 'opr'.

    Returns
    -------
    scorePred : float
        The score predicted for the alliance by the requested OPR type.
    """
    def _contribution(team):
        # Missing teams (e.g. FMS placeholder teams) contribute 0.0.
        try:
            return opr.at[team, oprType]
        except KeyError:
            print('warning: OPR not found for team ' + team)
            return 0.0

    return sum(_contribution(team) for team in alliance)
def grad(val, tens, core_indices=None):
    """
    Compute the gradient w.r.t. the cores of the given TT-tensor (or TT-matrix).

    Fix: compare ``core_indices`` to None with ``is`` rather than ``==``
    (identity, not equality, is the correct test for the sentinel).

    Args:
        val (torch.tensor): Scalar tensor that has to be differentiated.
        tens (torchtt.TT): The given tensor.
        core_indices (list[int], optional): The list of cores to construct the
            gradient for. If None, all the cores are used. Defaults to None.

    Returns:
        list[torch.tensor]: cores representing the derivative of the
        expression w.r.t. the tensor.
    """
    val.backward()
    if core_indices is None:
        return [core.grad for core in tens.cores]
    return [tens.cores[idx].grad for idx in core_indices]
def quick_sorted(items):
    """Return a new list of all items in non-decreasing order (quicksort).

    The first item acts as pivot; the remainder is partitioned into
    items <= pivot and items > pivot, each sorted recursively.
    """
    # Base case: an empty list is already sorted.
    if not items:
        return []
    pivot, rest = items[0], items[1:]
    smaller = [item for item in rest if item <= pivot]
    larger = [item for item in rest if item > pivot]
    return quick_sorted(smaller) + [pivot] + quick_sorted(larger)
def parse_scen_file(scen_file, n_agents):
    """Return the agent start locations and the goal locations.

    Fix: the docstring previously claimed lists were returned; the function
    returns two *tuples*, which is now documented correctly.

    Args:
        scen_file (str): path to the scenario file. The first line is a
            header and is skipped; each subsequent line is tab-separated
            with start/goal coordinates in fields 5-8.
        n_agents (int): number of agents to read from the scenario (the file
            might contain many more - the more the harder).

    Returns:
        tuple. two tuples - one of start locations and one of goal locations
        (each location is an (x, y) tuple of ints).
    """
    starts = []
    goals = []
    with open(scen_file, 'r') as f:
        lines = iter(f)
        next(lines)  # skip the header line
        for i, line in enumerate(lines):
            _, _, _, _, x_start, y_start, x_goal, y_goal, _ = line.split('\t')
            starts.append((int(x_start), int(y_start)))
            goals.append((int(x_goal), int(y_goal)))
            if i == n_agents - 1:
                break
    return tuple(starts), tuple(goals)
import typing
def it_erable(val: typing.Any) -> bool:
    """Determine whether `val` is a non-string iterable.

    Arguments:
        val {typing.Any} -- any value

    Returns:
        bool -- True when value is iterable and not a str
    """
    if isinstance(val, str):
        return False
    return hasattr(val, '__iter__')
def _iou(box1, box2):
    """
    Intersection over Union of two bounding boxes.

    :param box1: array of 4 values, top-left and bottom-right corners:
        [x0, y0, x1, y1]
    :param box2: same layout as box1
    :return: IoU value
    """
    ax0, ay0, ax1, ay1 = box1
    bx0, by0, bx1, by1 = box2
    overlap_w = max(min(ax1, bx1) - max(ax0, bx0), 0)
    overlap_h = max(min(ay1, by1) - max(ay0, by0), 0)
    intersection = overlap_w * overlap_h
    area_a = (ax1 - ax0) * (ay1 - ay0)
    area_b = (bx1 - bx0) * (by1 - by0)
    # small epsilon of 1e-05 guards against division by zero
    return intersection / (area_a + area_b - intersection + 1e-05)
from pathlib import Path
def file_check(file_location):
    """Check that a regular file exists at the given location.

    :param file_location: absolute (or relative) file path
    :return: True when the path refers to an existing file
    """
    path = Path(file_location)
    return path.is_file()
import re
def txt2doi(txt):
    """
    Extract a DOI from text data.

    Returns (full_match, doi) for the first pattern that matches,
    or (None, None) when no DOI is found.
    """
    patterns = (
        r'(https?://)?dx\.doi\.org/([0-9.]+/[A-Za-z0-9.]+)',
        r'(DOI|doi):([0-9.]+/[A-Za-z0-9.]+)',
    )
    for pattern in patterns:
        match = re.search(pattern, txt)
        if match:
            return match.group(0), match.group(2)
    return None, None
from datetime import datetime
def time_since_report(entry):
    """
    Calculates the time difference between the reported time of the event
    and its updated time.

    Bug fix: the original used ``delta.seconds``, which is only the
    sub-day remainder of a timedelta - a difference of 25 hours compared
    as 1 hour and a difference of exactly 1 day as 0.  ``total_seconds()``
    is used instead.

    :param entry: A dict with 'reported' and 'updated' keys holding
        "%d-%m-%Y %H:%M:%S" formatted timestamps.
    :type entry: Dict
    :return: True when the difference is at least one hour, False otherwise.
    :raises TypeError: In case the parameter is not a dictionary.
    :raises KeyError: In case the parameter doesn't have the correct keys.
    """
    if not isinstance(entry, dict):
        raise TypeError("Parameter must be a dictionary!")
    if "reported" not in entry:
        raise KeyError("No 'reported' key in the dict!")
    if "updated" not in entry:
        raise KeyError("No 'updated' key in the dict!")
    fmt = "%d-%m-%Y %H:%M:%S"
    reported = datetime.strptime(entry["reported"], fmt)
    updated = datetime.strptime(entry["updated"], fmt)
    delta = updated - reported
    return delta.total_seconds() / 3600 >= 1
def getInt(val):
    """
    Convert a value to an integer, treating None as 0.

    Parameters
    ----------
    val : str
        The value to convert to an integer (None allowed)

    Returns
    -------
    int
        The converted value, or 0 when val is None
    """
    return 0 if val is None else int(val)
def addone(bv):
    """Add one bit to a bit vector.

    Overflows are silently dropped.

    NOTE(review): ``new = bv`` below aliases the input, so the argument is
    mutated in place and then returned.  The doctests below are Python 2
    print syntax and will not run under Python 3's doctest.

    >>> print addone(BitVector(bitstring='1100'))
    1101
    >>> print addone(BitVector(bitstring='1111'))
    0000

    @param bv: Add one to these bits
    @type bv: BitVector
    @return: Bits with one added
    @rtype: BitVector
    """
    new = bv
    r = range(1,len(bv)+1)
    # Walk bit positions from least- to most-significant (right to left),
    # performing a ripple-carry increment.
    for i in r:
        index = len(bv)-i
        if 0==bv[index]:
            # First 0 bit absorbs the carry: set it and stop.
            new[index]=1
            break
        # A 1 bit propagates the carry: clear it and continue leftwards.
        # If every bit is 1 the loop ends with all bits cleared (overflow).
        new[index]=0
    return new
def validate_model(model, expect_content):
    """
    Validate a model returned by a ContentsManager method.

    If expect_content is True, then non-null entries are expected for
    'content' and 'format'; otherwise both must be null.

    Returns an error dict ({"status": False, "msg": ..., "code": 5000})
    on failure, and None (implicitly) when the model is valid.
    """
    required_keys = {
        "name",
        "path",
        "type",
        "writable",
        "created",
        "last_modified",
        "mimetype",
        "content",
        "format",
    }
    missing = required_keys.difference(model)
    if missing:
        return {
            "status": False,
            "msg": u"Missing Model Keys: {missing}".format(missing=missing),
            "code": 5000
        }
    maybe_none_keys = ['content', 'format']
    if expect_content:
        bad = [key for key in maybe_none_keys if model[key] is None]
        if bad:
            return {
                "status": False,
                "msg": u"Keys unexpectedly None: {keys}".format(keys=bad),
                "code": 5000
            }
    else:
        bad = {
            key: model[key]
            for key in maybe_none_keys
            if model[key] is not None
        }
        if bad:
            return {
                "status": False,
                "msg": u"Keys unexpectedly not None: {keys}".format(keys=bad),
                "code": 5000
            }
def strike2dipaz(strike):
    """Convert strike to dip azimuth using the right-hand rule convention.

    Args:
        strike (float): Strike azimuth in degrees from north

    Returns:
        float: Azimuth of the dip direction (0-360 degrees)
    """
    dipaz = strike + 90
    # Wrap around north only when the strike exceeds 270 degrees.
    if strike > 270:
        dipaz -= 360
    return dipaz
def get_svg_string(svg_name, verbose=False):
    """
    Helper returning the contents of an SVG file as a string.

    Fixes: the bare ``except`` is narrowed to OSError (a bare except also
    hid KeyboardInterrupt and programming errors), and two dead trailing
    expressions (``None`` / ``''``) after the except block were removed.

    :param svg_name: The filename to read from
    :param verbose: Whether to print ancillary information
    :return: The contents of the SVG file, or an empty string if unreadable.
    """
    try:
        with open(svg_name, 'r') as input_file:
            raw_string_data = input_file.read()
    except OSError:
        print(f'Error: No file "{svg_name}" found')
        return ''
    if verbose:
        print(f'Loading "{svg_name}"')
    return raw_string_data
def format_repr(obj, attributes) -> str:
    """Format an object's repr method with specific attributes.

    Produces "QualName(attr1=value1, attr2=value2, ...)" using repr() of
    each named attribute.
    """
    parts = []
    for attr in attributes:
        parts.append('{}={}'.format(attr, repr(getattr(obj, attr))))
    return "{0}({1})".format(obj.__class__.__qualname__, ', '.join(parts))
def get_optarg(arglist, *opts, default=False):
    """Get an optional command line argument and return its value.

    If default is left as False the flag is treated as boolean and True
    is returned when found.  Any other default (including None or '')
    means the token *after* the flag is consumed and returned.  Matching
    flags are removed from arglist in place.

    Parameters
    ----------
    arglist : array_like
        The command line argument list to be parsed (mutated).
    opts : list
        The flags searched for in arglist.
    default : str or bool
        Value returned when none of opts is present.

    Returns
    -------
    str or bool
        The argument value from arglist or its default value.
    """
    for flag in opts:
        if flag not in arglist:
            continue
        position = arglist.index(flag)
        arglist.remove(flag)
        if default is False:
            return True
        # After removing the flag, its old index now points at the
        # value that immediately followed it.
        return arglist.pop(position)
    return default
import importlib
def name_to_feature(name):
    """
    Convert a feature name, e.g. continuous.Pssm, to a class instance.

    The module part is resolved inside the ``mllib.features`` package
    (continuous.Pssm -> mllib.features.continuous.Pssm) unless it is
    already fully qualified; the module is imported and an instance of
    the named class is created.

    Args:
        name: "<module>.<ClassName>" string.
    Returns:
        Instance of the loaded class.
    """
    module_name, class_name = name.split(".")
    prefix = "mllib.features"
    if not module_name.startswith(prefix):
        module_name = "{}.{}".format(prefix, module_name)
    feature_cls = getattr(importlib.import_module(module_name), class_name)
    return feature_cls()
from bs4 import BeautifulSoup
def get_feeds_from_links(html: BeautifulSoup) -> list:
    """Collect candidate RSS feed URLs from <link rel="alternate"> tags."""
    possible_feeds = []
    for link in html.findAll("link", rel="alternate"):
        link_type = link.get("type", None)
        if not link_type:
            continue
        if "rss" not in link_type and "xml" not in link_type:
            continue
        href = link.get("href", None)
        if href:
            possible_feeds.append(href)
    return possible_feeds
def replace_user_in_file_path(file_name, user):
    """Replace the user name (second path segment) in the given file path.

    Args:
        file_name (str): Path to file
        user (str): New user to replace with

    Returns:
        str: New absolute file path
    """
    segments = [part.strip()
                for part in file_name.strip().split('/') if part.strip()]
    segments[1] = user
    return "/" + '/'.join(segments).strip()
def get_color(n):
    """Map an integer to one of the 8 single-letter matplotlib colors.

    Wraps around modulo 8, so any integer is valid.

    :param n: an integer
    :return: a valid matplotlib color string
    """
    colors = (
        'b',  # blue
        'g',  # green
        'r',  # red
        'c',  # cyan
        'm',  # magenta
        'y',  # yellow
        'k',  # black
        'w',  # white
    )
    return colors[n % 8]
def delta_nu(numax):
    """
    Estimate the large frequency separation dnu from the numax
    scaling relation dnu = 0.22 * numax**0.797.

    Parameters
    ----------
    numax : float
        the estimated numax

    Returns
    -------
    dnu : float
        the estimated dnu
    """
    return 0.22 * numax ** 0.797
def perfect_cube(n: int) -> bool:
    """
    Check if a number is a perfect cube or not.

    Bug fix: the original compared ``(n ** (1/3)) ** 3 == n`` in floating
    point, which fails for genuine cubes (e.g. 27 ** (1/3) is
    3.0000000000000004, so 27 was reported as not a cube, contradicting
    the doctest below).  The float cube root is now rounded to the
    nearest integer and verified exactly; negative cubes are supported.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    """
    root = round(abs(n) ** (1 / 3))
    return root * root * root == abs(n)
from typing import List
def propagate_jemalloc_env_var(*, jemalloc_path: str, jemalloc_conf: str,
                               jemalloc_comps: List[str], process_type: str):
    """Translate jemalloc memory-profiling settings into process env vars.

    ``jemalloc_path`` becomes ``LD_PRELOAD`` (needed to run jemalloc as a
    shared library) and ``jemalloc_conf`` becomes ``MALLOC_CONF``.

    Params:
        jemalloc_path (str): Path to the jemalloc shared library.
        jemalloc_conf (str): `,` separated string of jemalloc config.
        jemalloc_comps (List[str]): Components to profile.
        process_type (str): The process type needing the env vars; if it
            does not match one of jemalloc_comps, an empty dict is
            returned.

    Returns:
        dict of {env_var: value} needed for jemalloc profiling; empty when
        no path is given or the process type is not selected.
    """
    assert isinstance(jemalloc_comps, list)
    assert process_type is not None
    process_type = process_type.lower()
    if not jemalloc_path:
        return {}
    if process_type not in jemalloc_comps:
        return {}
    env_vars = {"LD_PRELOAD": jemalloc_path}
    if jemalloc_conf:
        env_vars["MALLOC_CONF"] = jemalloc_conf
    return env_vars
def complement(f):
    """
    Takes a function ``f`` and returns a function that takes the same
    arguments as ``f``, has the same effects, if any, and returns the
    opposite truth value.

    Improvement: the wrapper is now decorated with ``functools.wraps`` so
    it keeps ``f``'s name, docstring and other metadata (the original
    wrapper reported itself as ``_f``).
    """
    from functools import wraps

    @wraps(f)
    def _f(*args, **kw):
        return not f(*args, **kw)
    return _f
import unittest
def unroll_test_suite(suite):
    """Convert a (possibly hierarchical) test suite into a flat set of tests.

    Used to ensure that the suite only executes any individual test once.
    """
    tests = set()
    for member in suite:
        if isinstance(member, unittest.TestSuite):
            tests |= unroll_test_suite(member)
        else:
            tests.add(member)
    return tests
from typing import Any
def is_int(obj: Any, digit: bool = False) -> bool:
    """Check whether obj is int-typeable.

    Args:
        obj: object to check
        digit: allow checking digit-only str/bytes
    """
    if digit and isinstance(obj, (str, bytes)) and obj.isdigit():
        return True
    # bool is an int subclass but is deliberately rejected here.
    return isinstance(obj, int) and not isinstance(obj, bool)
def bias_term(power1, power2, power1_noise, power2_noise, n_ave, intrinsic_coherence=1.0):
    """Bias term needed to calculate the coherence.

    Introduced by Vaughan & Nowak 1997, ApJ 474, L43, implemented here per
    the formulation in Ingram 2019, MNRAS 489, 392.  As recommended there,
    returns 0 when more than 500 intervals were averaged.

    Parameters
    ----------
    power1 : float `np.array`
        sub-band periodogram
    power2 : float `np.array`
        reference-band periodogram
    power1_noise : float
        Poisson noise level of the sub-band periodogram
    power2_noise : float
        Poisson noise level of the reference-band periodogram
    n_ave : int
        number of averaged intervals
    intrinsic_coherence : float, default 1
        the intrinsic coherence, if known

    Returns
    -------
    bias : float `np.array`, same shape as ``power1`` and ``power2``
        The bias term
    """
    if n_ave > 500:
        return 0. * power1
    signal1 = power1 - power1_noise
    signal2 = power2 - power2_noise
    bsq = power1 * power2 - intrinsic_coherence * signal1 * signal2
    return bsq / n_ave
import json
def pack(data):
    """
    Create a deterministic, pretty-printed string representation of data.

    :param data: dictionary to serialise
    """
    options = {"sort_keys": True, "indent": 2}
    return json.dumps(data, **options)
def calculate_size(transformed_dataset):
    """Helper to compute the total size of a dataset.

    Args:
        transformed_dataset (dict): a ``TransformedDataset`` instance mapping
            (str) table name to {'size': table size in bytes, 'row_size':
            row size in bytes, 'entries': (set) column names, 'chosen':
            (set) selected rows}

    Returns:
        float: the dataset size in bytes (row_size * chosen rows, summed
        over all tables)
    """
    return sum(table['row_size'] * len(table['chosen'])
               for table in transformed_dataset.values())
def get_auth_header(key):
    """Build the Bearer authorization header dict for an Eventbrite request."""
    header_value = 'Bearer {}'.format(key)
    return {'Authorization': header_value}
import struct
def uwsgi_packet_to_dict(blob):
    """Convert a uWSGI binary packet into a {key: value} dict of bytes.

    The 4-byte header carries the payload length; the payload alternates
    little-endian uint16 length-prefixed key and value strings.
    """
    result = dict()
    _, packet_len, _ = struct.unpack("<BHB", blob[0:4])

    def _read_chunk(offset):
        # Read one <u16 length><bytes> chunk starting at *offset*.
        size, = struct.unpack("<H", blob[offset:offset + 2])
        offset += 2
        return blob[offset:offset + size], offset + size

    offset = 4
    while offset < packet_len:
        key, offset = _read_chunk(offset)
        value, offset = _read_chunk(offset)
        result[key] = value
    return result
def calc_energy_position(energy_edges, position_rate=0.5):
    """For each energy bin, return the energy at fraction *position_rate*
    of the way between its lower and upper edge."""
    return [low + position_rate * (high - low)
            for low, high in zip(energy_edges, energy_edges[1:])]
def getTileFromCoords(coords, gameboard, replace=''):
    """Ask the gameboard for the tile at the given xy coords, substituting
    *replace* when the board reports None."""
    tile = gameboard.getTile('xy', tuple(coords))
    return replace if tile is None else tile
def gpmset_metadata() -> dict:
    """
    Build the metadata (description, units, long_name, etc.) for the gpmset.

    Returns:
    ========
    metadata: dict
    """
    # The three spatial axes share the same unit and description template;
    # build them in one place and splice them in at their original position.
    axes = {
        axis: {
            "units": "m",
            "description": f"{axis}-axis parallax corrected coordinates in relation to ground radar.",
        }
        for axis in ("x", "y", "z")
    }
    metadata = {
        "overpass_time": {
            "description": "GPM overpass time at the closest from ground radar site"
        },
        **axes,
        "precip_in_gr_domain": {
            "units": "1",
            "description": "Satellite data-columns with precipitation inside the ground radar scope.",
        },
        "range_from_gr": {
            "units": "m",
            "description": "Range from satellite bins in relation to ground radar",
        },
        "elev_from_gr": {
            "units": "degrees",
            "description": "Elevation from satellite bins in relation to ground radar",
        },
        "reflectivity_grband": {"units": "dBZ"},
    }
    return metadata
def between(series, low, high):
    """Elementwise strict open-interval test: low < series < high."""
    above_low = series > low
    below_high = series < high
    return above_low & below_high
def read_dict_from_file(file_path):
    """
    Read a dictionary of values from a file.

    Each line has the form ``key:value``; the value part is evaluated as a
    Python expression.

    SECURITY NOTE: ``eval`` executes arbitrary code from the file - only
    use this on fully trusted input. If the values are plain literals,
    ``ast.literal_eval`` would be the safe replacement.

    Improvement: the file is streamed line by line instead of being read
    wholesale into memory.
    """
    obj = {}
    with open(file_path) as file:
        for line in file:
            key, value = line.rstrip('\n').split(':', maxsplit=1)
            obj[key] = eval(value)
    return obj
def alloc_to_share_ratio(share, total_shares, allocation, total_alloc):
    """
    Calculate the allocation to share (advantage) ratio given to a region
    or group.

    Parameters
    ----------
    share : int
        The proportion to be checked.
    total_shares : int
        The total amount of shares.
    allocation : int
        The share of allocations given to the region or group.
    total_alloc : int
        The number of allocations to provide.

    Returns
    -------
    asr : float
        Ratio of the allocations received to the proportion of the
        original shares.
    """
    alloc_fraction = allocation / total_alloc
    share_fraction = share / total_shares
    return 1.0 * alloc_fraction / share_fraction
def get_user_pw(request):
    """
    Obtain the user's credentials from the request object.

    @return (username, password) when an Authorization header is present,
    (None, None) otherwise.
    """
    if 'Authorization' not in request.headers:
        return (None, None)
    credentials = request.authorization
    return (credentials.username, credentials.password)
import logging
def BotInternalOnly(bot_name, bot_whitelist):
    """Check whether a given bot name is internal-only.

    If a bot name is internal only, new data for that bot should be
    marked as internal-only.  With no whitelist at all, everything is
    treated as internal-only (and a warning is logged).
    """
    if bot_whitelist:
        return bot_name not in bot_whitelist
    logging.warning(
        'No bot whitelist available. All data will be internal-only. If this '
        'is not intended, please add a bot whitelist using /edit_site_config.')
    return True
def rightmost(root):
    """
    Return the rightmost leaf in this tree.

    :param root: The root node of the tree.
    :type root: d20.ast.ChildMixin
    :rtype: d20.ast.ChildMixin
    """
    node = root
    # Descend through the last child until a leaf is reached.
    while node.children:
        node = node.children[-1]
    return node
def _removeHeaderTag(header, tag):
    """Remove a tag from the beginning of a header string.

    :param header: str
    :param tag: str
    :returns: (str, bool) - the header without the tag, and whether the
        tag was present.
    """
    tagPresent = header.startswith(tag)
    if tagPresent:
        header = header[len(tag):]
    return header, tagPresent
def convert_segments(segment):
    """Wrap a V3 segment parameter in the V4 list-of-Segment-objects form."""
    segment_object = {'segmentId': segment}
    return [segment_object]
import itertools
def flatten_sequences_time_first(sequences):
    """Flatten sequences with time axis first.

    The resulting order matches how `torch.nn.utils.rnn.pack_sequence`
    packs sequences into a tensor.

    Args:
        sequences: Sequences with batch axis first.
    Returns:
        list: Flattened sequences with time axis first.
    """
    flattened = []
    # zip_longest pads shorter sequences with None; drop the padding.
    for timestep in itertools.zip_longest(*sequences):
        flattened.extend(item for item in timestep if item is not None)
    return flattened
def correct_thresholds(p):
    """
    Check that the thresholds are ordered th_lo < th < th_hi,
    where a value of -1 on `th` or `th_hi` means "unset" and passes.
    """
    low_ok = p['th_lo'] < p['th'] or p['th'] == -1
    high_ok = p['th'] < p['th_hi'] or p['th_hi'] == -1
    return low_ok and high_ok
import torch
def new_tensor(data, requires_grad=False):
    """Create a new tensor copying *data*, preserving its dtype."""
    kwargs = {"dtype": data.dtype, "requires_grad": requires_grad}
    return torch.tensor(data, **kwargs)
def default_complex_serializer(obj: complex, **_) -> dict:
    """
    Serialize a complex number as a dict.

    :param obj: the complex number.
    :param _: extra keyword arguments, ignored.
    :return: a ``dict`` with 'real' and 'imag' entries.
    """
    real, imag = obj.real, obj.imag
    return {'real': real, 'imag': imag}
def nchw_to_nhwc(array):
    """
    Convert a 4-D array from NCHW (ONNX) layout to NHWC (ELINA) layout.

    Arrays that are not 4-dimensional are returned unchanged.

    :param array: array to be converted
    :return: converted array
    """
    if array.ndim != 4:
        return array
    return array.transpose(0, 2, 3, 1)
def get_template_engine_plugin(self):
    """
    Retrieve the template engine plugin currently in use for this
    controller's default rendering of template files.

    :rtype: Plugin
    :return: The template engine plugin currently used for rendering
        templates in the controller.
    """
    plugin = self.template_engine_plugin
    return plugin
def append(l, elem):
    """Append *elem* to list *l* unless it is None; return the list."""
    if elem is None:
        return l
    l.append(elem)
    return l
def __list_equals(l1, l2):
    """
    Equality check between two lists.

    Args:
        (List[Object]): First list.
        (List[Object]): Second list.
    Returns:
        (Bool): True when both lists have the same length and contents,
        False otherwise.
    """
    # Different lengths can never be equal.
    if len(l1) != len(l2):
        return False
    for left, right in zip(l1, l2):
        if left != right:
            return False
    return True
def grubler(L, J1, J2):
    """
    Degrees of freedom of a planar linkage via the Grübler-Kutzbach
    criterion: M = 3(L-1) - 2*J1 - J2.

    Parameters
    ----------
    L : int
        Number of links
    J1 : int
        1-DOF pairs
    J2 : int
        2-DOF pairs

    Returns
    -------
    M : int
        Degrees of freedom
    """
    return 3 * (L - 1) - 2 * J1 - J2
import hmac
def constant_time_compare(val1, val2):
    """
    Return True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match.
    https://codahale.com/a-lesson-in-timing-attacks/

    Bug fix: ``bytes(s)`` raises TypeError for ``str`` on Python 3, so
    the function previously crashed on text input; str values are now
    UTF-8 encoded before the constant-time comparison.
    """
    if isinstance(val1, str):
        val1 = val1.encode()
    if isinstance(val2, str):
        val2 = val2.encode()
    return hmac.compare_digest(bytes(val1), bytes(val2))
import torch
def _allocate_grad(tensor, requires_grad):
    """Allocate a Tensor to store a gradient.

    When the provided Tensor requires a gradient, a zero Tensor of the
    appropriate size is allocated to store it; otherwise an empty Tensor
    is created (so it can be passed to save_for_backward either way
    without causing an error).

    Args:
        tensor: A Tensor that may or may not require a gradient
        requires_grad: Bool specifying whether tensor requires gradient

    Returns:
        Either a zero Tensor matching *tensor*, or an empty Tensor.
    """
    if not requires_grad:
        return torch.empty(0)
    return torch.zeros_like(tensor)
def required(field):
    """
    Return 'required' when the BoundField's underlying field is required,
    otherwise the empty string.
    """
    if field.field.required:
        return "required"
    return ""
import base64
def escape_jinja(string):
    """Escape a string so that jinja template variables are not expanded.

    The value is base64-encoded and decoded inside the template via the
    b64decode filter, which avoids escaping individual characters -
    ansible's parsing quirks make generic escaping fairly hard.
    See: https://github.com/ansible/ansible/issues/10464

    :param string: the string to escape
    :return: the escaped string
    """
    encoded = base64.b64encode(string.encode()).decode()
    return "{{'" + encoded + "' | b64decode }}"
from typing import Optional
import socket
def resolve_hostname(hostname: str) -> Optional[str]:
    """Resolve *hostname* to an IP address string, or None when it cannot
    be resolved."""
    try:
        ip_address = socket.gethostbyname(hostname)
    except socket.error:
        return None
    return ip_address
def get_datatype(value) -> list:
    """
    Determine the data type of an object and coerce it when possible.
    A string such as "1.23" results in type "float" and "2" in type "int".

    Note: exact type checks (``type(x) is ...``) are deliberate - e.g. a
    bool is NOT treated as an int here and falls through to "str".

    Args:
        value: object to be typed
    Returns:
        list [type, value]: the data type code and the coerced object
    """
    if type(value) is str:
        # digit-only strings become ints
        if value.isdigit():
            return ["int", int(value)]
        try:
            return ["float", float(value)]
        except ValueError:
            # not numeric at all - keep it as a string
            return ["str", value]
    if type(value) is int:
        return ["int", int(value)]
    if type(value) is float:
        return ["float", float(value)]
    # everything else (bools, lists, None, ...) is labelled "str" unchanged
    return ["str", value]
def _dict_diff(old, new):
    """
    Compare two dicts, returning two sets: the keys that have been added
    or changed, and the keys that have been deleted.
    """
    old_keys = set(old)
    new_keys = set(new)
    changed = new_keys - old_keys
    deleted = old_keys - new_keys
    # Keys present in both count as changed only when their values differ.
    changed.update(key for key in new_keys & old_keys if new[key] != old[key])
    assert len(changed & deleted) == 0
    return changed, deleted
def expected_dist_to_boundary(dimension):
    """The expected distance to the boundary for random uniform points.

    :param dimension: positive dimensionality of the space
    :raises ValueError: if dimension is not positive.  (Previously an
        ``assert`` was used, which is silently stripped under ``python -O``.)
    """
    if dimension <= 0:
        raise ValueError("dimension must be positive")
    return 0.5 / (1 + dimension)
def parse_healing_and_source(line, is_lifetap, is_absorb):
    """Extract the healing amount and the character providing it from a log line.

    Lifetap lines carry the amount as the 4th token and absorb lines as the
    6th; ordinary heal lines name the source between token 5 and the word
    'for', followed by the amount. Returns ``[amount, source]`` where source
    is '' for lifetap/absorb lines and has any leading 'the ' stripped.
    """
    tokens = line.split()
    healer = ''
    if is_lifetap:
        amount = int(tokens[3])
    elif is_absorb:
        amount = int(tokens[5])
    else:
        for_idx = tokens.index('for')
        healer = ' '.join(tokens[5:for_idx])
        amount = int(tokens[for_idx + 1])
    healer = healer.replace('the ', '')
    return [amount, healer]
def kv_string(key, value, *, connector="="):
    """Join *key* and *value* into a single string separated by *connector*."""
    return "{}{}{}".format(key, connector, value)
def get_N_lB(filepath):
    """
    Returns the degree of polymerization N and Bjerrum length lB from filepath.
    *Assumes symmetric polymers.
    Parameters:
    -----------
    filepath : string
        Full filepath to data file; its second-to-last (Windows-style,
        backslash-separated) path component must look like
        "...NA(<N>)NB(...)...lB(<lB>)...".
    Returns:
    --------
    N : int
        Degree of polymerization
    lB : float
        Bjerrum length (non-dimensionalized by sigma, bead diameter)
    """
    # The folder name (second-to-last path component) carries the parameters.
    # NOTE(review): split on '\\' assumes Windows-style paths — confirm callers.
    folder = filepath.split('\\')[-2]
    # N sits between "NA(" and ")NB"
    n_start = folder.find('NA') + len('NA(')
    n_end = folder.find(')NB')
    N = int(folder[n_start:n_end])
    # lB sits between "lB(" and the next ")"
    lb_start = folder.find('lB(') + len('lB(')
    lb_end = folder.find(')', lb_start)
    lB = float(folder[lb_start:lb_end])
    return N, lB
def remove_article(str_):
    """Strip every occurrence of 'the ' from *str_* and title-case the result."""
    without_article = str_.replace('the ', '')
    return without_article.title()
def normalize_bool(val, param_name):
    """
    Normalize boolean-like strings to their corresponding boolean values.
    :param val: Value to normalize. Acceptable values:
        True, "True", "true", False, "False", "false"
    :type val: str or bool
    :param param_name: Name of the parameter being checked
    :type param_name: str
    :rtype: bool
    :raises ValueError: if val is not one of the accepted values
    """
    if isinstance(val, bool):
        return val
    lookup = {"True": True, "true": True, "False": False, "false": False}
    try:
        return lookup[val]
    except (KeyError, TypeError):
        # TypeError covers unhashable inputs (e.g. lists); both are invalid.
        raise ValueError(
            'Parameter `{}` must be True, "True", "true", False, "False", or "false".'.format(
                param_name
            )
        ) from None
def is_ready(model):
    """Report whether *model* is prepared (pywarm forward pre-hook attached)."""
    try:
        model._pywarm_forward_pre_hook
    except AttributeError:
        return False
    return True
from pathlib import Path
def project_path(dataset_path, suffix):
    """Build a predictable temporary project path name for *dataset_path*.

    The result is "tmp_<stem>" with the given *suffix* as its extension.
    """
    stem = dataset_path.stem
    return Path(f"tmp_{stem}").with_suffix(suffix)
def to_fahrenheit(celsius):
    """Convert a temperature in degrees Celsius to degrees Fahrenheit."""
    # Same evaluation order as (celsius * 9) / 5 to keep float results identical.
    scaled = celsius * 9 / 5
    return scaled + 32
def bind(sock, endpoint='tcp://127.0.0.1:0'):
    """Bind socket to endpoint accepting a variety of endpoint formats.

    If the endpoint is tcp with port 0 (or not given), a random port is
    requested via bind_to_random_port and included in the returned endpoint.
    :param sock: Socket to bind
    :type sock: zmq.Socket
    :param endpoint: endpoint to bind as string or (address, port) tuple
    :type endpoint: tuple or str
    :return: bound endpoint
    :rtype: str
    """
    if not endpoint:
        endpoint = 'tcp://127.0.0.1:0'
    elif isinstance(endpoint, (tuple, list)):
        endpoint = 'tcp://{}:{}'.format(*endpoint)
    wants_random_port = endpoint.startswith('tcp://') and endpoint.endswith(':0')
    if wants_random_port:
        base = endpoint[:-2]  # strip the trailing ":0"
        port = sock.bind_to_random_port(base)
        return '{}:{}'.format(base, port)
    sock.bind(endpoint)
    return endpoint
import inspect
def get_unbound_fn(method_or_fn):
    """Return the underlying function for a bound method, or the callable itself.

    Raises ValueError for non-callable inputs.
    """
    if inspect.ismethod(method_or_fn):
        return method_or_fn.__func__
    if callable(method_or_fn):
        return method_or_fn
    raise ValueError('Expect a function or method.')
def parse_tags(tags):
    """Split WeeChat's comma-separated tag string into a list of tag strings."""
    # WeeChat delivers tags as one comma-joined string.
    return tags.split(",")
def _add_unit(label, xs):
"""Add a unit string to the label if available.
This also converts a label of density to n_e.
Parameters
----------
label : str
The label
xs : values
The numeric values
Returns
-------
label : str
If xs is an AstroPy Quantity then " (<unit>)" is added to the
label, using LaTeX (inline) formatting. If the input label is
'density' then it is replaced by '$n_e$'.
"""
if label == 'density':
label = '$n_e$'
try:
unit = xs.unit.to_string(format='latex_inline')
return r"{} ({})".format(label, unit)
except AttributeError:
return label | 3d55c1091f53f3e4e04723bea18a27c0662fe58a | 124,970 |
from pathlib import Path
def read_source(path: Path) -> str:
    """Return the contents of the file at *path*, decoded as UTF-8."""
    with path.open(encoding='utf-8') as handle:
        return handle.read()
def group_intersection(group):
    """Return the set of characters common to every string in *group*.

    Args:
        group: iterable of strings.

    Returns:
        set: characters present in all strings; empty set for an empty
        group (the previous implementation raised TypeError on empty input).
    """
    char_sets = [set(s) for s in group]
    if not char_sets:
        return set()
    return set.intersection(*char_sets)
def filter_clusters_by_size(clusters, min_cluster_size=2):
    """Filter clusters in dict{cluster_id:set(), cluster_id:set(), ...} by size.

    e.g., clusters={0:set("movie/zmw/s_e",
                          "movie/zmw/s_e"),
                    ...
                    18:set("", "")}
    return clusters which have at least $min_cluster_size members.

    Fixed: used Python-2-only ``dict.iteritems()``, which raises
    AttributeError on Python 3; replaced with ``dict.items()``.
    """
    return {cid: members for cid, members in clusters.items()
            if len(members) >= min_cluster_size}
def integrate_copy_number(y, cancer_genes_df, genes, loss_df, gain_df,
                          include_mutation=True):
    """
    Function to integrate copy number data to define gene activation or gene
    inactivation events. Copy number loss results in gene inactivation events
    and is important for tumor suppressor genes while copy number gain results
    in gene activation events and is important for oncogenes.

    Arguments:
    y - pandas dataframe samples by genes where a 1 indicates event
    cancer_genes_df - a dataframe listing bona fide cancer genes as defined by
    the 20/20 rule in Vogelstein et al. 2013; assumed to carry 'Gene Symbol'
    and 'Classification*' columns (TODO confirm against the data loader)
    genes - the input list of genes to build the classifier for
    loss_df - a sample by gene dataframe listing copy number loss events
    gain_df - a sample by gene dataframe listing copy number gain events
    include_mutation - boolean to decide to include mutation status

    Returns:
    y extended with integer '<gene>_loss' columns (tumor suppressors) and
    '<gene>_gain' columns (oncogenes); the original mutation columns are
    dropped when include_mutation is False.
    """
    # Find if the input genes are in this master list
    genes_sub = cancer_genes_df[cancer_genes_df['Gene Symbol'].isin(genes)]
    # Add status to the Y matrix depending on if the gene is a tumor suppressor
    # or an oncogene. An oncogene can be activated with copy number gains, but
    # a tumor suppressor is inactivated with copy number loss
    tumor_suppressor = genes_sub[genes_sub['Classification*'] == 'TSG']
    oncogene = genes_sub[genes_sub['Classification*'] == 'Oncogene']
    copy_loss_sub = loss_df[tumor_suppressor['Gene Symbol']]
    copy_gain_sub = gain_df[oncogene['Gene Symbol']]
    # Append to column names for visualization
    copy_loss_sub.columns = [col + '_loss' for col in copy_loss_sub.columns]
    copy_gain_sub.columns = [col + '_gain' for col in copy_gain_sub.columns]
    # Add columns to y matrix; joins align on the sample index
    y = y.join(copy_loss_sub)
    y = y.join(copy_gain_sub)
    # Fill missing data with zero (measured mutation but not copy number)
    y = y.fillna(0)
    y = y.astype(int)
    if not include_mutation:
        # Keep only the copy-number-derived columns
        y = y.drop(genes, axis=1)
    return y
def lorentz(v, v0, I, w):
    """
    A Lorentzian line shape parameterized by linewidth at half intensity (w).

    :param v: Array of values at which to evaluate distribution.
    :param v0: Center of the distribution.
    :param I: Peak intensity at v0.
    :param w: Peak width at half max intensity
    :returns: Distribution evaluated at points in v.
    """
    half_width = 0.5 * w
    return I * (half_width ** 2 / (half_width ** 2 + (v - v0) ** 2))
import json
def _ReadJsonList(file_path):
"""Read a JSON file that must contain a list, and return it."""
with open(file_path) as f:
data = json.load(f)
assert isinstance(data, list), "JSON file %s is not a list!" % file_path
return [item.encode('utf8') for item in data] | 410ad521fec0d4c01d4c1e40732f0e4782e6a90e | 124,983 |
def jday(year, mon, day, hr, minute, sec):
    """Return two floats that, when added, produce the specified Julian date.

    The first float specifies the day, while the second float specifies
    an additional offset for the hour, minute, and second. Because the
    second float is much smaller in magnitude it can, unlike the first
    float, be accurate down to very small fractions of a second.
    """
    year_term = 367.0 * year
    # floor-divide (// 1.0) truncates each term to a whole number of days
    leap_term = 7 * (year + ((mon + 9) // 12.0)) * 0.25 // 1.0
    month_term = 275 * mon / 9.0 // 1.0
    whole_days = year_term - leap_term + month_term + day + 1721013.5
    seconds_into_day = sec + minute * 60.0 + hr * 3600.0
    return whole_days, seconds_into_day / 86400.0
import importlib
def return_attr_from_module(module_name, attr_name):
    """
    Given a module path and the name of an attribute that exists in that
    module, import the attribute from the module using the importlib package
    and return it. The module path is resolved under the "evoltree" package.

    :param module_name: The name/location of the desired module.
    :param attr_name: The name of the attribute.
    :return: The imported attribute from the module.
    :raises Exception: if the module or the attribute cannot be found.
    """
    module_name = "evoltree." + module_name
    try:
        module = importlib.import_module(module_name)
    except ModuleNotFoundError:
        msg = ("utilities.algorithm.initialise_run.return_attr_from_module\n"
               "Error: Specified module not found: %s" % (module_name))
        raise Exception(msg)
    try:
        return getattr(module, attr_name)
    except AttributeError:
        msg = ("utilities.algorithm.initialise_run.return_attr_from_module\n"
               "Error: Specified attribute '%s' not found in module '%s'."
               % (attr_name, module_name))
        raise Exception(msg)
import torch
def predict_stance(feature_vector, clf):
    """
    Predicts the stance for a given feature vector, using the classifier
    passed as argument, returning the prediction as a one-element list.

    :param feature_vector: an array of features; the first two entries
        (text ID and text label) are skipped.
    :param clf: a stance detection classifier (callable on a torch tensor)
    :return: list with the predicted class index
    """
    # Skip text ID and text label, keep only the embedding parts.
    features = feature_vector[2:]
    flattened = []
    for emb in features:
        if clf and type(emb) is list:
            # Flatten one extra level of nesting to allow use of LSTM model
            for nested in emb:
                flattened.extend(nested)
        else:
            flattened.extend(emb)
    scores = clf(torch.tensor(flattened))
    # argmax over class dimension -> single predicted class index
    return [torch.argmax(scores.data, dim=1).item()]
def is_ascii(s):
    """Return True if every character of *s* has a code point below 256.

    Note: despite the name, this accepts the full Latin-1 range (< 256),
    not just 7-bit ASCII (< 128).
    Thanks to this thread on StackOverflow: http://stackoverflow.com/a/196392/881330
    """
    for ch in s:
        if ord(ch) >= 256:
            return False
    return True
def merge(*objs):
    """
    Create a ``dict`` merged with the key-values from the provided dictionaries
    such that each next dictionary extends the previous results.

    Examples:
        >>> item = merge({'a': 0}, {'b': 1}, {'b': 2, 'c': 3}, {'a': 1})
        >>> item == {'a': 1, 'b': 2, 'c': 3}
        True

    Args:
        *objs (dict): Dictionary sources.

    Returns:
        dict
    """
    merged = {}
    # Later sources win: copy each mapping in order, key by key.
    for source in objs:
        for key, value in source.items():
            merged[key] = value
    return merged
def _genericIterateIntersection(a, b):
"""Iterate through all elements in a that are also in b.
Somewhat like a set's intersection(),
but not type-specific so it can work with OrderedDicts, etc.
It also returns a generator instead of a set,
so you can pick what container type you'd like,
if any.
"""
return (x for x in a if x in b) | a452bc9b4494165d7f6eb1e625e123bde5815846 | 124,990 |
def prob(A):
    """Compute the probability of a proposition, A.

    A: Boolean series (anything exposing a ``.mean()``, e.g. a pandas
       Series or numpy array)
    returns: probability, i.e. the fraction of True values
    """
    return A.mean()
def get_difference(time, actual_time):
    """
    Returns the ratio of actual time to planned time, rounded to 2 decimals.

    1 means the task finished as planned; n > 1 means it took longer than
    planned; n < 1 means it took less time than planned. Both arguments may
    be numbers or numeric strings.
    """
    ratio = float(actual_time) / float(time)
    return round(ratio, 2)
def RestrictDict( aDict, restrictSet ):
    """Return a dict which has the mappings from the original dict only for keys in the given set"""
    kept_keys = frozenset( restrictSet ) & frozenset( aDict.keys() )
    return { key: aDict[key] for key in kept_keys }
def get_max(list_of_string):
    """Return the maximum value, as int, from a list of numeric strings or ints."""
    return max(int(item) for item in list_of_string)
def spin(pgms, move):
    """Rotate the program list so the last *move* programs come to the front.

    >>> spin(['a', 'b', 'c', 'd', 'e'], 1)
    ['e', 'a', 'b', 'c', 'd']
    >>> spin(['a', 'b', 'c', 'd', 'e'], 3)
    ['c', 'd', 'e', 'a', 'b']
    """
    tail, head = pgms[-move:], pgms[:-move]
    return tail + head
from typing import Optional
def is_jumpstart_model_input(model_id: Optional[str], version: Optional[str]) -> bool:
    """Determines if `model_id` and `version` input are for JumpStart.

    Returns True if both arguments are not None, False if both arguments
    are None, and raises an exception if exactly one argument is None.

    Args:
        model_id (str): Optional. Model ID of the JumpStart model.
        version (str): Optional. Version of the JumpStart model.

    Raises:
        ValueError: If only one of the two arguments is None.
    """
    if model_id is None and version is None:
        return False
    if model_id is None or version is None:
        raise ValueError(
            "Must specify `model_id` and `model_version` when getting specs for "
            "JumpStart models."
        )
    return True
def frequency(observed_mutant_count, number_of_cells):
    """Calculate mutation fraction (or frequency) per culture as r/N.

    :param observed_mutant_count: observed number of mutants in a culture (r)
    :param number_of_cells: number of cells in a culture (N)
    :return: mutation fraction per culture
    """
    mutant_fraction = observed_mutant_count / number_of_cells
    return mutant_fraction
def get_parts(line):
    """
    Split a rule line at its first colon and strip whitespace from both parts.

    >>> get_parts('1-3 a: abcde')
    ('1-3 a', 'abcde')
    >>> get_parts('1-3 b: cdefg')
    ('1-3 b', 'cdefg')
    >>> get_parts('2-9 c: ccccccccc')
    ('2-9 c', 'ccccccccc')

    :param line: raw rule line containing a single ':' separator
    :return: (rule, password) tuple with surrounding whitespace removed
    """
    rule, password = line.split(":", 1)
    return rule.strip(), password.strip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.