content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
import re
def is_regex_match(regex_pattern, search_context):
    """
    Report whether ``search_context`` matches ``regex_pattern``.

    Uses ``re.match`` semantics (the pattern must match at the start of
    the string). Returns True on a match, otherwise False.
    """
    compiled = re.compile(regex_pattern)
    return compiled.match(search_context) is not None
|
7c5087a107fa0a8f77fafbf00b88fc24c64a2de1
| 450,478
|
def _ToYAMLDefault(appinfo_config):
"""Converts an app config to default (alphabetical by key) YAML string.
Args:
appinfo_config: appinfo.AppInfoExternal object. Configuration object
for either a module or app.yaml.
Returns:
String containing YAML for the app config object.
"""
return appinfo_config.ToYAML()
|
dee59899a30a1ebc00af497ee82cd08e6e7ba01c
| 317,187
|
import math
def _round_to_nearest_multiple_down(x, n=5):
"""Round down from x to the nearest multiple of n."""
return n * math.floor(float(x) / n)
|
fb937b48e1f6f682c46ac76c7d94ffb1da3c8818
| 381,529
|
import six
def msg2geojson(msg):
    """
    **Experimental**
    Convert a single positional message to GeoJSON.  `lat` and `lon` are
    popped from ``msg`` (the input dict IS mutated) and used to build the
    geometry; all remaining fields become the `properties`.

    Parameters
    ----------
    msg : dict
        GPSd message containing at least 'lat' and 'lon' keys.

    Returns
    -------
    dict
        GeoJSON Feature with a Point geometry of (lon, lat).
    """
    y = msg.pop('lat')
    x = msg.pop('lon')
    return {
        'type': 'Feature',
        # Bug fix: the original filtered with ``k != 'lat' or k != 'lon'``,
        # which is always True (and relied on py2-era six.iteritems).
        # lat/lon were already removed by pop(), so a plain copy of the
        # remaining fields is both correct and equivalent.
        'properties': dict(msg),
        'geometry': {
            'type': 'Point',
            'coordinates': (x, y)
        }
    }
|
16ffc1c1905379b848212bdf118f6451c6655b67
| 159,139
|
def o_counter(listy):
    """
    Input: A list of numbers.
    Output: The number of entries in the list, via the len() builtin.
    """
    return len(listy)
|
80cfe29cd8bc132fbd877598923b4d0d823ada36
| 615,608
|
def csv2list(filename, column, lower=True, header=True, dedupe=True): #------<<<
    """
    Create a list from a column in a CSV file.
    filename = name of .CSV file
    column = column # (0-based) to be returned as a list
    lower = whether to make the values in the list lowercase
    header = whether .CSV file has a header row as the first line
    dedupe = whether to remove duplicate values
    Returns the sorted list.

    Note: splits naively on ',' (no quoted-field handling), matching the
    original behavior.
    """
    values = []
    # Use a context manager so the file handle is always closed; the
    # original called open().readlines() and leaked the handle.
    with open(filename, 'r') as infile:
        for lineno, line in enumerate(infile):
            if lineno == 0 and header:
                continue  # skip over the header line
            value = line.split(',')[column].strip()
            if lower:
                value = value.lower()
            values.append(value)
    if dedupe:
        return sorted(set(values))
    return sorted(values)
|
cf85b45d3ff7e58971e3f06ea23081d0011aa00a
| 308,160
|
def get_google_access_groups_for_service_account(service_account):
    """
    Return list of fence.models.GoogleBucketAccessGroup objects that the
    given service account should have access to based on its access
    privileges.
    Args:
        service_account (fence.models.UserServiceAccount): service account
            object
    Returns:
        List[fence.models.GoogleBucketAccessGroup]: list of google bucket
        access groups the service account should have access to
    """
    groups = []
    for access_privilege in service_account.access_privileges:
        for bucket in access_privilege.project.buckets:
            groups.extend(bucket.google_bucket_access_groups)
    return groups
|
6408d0e44c0f26a537e4e329967abd73a59883d7
| 298,217
|
import math
def standard_logistic_function(x: float) -> float:
    """
    Computes value of standard logistic function.

    Uses the numerically stable two-branch form: the original
    ``1 / (1 + exp(-x))`` raises OverflowError for x below about -709
    because ``math.exp(-x)`` overflows.

    :param x: Point to be evaluated
    :return: Value of logistic function evaluated in x
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # For negative x, exp(x) underflows gracefully to 0.0 instead of
    # overflowing; results are identical where both forms are finite.
    z = math.exp(x)
    return z / (1 + z)
|
5cc6559949f3d20064432dc0a3d1c60698f253bf
| 141,496
|
def map_tcp_flags(bitmap):
    """
    Maps text names of tcp flags to values in bitmap
    :param bitmap: array[8]
    :return: dictionary with keynames as names of the flags
    """
    # Names ordered from least-significant position (bitmap[7]) upward.
    # "CRW" preserves the original (misspelled) key so existing callers
    # keep working; insertion order matches the original dict as well.
    names = ("FIN", "SYN", "RST", "PSH", "ACK", "URG", "ECE", "CRW")
    return {name: bitmap[7 - position] for position, name in enumerate(names)}
|
f6388e3aa6a7540df8b26ba2061be5807f6c19be
| 72,749
|
from typing import Dict
def merge_dict(dest: Dict, src: Dict, path=None, overwrite=True):
    """
    Merges src dict into dest dict, recursively and in place.
    Parameters
    ----------
    dest: Dict
        dest dict (mutated)
    src: Dict
        source dict
    path: List
        merge path (internal, used for error reporting)
    overwrite: bool
        Whether to overwrite dest's value when there is a leaf conflict
    Returns
    -------
    Dict
        Updated dest dict
    Raises
    ------
    ValueError
        On a conflicting leaf when ``overwrite`` is False.
    """
    if path is None:
        path = []
    for key in src:
        if key not in dest:
            dest[key] = src[key]
        # Fix: isinstance against the builtin ``dict`` -- checking against
        # typing.Dict is deprecated and only worked as an alias.
        elif isinstance(dest[key], dict) and isinstance(src[key], dict):
            merge_dict(dest[key], src[key], path + [str(key)], overwrite=overwrite)
        elif dest[key] == src[key]:
            pass  # same leaf value
        elif overwrite:
            dest[key] = src[key]
        else:
            raise ValueError('Conflict at %s' % '.'.join(path + [str(key)]))
    return dest
|
a9d25f2fff6755f0d947b319182e4d0dfbd22f81
| 618,501
|
import torch
def model_to(on_gpu, model):
    """Transfers model to cpu/gpu.
    Args:
        on_gpu (bool): Transfers model to gpu if True otherwise to cpu
        model (torch.nn.Module): PyTorch defined model.
    Returns:
        torch.nn.Module:
            The model after being moved to cpu/gpu.
    """
    if not on_gpu:
        return model.to("cpu")
    # DataParallel only works with cuda devices, so wrap on the GPU path.
    wrapped = torch.nn.DataParallel(model)
    return wrapped.to("cuda")
|
c8731f84448cf6fccc387fe50b359e118cc2ce5e
| 670,773
|
def Data_Type(data):
    """
    Return the type name of the item received as a string: whether it is a
    dictionary, list, string, integer etc.
    CODE: Data_Type(data)
    AVAILABLE PARAMS:
        (*) data  -  This is the variable you want to check.
    RETURN VALUES:
        list, dict, str, int, float, bool (or any other type's __name__)
    EXAMPLE CODE:
        Data_Type(['this','is','a','list'])   # 'list'
        Data_Type({"a": "1"})                 # 'dict'
        Data_Type('a string')                 # 'str'
        Data_Type(12)                         # 'int'
        Data_Type(4.3)                        # 'float'
        Data_Type(True)                       # 'bool'
    ~"""
    return type(data).__name__
|
1720c922905671241e36459bb9c13020d3ac6cae
| 459,560
|
def binary_search(seq, f, target):
    """Find no such that f(seq[no-1]) <= target and f(seq[no]) > target.
    If target < f(seq[0]), return 0
    If target > f(seq[-1]), return len(seq)
    Assume f(seq[no]) < f(seq[no+1]) (f is strictly increasing over seq).
    The integer result is useful for indexing the array.
    Args:
        seq: sequence of inputs on which to act
        f: function that returns a comparable when called on any input
        target: value
    Returns: index of item in seq meeting search requirements
    """
    if not seq or f(seq[0]) > target:
        return 0
    if f(seq[-1]) < target:
        return len(seq)
    lo, hi = 0, len(seq)
    # Invariant: f(seq[lo]) <= target and (hi == len(seq) or f(seq[hi]) > target)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if f(seq[mid]) > target:
            hi = mid
        else:
            lo = mid
    return hi
|
fa3187d6d12955cc6bc66c98b5c8b31e5b8b0e61
| 542,173
|
import json
def load_tweets(path):
    """Loads tweets that have previously been saved as a json file.
    Calling load_tweets(path) after save_tweets(tweets, path)
    will produce the same list of tweets.
    Args:
        path (str): The place where the tweets were saved.
    Returns:
        list: A list of Dictionary objects, each representing one tweet."""
    with open(path, "r") as handle:
        return json.load(handle)
|
85e6933aa78db2e76a5c3e4abcdda897deabe3bf
| 235,304
|
def key_in_dict(input_dict, input_key):
    """Return True when ``input_key`` is a key of ``input_dict`` (bool)."""
    found = input_key in input_dict
    return found
|
42c24889d7863fa3500a0f7a3eebd11b18e5bdfc
| 426,477
|
def segment_segment_intersection_2(x1, y1, x2, y2, t):
    """ Return the point on segment (x1,y1)->(x2,y2) at parameter t. """
    dx = x2 - x1
    dy = y2 - y1
    return [x1 + t * dx, y1 + t * dy]
|
75f68699e1c3adac8be23338052610bba19dde95
| 261,837
|
def PostMessage(session, queueurl, message):
    """Post a message to the given queue
    :param session: Session to use for AWS access
    :type session: boto3.session.Session
    :param queueurl: URL of the queue to which we will post messages
    :type queueurl: str
    :param message: Body of message to post
    :type message: str
    :return: The created message
    :rtype: object
    """
    connection = session.connect_to("sqs")
    collection_cls = session.get_collection("sqs", "MessageCollection")
    queue_messages = collection_cls(connection=connection, queue_url=queueurl)
    return queue_messages.create(message_body=message)
|
e62dbd044e3cdae2462b1a81c54d0faf5c0e7563
| 130,668
|
def _IsNamedTuple(x):
"""Returns whether an object is an instance of a collections.namedtuple.
Examples::
_IsNamedTuple((42, 'hi')) ==> False
Foo = collections.namedtuple('Foo', ['a', 'b'])
_IsNamedTuple(Foo(a=42, b='hi')) ==> True
Args:
x: The object to check.
"""
return isinstance(x, tuple) and hasattr(x, '_fields')
|
93c7438f5472c8d8b95b48aaced4833e0632c3c7
| 685,404
|
def factorial(n):
    """
    Def: In mathematics, the factorial of a non-negative integer n, denoted
    by n!, is the product of all positive integers less than or equal to n.
    For example: 5! = 5 * 4 * 3 * 2 * 1 = 120

    Raises ValueError for negative n (the original silently returned 1,
    which is mathematically wrong).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    # range(2, n + 1) is empty for n in {0, 1}, so both return 1.
    for i in range(2, n + 1):
        result *= i
    return result
|
bcc7c7be75274e3cbfa9ad6dc751f2255dc86e24
| 347,656
|
import unicodedata
def strip_accents(line: str) -> str:
    """Strips accents from a piece of text.

    Decomposes to NFD and drops combining marks (category 'Mn').
    """
    nfd = unicodedata.normalize("NFD", line)
    output = [c for c in nfd if unicodedata.category(c) != "Mn"]
    # Bug fix: the original guard ``if len(output) == line`` compared an
    # int with a str and was therefore always False (dead code).  The
    # join below is what actually ran every time, so it is kept alone.
    return "".join(output)
|
4100a1748ec1911ef7fc11e0a625f295263381ac
| 449,382
|
from datetime import datetime
def generate_folder(source_date):
    """
    Generate partial folder name ("YYYY/MM-DD") based on provided source_date
    :param source_date: datetime
    """
    assert isinstance(source_date, datetime)
    d = source_date.date()
    # Zero-padded to 4/2/2 digits, e.g. 2020/01-05.
    return f"{d.year:04d}/{d.month:02d}-{d.day:02d}"
|
651ff58e0a81b5708b7b99803686f2aeb849e56e
| 221,852
|
def hello(name):
    """
    Return a greeting string for whatever name you enter.
    Usage:
    >>> hello('Emiel')
    'Hello, Emiel!'
    """
    # '+' concatenation, like the original join, requires name to be a str.
    return "Hello, " + name + "!"
|
ffbdf2ee0869b3fd20f0cb85eff8eae748e30d28
| 49,609
|
from typing import Deque
def json_path(absolute_path: Deque[str]):
    """Flatten a data path to a dot delimited string.
    :param absolute_path: The path (str keys; int elements become indexes)
    :returns: The dot delimited string, rooted at "$"
    """
    parts = ["$"]
    for element in absolute_path:
        if isinstance(element, int):
            parts.append("[" + str(element) + "]")
        else:
            parts.append("." + element)
    return "".join(parts)
|
7c61f784fa269925e42ac5f1cc3de9e2c55b9718
| 26,725
|
def complement(base):
    """ Complement nucleotide; unknown bases are returned unchanged. """
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return pairs.get(base, base)
|
213855c4ff3569c4469e6aadf50df18917d861f9
| 412,465
|
import json
def format_containers(containers, json_out):
    """Format container data for Ansible
    Args:
        containers: [(hostname, metadata), ...]
        json_out: If True, return JSON, else dictionary.
    Returns:
        Dictionary of container information formatted to Ansible specs.
    """
    hosts = []
    hostvars = {}
    for host, metadata in containers:
        # docs use dict keys set to none, but maybe all is special?
        hosts.append(host)
        if metadata:
            hostvars[host] = {'docker_metadata': metadata}
    data = {'all': {'vars': {'ansible_connection': 'docker'},
                    'hosts': hosts,
                    '_meta': {'hostvars': hostvars}}}
    if json_out:
        return json.dumps(data)
    return data
|
cc25ceb34d2029e2f43f78d10f6dce0c4f3074aa
| 105,459
|
def _get_objective_info(num_classes):
"""Provide information on classifier objective.
:param num_classes: integer greater than 1; number of classes
:return: dict with information on objective and evaluation metric for XGBoost
"""
if num_classes == 2:
return {"objective": "binary:logistic", "eval_metric": "logloss"}
return {"objective": "multi:softprob", "eval_metric": "mlogloss", "num_class": num_classes}
|
3c0c7880cd21a7078cc27eafaf780ddfcdded0ed
| 281,852
|
def logged_in_user(client, user):
    """Force-login ``user`` on the test ``client`` and return the user."""
    # force_login bypasses authentication backends (test-only helper).
    client.force_login(user)
    return user
|
7177a2fff19951c8d0166a691303e12e17512b25
| 368,784
|
import hashlib
def md5_key_mangler(key):
    """Receive cache keys as long concatenated strings;
    distill them into an md5 hash.
    """
    return hashlib.md5(key.encode('utf-8')).hexdigest()
|
92edb4d2ce2a1c6c5d0724213aa854e9b34a4961
| 479,280
|
def some(predicate, seq):
    """If some element x of seq satisfies predicate(x), return predicate(x).
    Returns False when no element does.
    >>> some(callable, [min, 3])
    1
    >>> some(callable, [2, 3])
    0
    """
    # First truthy predicate result wins; False is the "none found" default.
    return next((result for result in map(predicate, seq) if result), False)
|
f39d8fb62081f06eaf229041423e50ca3b2b817d
| 690,145
|
import typing
import glob
def find_requirement_files() -> typing.List[str]:
    """Find all *requirements.txt files below the working directory."""
    pattern = '**/*requirements.txt'
    return glob.glob(pattern, recursive=True)
|
b9a4729617049ad8ba53152fffd9368024f71203
| 86,438
|
def check_rack_labels(df_map):
    """Replace '.' with '_' in rack label columns (dots cause execution
    failures). Mutates and returns ``df_map``.
    """
    label_columns = ('TECAN_sample_labware_name',
                     'TECAN_primer_labware_name',
                     'TECAN_dest_labware_name')
    for col in label_columns:
        df_map[col] = [value.replace('.', '_') for value in df_map[col].tolist()]
    return df_map
|
9d30237412e3b1fb658ec705c89b9825503091b4
| 312,232
|
def card(id, title, s):
    """Build a <card> markup element from id, title and body text."""
    opening = '<card id="%s" title="%s">' % (id, title)
    body = "<p>%s</p>" % s
    return opening + body + "</card>"
|
a112372f8fa7cc159df57cf465c2838114353992
| 593,542
|
def parse_tint(tint):
    """Parse a comma-separated tint string into a tuple of ints.
    Returns None for an empty string."""
    if not len(tint):
        return None
    return tuple(int(component) for component in tint.split(','))
|
5917f3bfd2bc206b385008d5bd3e318ccf4f2bc5
| 328,116
|
def sub_symbols(pattern, code, symbol):
    """Substitutes symbols in CLDR number pattern."""
    # '¤¤' (currency code) must be replaced before the single '¤' (symbol).
    with_code = pattern.replace('¤¤', code)
    return with_code.replace('¤', symbol)
|
4d87e263ba53e99368fb82c8c93b3998834852ad
| 38,800
|
from typing import OrderedDict
from typing import Type
import collections
def kwtypes(**kwargs) -> OrderedDict[str, Type]:
    """
    This is a small helper function to convert the keyword arguments to an OrderedDict of types.
    .. code-block:: python
        kwtypes(a=int, b=str)
    """
    # **kwargs preserves call order, so the constructor keeps it too.
    return collections.OrderedDict(kwargs)
|
8024e6940f84f2eab8d4c44924889624d75ca3bd
| 698,354
|
def lerp(x0, x1, t):
    """ Linearly interpolate between x0 and x1 at parameter t. """
    weight_end = t
    weight_start = 1.0 - t
    # Weighted-sum form (kept bit-for-bit identical to the original formula).
    return weight_start * x0 + weight_end * x1
|
82d9ce36dd5879c7aab64dc5615a2fb298471383
| 143
|
from typing import List
import random
def uniformly_split_num(
    sum: int,
    n: int,
) -> List[int]:
    """Generate `n` non-negative numbers that sum up to `sum`.

    Draws n-1 uniform cut points in [0, sum] and returns the lengths of the
    intervals between consecutive (sorted) cut points.
    """
    assert n > 0, 'n should be > 0'
    assert sum >= 0, 'sum should be >= 0'
    cut_points = [random.randint(0, sum) for _ in range(n - 1)]
    # One sort suffices: the original called sorted() on the cut points and
    # then re-sorted the whole list, duplicating the work.
    values = [0] + cut_points + [sum]
    values.sort()
    return [values[i + 1] - values[i] for i in range(len(values) - 1)]
|
8136cce9bd258e7b187646967402dc428f04a764
| 680,551
|
def externalarray(file_manager):
    """
    Return the file manager's external array references.
    """
    references = file_manager.external_array_references
    return references
|
cee20da69b485ef2260e7222135cd58466d8ad49
| 637,333
|
def get_block_lines(b, mdata):
    """
    Return the number of lines based on the block index in the RAW file.
    """
    if b == 5:  # transformer block
        # Winding code 0 means a two-winding transformer (4 lines),
        # anything else a three-winding transformer (5 lines).
        return 4 if mdata[0][2] == 0 else 5
    counts = (1, 1, 1, 1, 1, 4, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0)
    return counts[b]
|
f00a8da81dde73b75564b8968be02c8597c0b1bc
| 54,830
|
def remove_prefix(s: str, prefix: str) -> str:
    """Remove the prefix from the string. I.e., str.removeprefix in Python 3.9."""
    if not s.startswith(prefix):
        return s
    return s[len(prefix):]
|
5a2f74477ad38c3ba1a96e03560fc1c0d6664a91
| 117,870
|
import random
def rand_iterable(it, len_it=None):
    """
    Choose a uniformly random value from the given iterable. If len(it) is
    available, len_it may be None. Otherwise, len_it must be provided as the
    "length" of the given iterable (the number of values which will be
    produced).
    random.choice() seems to require a sequence (needs indexed access), so it
    doesn't work with things like sets.
    :param it: The iterable
    :param len_it: The length of it, or None to obtain the length via len().
    :return: A random value from it
    """
    remaining = len(it) if len_it is None else len_it
    for seen, value in enumerate(it):
        # Select each element with probability 1/(elements left), which
        # yields a uniform choice over the whole iterable.
        if random.random() < 1.0 / (remaining - seen):
            return value
    raise Exception("Iterable was empty!")
|
8f97ba985adfa0f4c047f8e177f1e09066a564f2
| 350,981
|
def define_bounds(bounds):
    """
    Extracts bounds from region drawn/defined.

    Works on the ``str()`` representation of a GeoJSON-like draw result,
    so it is tightly coupled to the exact dict layout produced by the
    drawing widget (presumably an ipyleaflet DrawControl result --
    TODO confirm).

    Args:
        `bounds`= json bounds captured (its 'geometry' must not be None)
    Returns:
        `bbox`= (minlong, minlat, maxlong, maxlat) tuple. NOTE: min/max of
            latitude are taken on the raw coordinate *strings* and, per the
            inline comments, "ignore sign" -- for negative latitudes the
            numeric min/max are swapped.
        `bbox2`= the same bounds as a comma-separated string
        `bounds2`= (minlong, maxlong, minlat, maxlat) tuple
    Raises:
        NameError: when no rectangle was selected (geometry is None).
    """
    #ew_poly=GeoJSON(data=dc.last_draw)
    # All parsing below operates on the repr text of the captured result.
    new_poly=str(bounds)
    if("'geometry': None" in new_poly):
        raise NameError('Error: No rectangle selected')
    # Keep only the text after the last 'coordinates': key...
    new_poly=new_poly.rsplit("'coordinates': ", 1)[1]
    # ...then strip brackets and commas, leaving a whitespace-separated
    # flat sequence of "lon lat lon lat ..." strings.
    new_poly=new_poly.replace('[[[','').replace('[','').replace(']]]}})','').replace('],','').replace(',','').split(" ")
    # Longitudes and latitudes alternate in the flattened list.
    longs=new_poly[0::2]
    lats=new_poly[1::2]
    minlong=float(min(longs))
    maxlong=float(max(longs))
    # min()/max() compare the string forms here, hence the "ignores sign"
    # caveat for negative latitudes.
    minlat=float(max(lats)) #ignores sign
    maxlat=float(min(lats)) #ignores sign
    bbox2 = str(minlong)+","+str(minlat)+","+str(maxlong)+","+str(maxlat)
    bbox =(minlong,minlat,maxlong,maxlat)
    bounds2=(minlong,maxlong,minlat,maxlat)
    #bbox =[minlong,minlat,maxlong,maxlat]
    #print(bbox2)
    print("Bounds:", bbox)
    return bbox, bbox2, bounds2
|
d7863135ca0c122acaaae0deb547a30bc763212c
| 205,410
|
def flatten_dict(a_dict, parent_keys=None, current_parent_key=None):
    """Given a dict as input, return a version of the dict where the keys
    are no longer nested, and instead flattened with dot-joined paths.
    EG:
    >>> flatten_dict({"a": {"b": 1}})
    {"a.b": 1}
    NB: The kwargs are only for internal use of the function and should not be
    used by the caller.
    """
    if parent_keys is None:
        parent_keys = []
    for key, value in a_dict.items():
        full_key = "%s.%s" % (current_parent_key, key) if current_parent_key else key
        if isinstance(value, dict):
            # Recurse, accumulating leaves into the shared parent_keys list.
            flatten_dict(value, parent_keys=parent_keys,
                         current_parent_key=full_key)
        else:
            parent_keys.append((full_key, value))
    return dict(parent_keys)
|
cdd69b1d5119d83c36266c13c5bec772ae138cb5
| 684,542
|
def dpdy(a, x, y, order=4):
    """Differential with respect to y
    The polynomial is defined as p(x,y) = a[i,j] * x**(i-j) * y**j, summed over i and j
    Then dp/dy = j * a[i,j] * x**(i-j) * y**(j-1)
    Parameters
    ----------
    a   an array of polynomial coefficients in JWST arrangement.
        The number of coefficients must be (order+1)(order+2)/2
    x   an integer or float variable(or an array of same) representing pixel x positions
    y   a variable (or an array) representing pixel y positions
    order   an integer, the polynomal order
    Returns
    -------
    dpdy    float value of dp/dy for the given (x,y) point(s)
            where p(x,y) is the value of the polynomial
    """
    total = 0.0
    k = 1  # flat coefficient index; a[0] (constant term) has no y dependence
    for i in range(1, order + 1):
        for j in range(i + 1):
            # j == 0 terms vanish under d/dy but still consume an index.
            if j > 0:
                total += j * a[k] * x**(i - j) * y**(j - 1)
            k += 1
    return total
|
b5d4bf2eb03a8583c3bf6c9b93e8384484a42d77
| 473,039
|
def x2trace(distance, interval):
    """Return the trace number closest to a given distance. The input distance
    is effectively dimensionless given that the interval is proportional to it.
    NB: whether the returned trace is a python index or gpr trace is not
    specified here.
    Attributes:
        distance <float>: the distance along a GPR line to locate a trace for;
        interval <float>: the distance interval in meters between adjacent traces.
    Alternative names:
        line_trace
    """
    # round() uses banker's rounding for .5 ties, same as the original.
    return round(distance / interval)
|
7a9af5ff21293566993d2921ac0ff4df3ac780ac
| 567,977
|
def get_field(proto_cls, field_name):
    """Return proto field from class and field name."""
    descriptor = proto_cls.DESCRIPTOR
    return descriptor.fields_by_name[field_name]
|
86bbd22bb02fe84cf9e9f03f8e77be28c328a6ff
| 189,863
|
def default_empty(default):
    """Return a callable that substitutes ``default`` for falsy inputs."""
    def _get(test_value):
        # Truthy values pass through; falsy ones fall back to the default.
        return test_value if test_value else default
    return _get
|
b3ff34d8fb5d46a62ad11a439dad7d03fb4425f2
| 28,515
|
def validate_predefined_argument(argument_name: str, argument_value: object, argument_options: list) -> bool:
    """
    Validate predefined argument is a valid option.
    Args:
        argument_name (str): The name of the argument to validate.
        argument_value (object): The value of the argument to validate.
        argument_options (list): Argument predefined options.
    Returns:
        bool: True if the argument is valid, otherwise raise an exception.
    Raises:
        ValueError: if any value is not among the predefined options.
    """
    if not isinstance(argument_value, list):
        argument_value = [argument_value]
    for value in argument_value:
        if value not in argument_options:
            # ValueError is more precise than the original bare Exception and
            # is still caught by any existing ``except Exception`` handlers.
            # Also fixed: the message was missing the space after the colon.
            raise ValueError(f'Invalid {argument_name} argument. Please provide one of the following options: '
                             f'{str(argument_options)}')
    return True
|
1157ceb56e563701edf564c8ba685b0966320955
| 89,719
|
def to_numpy(a):
    """Convert an iterable of tensors to a list of numpy arrays."""
    result = []
    for tensor in a:
        # Move to host and drop the autograd graph before conversion.
        result.append(tensor.cpu().detach().numpy())
    return result
|
21c3261382e614131fee9c6e2e2b61da3679801a
| 135,819
|
def parse_ipmi_sdr(output):
    """Parse the output of the sdr info retrieved with ipmitool"""
    parsed = []
    for line in output:
        fields = line.split("|")
        if len(fields) < 3:
            continue
        sensor = fields[0].strip()
        if "Not Readable" in line:
            parsed.append(('ipmi', sensor, 'value', 'Not Readable'))
            continue
        tokens = fields[1].split()
        # First token is the reading; any remaining tokens form the unit.
        parsed.append(('ipmi', sensor, 'value', tokens[0]))
        units = " ".join(tokens[1:])
        if units:
            parsed.append(('ipmi', sensor, 'unit', units))
    return parsed
|
7525b21926ced8dc44f4730c283432b869905786
| 672,778
|
def int_to_bytes(value: int) -> bytes:
    """ Return the minimal big-endian bytes for a non-negative integer.
    Note: 0 yields b'' (zero-length), matching (bit_length + 7) // 8. """
    length = (value.bit_length() + 7) // 8
    return value.to_bytes(length, byteorder='big')
|
c4c5c02d437964e006acef6292ffffdf48c20c88
| 577,920
|
import pickle
def load_checkpoints(pklf_name):
    """
    Loads saved checkpoints from a pickle file (used e.g. to repeat the
    analysis part of a BluePyOpt run).
    :param pklf_name: name of the saved pickle file
    :return: (generation, halloffame, logbook, history) tuple of objects
        saved by BluePyOpt"""
    with open(pklf_name, "rb") as handle:
        checkpoint = pickle.load(handle)
    keys = ("generation", "halloffame", "logbook", "history")
    return tuple(checkpoint[key] for key in keys)
|
d485e30ddf99af968d4a20070a3cd4f3f88c0603
| 216,940
|
def convert36ToAscii(start, chs):
    """Convert decimal address to ascii address
    :param start: NBP address in decimal, emitted least-significant digit
        first (Big Endian reversed, as in the original)
    :param chs: string of all digits and ascii upper characters (the base
        alphabet; its length is the numeric base)
    :return: NBP address in ASCII, '' for start == 0
    """
    base = len(chs)
    digits = []
    # Iterative digit extraction, least-significant first -- same output
    # order as the original recursion.
    while start != 0:
        digits.append(chs[start % base])
        start //= base
    return ''.join(digits)
|
e8b00d5d0a49d4f128b84a4c874e268b5d92ca9e
| 648,390
|
from typing import Callable
from typing import Dict
def expected_utility(u: Callable[[float], float], X: Dict[int, float]) -> float:
    """
    Return the expected utility of lottery X under utility function u.

    Parameters
    ----------
    u: Callable[[float], float]
        Utility function.
    X: Dict[int, float]
        Lottery: X[k] = p means outcome k (in yen) occurs with probability p.
        For keys k_1, ..., k_n it must hold that X[k_1] + ... + X[k_n] = 1.

    Returns
    -------
    float
        Expected value of u over X.
    """
    contributions = (probability * u(outcome) for outcome, probability in X.items())
    return sum(contributions)
|
50e9bbd5b5c4df8cf706c1050300931e0d79e46a
| 560,282
|
import re
def convert_to_seconds(duration_str) -> int:
    """
    Parse a duration for (s)econds, (m)inutes, (h)ours, (d)ays, (w)eeks
    and return it in seconds. Unrecognized input yields 0.
    """
    # Plain number of seconds, no suffix.
    if re.match(r"[0-9]+$", duration_str):
        return int(duration_str)
    multipliers = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 7 * 86400}
    for suffix, multiplier in multipliers.items():
        if re.match(r"[0-9]+" + suffix + "$", duration_str):
            return multiplier * int(duration_str[:-1])
    return 0
|
e46ccb0d7b51c8c805f44b77f23fdfb18f6e0d97
| 374,013
|
def fn_filter_words_in_column(df, col_to_search, search_words, any_words=True):
    """Filter for any (default) or all words in a dataframe column. Case insensitive.
    df: dataframe to search
    col_to_search: column name to search as a str
    search_words: list of search words
    any_words: Default True to search for any word. False will search to match all words.
    """
    # Preserve the original strict ``== True`` check (truthy non-True values
    # fall through to the all-words branch).
    combine = any if any_words == True else all
    lowered = [word.lower() for word in search_words]
    mask = df[col_to_search].str.lower().apply(
        lambda text: combine(word in text for word in lowered))
    return df.loc[mask]
|
c90fbc0ef0df1859049b37a90005d3ff901edfcf
| 134,005
|
def lda_filter_articles(ids, articles):
    """From sets of ids to set of sentences (str)
    :argument ids: dictionary (keys are topics and values are ids of articles)
    :argument articles: list of str
    :returns a dictionary, keys are topics and values are sentences (str)
    """
    return {
        topic: ' '.join(article
                        for index, article in enumerate(articles)
                        if index in topic_ids)
        for topic, topic_ids in ids.items()
    }
|
20fbb9b5c64c491654b5999e93580167213c4f8e
| 263,615
|
def intersect(list1, list2):
    """
    Compute the intersection of two sorted lists.
    Returns a new sorted list containing only elements that are in
    both list1 and list2.
    """
    result = []
    i = j = 0
    n1, n2 = len(list1), len(list2)
    # Classic two-pointer merge walk over both sorted lists.
    while i < n1 and j < n2:
        a, b = list1[i], list2[j]
        if a < b:
            i += 1
        elif a > b:
            j += 1
        else:
            result.append(a)
            i += 1
            j += 1
    return result
|
36ead411915016330387e6e589a95cac9ebb62b7
| 518,222
|
from typing import Dict
from typing import List
def _get_available_sensor_identifier(pID_dict: Dict[str, Dict[str, List[str]]]) -> str:
"""Returns an available sensor identifier that has been recorded. Will favor
"name" over "mapped_id" over "sensor_id" over "number".
Parameters
----------
pID_dict: Dict[str, Dict[str, List[str]]],
Nested dictionary that stores information about the pIDs from every player-
identifying column in every group.
'pID_dict[group][identifying_column] = [pID1, pID2, ..., pIDn]'
When recording and exporting Kinexon data, the pID can be stored
in different columns. Player-identifying columns are "sensor_id", "mapped_id",
and "full_name". If the respective column is in the recorded data, its pIDs are
listed in pID_dict.
Returns
-------
identifier: str
One sensor identifier that has been recorded.
"""
player_identifiers = ["name", "mapped_id", "sensor_id", "number"]
available_identifier = [
idt for idt in player_identifiers if idt in list(pID_dict.values())[0]
]
identifier = available_identifier[0]
return identifier
|
d7e92d84694b52fa1847801a833b8679be272a6a
| 554,908
|
def invert_dict(d):
    """Inverts a dictionary, returning a map from val to a list of keys.
    If the mapping key->val appears in d, then in the new dictionary
    val maps to a list that includes key.
    d: dict
    Returns: dict
    """
    inverse = {}
    for key, val in d.items():
        inverse.setdefault(val, []).append(key)
    return inverse
|
b9ec2128b8035a2c122bfd177fe92426a942e74d
| 416,423
|
def turn_Series_into_string(series_obj):
    """
    Turn a pandas Series object (of strings)
    into one large newline-terminated string. Intended for text columns
    in pandas DataFrames.
    :param series_obj : a pandas Series object (usually text column)
    :returns long_string : string of the series_obj, or None on failure
    """
    try:
        # str.join is O(n); the original += loop was quadratic.
        return "".join(value + "\n" for value in series_obj.values)
    except Exception:
        # Narrowed from a bare ``except`` (which also swallowed
        # KeyboardInterrupt/SystemExit); the message previously named a
        # non-existent function ("turn_into_string").
        print("<!>ERROR in turn_Series_into_string()")
        return
|
a4dcbd86aeed245f15590604cc19c776ed65d8ad
| 400,230
|
def parse(utxo, offset=0):
    """ Parses a given serialized UTXO to extract a base-128 varint.
    :param utxo: Serialized UTXO from which the varint will be parsed.
    :type utxo: hex str
    :param offset: Offset where the beginning of the varint is located in the UTXO.
    :type offset: int
    :return: The extracted varint, and the offset of the byte located right after it.
    :rtype: hex str, int
    """
    i = 0
    ret = 0
    go = True
    while go:
        # Bug fix: honour ``offset`` -- the original ignored it and always
        # parsed from the start of the UTXO.
        next_byte = ord(utxo[offset + i])
        go = bool(next_byte & 0x80)  # MSB set => another byte follows
        # '+ go' implements the "+1 per continuation byte" variant of the
        # base-128 encoding used by this serialization.
        ret = (ret << 7 | next_byte & 0x7f) + go
        i += 1
    # Return the offset of the byte right after the varint, as documented
    # (the original returned the byte count regardless of offset).
    return ret, offset + i
|
6330898ae2113370b1f1ec5e2b8ba0ec326eb48d
| 689,066
|
def ubtou(str_in):
    """ Shorthand for decoding bytes as UTF-8; non-bytes pass through. """
    if isinstance(str_in, bytes):
        return str_in.decode('utf-8')
    return str_in
|
9f9949e2b61cc73d4edde7295fd809f212414289
| 376,316
|
def get_base_model(model):
    """
    For the given model, return the highest concrete model in the inheritance tree -
    e.g. for BlogPage, return Page
    """
    parents = model._meta.parents
    if not parents:
        return model
    # The first entry of the parent list is the top of the concrete tree.
    return model._meta.get_parent_list()[0]
|
bd8d717ae6806859e6ea7461e45ce0a0eb6cb922
| 468,926
|
import re
def get_genres_from_soup(soup):
    """Get the genres of a book.
    Parameters
    ----------
    soup : BeautifulSoup
        BeautifulSoup object created from a book page.
    Returns
    -------
    list
        Book genres.
    """
    elements = soup.find_all('a', {'href': re.compile('/genres/')},
                             class_='bookPageGenreLink')
    return [element.get_text() for element in elements]
|
16db0fc8cb58cdcf19aa89ea8fef27078d33a390
| 701,661
|
def check_if_site_is_ordered(structure, site_index):
    """Checks if a given site in a given structure is fully ordered.
    Args:
        structure (pymatgen.Structure): A pymatgen structure.
        site_index (int): The site index.
    Returns:
        (bool)
    """
    site = structure[site_index]
    # A fully ordered site hosts exactly one element species.
    return len(site.species.elements) == 1
|
c73b2839bccbb1d520287188656666ec035bfa13
| 258,816
|
from typing import List
from typing import Tuple
def filter_multiple_pronunications(
    lexicon: List[Tuple[str, List[str]]]
) -> List[Tuple[str, List[str]]]:
    """Remove multiple pronunciations of words from a lexicon.
    If a word has more than one pronunciation in the lexicon, only
    the first one is kept, while other pronunciations are removed
    from the lexicon.
    Args:
      lexicon:
        The input lexicon, containing a list of (word, [p1, p2, ..., pn]),
        where "p1, p2, ..., pn" are the pronunciations of the "word".
    Returns:
      Return a new lexicon where each word has a unique pronunciation.
    """
    first_seen = {}
    for word, tokens in lexicon:
        # dict preserves insertion order, so the first pronunciation wins
        # and the original word order is kept.
        if word not in first_seen:
            first_seen[word] = tokens
    return list(first_seen.items())
|
faa106c3b16e7efbcd49471714260cb734f72385
| 666,935
|
def wrap(func, keys):
    """
    Primitive to wrap the output of a function into dictionary keys.
    Args:
        func: The function to enable output wrapping on.
        keys: The name of the keys to use in the output dictionary.
    """
    # Deliberately check the exact type (not isinstance) so that tuple
    # subclasses such as namedtuples are treated as single values,
    # matching the original semantics.
    if type(keys) is not tuple:
        keys = [keys]
    def _wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        if type(result) is not tuple:
            result = [result]
        return dict(zip(keys, result))
    return _wrapped
|
f3fcdfb2a2796fc09ff48386ac5e4131118cd041
| 109,040
|
def goal_key(tup):
    """Sort goals by position first and by id second (str ids use their
    integer prefix before the first underscore)."""
    goal_id, goal_pos = tup
    if not isinstance(goal_id, str):
        return goal_pos, goal_id
    numeric_id = int(goal_id.split("_")[0])
    return goal_pos, numeric_id
|
88e51faf9ea6395cfb636b4c88057a662a2041c4
| 177,517
|
def get_games_for_module(module, active_only=False):
    """
    For a given module return a queryset of related game postings,
    optionally excluding closed/cancelled ones.
    """
    games = module.gameposting_set.all()
    if not active_only:
        return games
    return games.exclude(status__in=['closed', 'cancel'])
|
a59798d7b829448670d89d281dfdd8e4d9a1989f
| 255,015
|
def list_to_tuple(x):
    """ Convert a list to a tuple recursively. """
    assert isinstance(x, list)
    converted = []
    for item in x:
        converted.append(list_to_tuple(item) if isinstance(item, list) else item)
    return tuple(converted)
|
38fded1bd81fd529fddfd33030c452bfd00bc898
| 198,173
|
import torch
def get_all_predictions(model, loader, device):
    """Run the model over a dataloader and collect predictions and targets.

    Args:
        model (Net): trained model.
        loader (Dataloader): dataloader yielding (data, target) batches.
        device (str): which device to use, cuda/cpu.

    Returns:
        tuple: (all predicted class indices, all targets), each a tensor
        on ``device``.
    """
    model.eval()
    preds_so_far = torch.tensor([]).to(device)
    targets_so_far = torch.tensor([]).to(device)
    with torch.no_grad():
        for batch, labels in loader:
            batch, labels = batch.to(device), labels.to(device)
            targets_so_far = torch.cat((targets_so_far, labels), dim=0)
            # Predicted class = index of the highest logit per sample.
            batch_preds = model(batch).argmax(dim=1)
            preds_so_far = torch.cat((preds_so_far, batch_preds), dim=0)
    return preds_so_far, targets_so_far
|
aca84ffaa16408bc691d60c8207318c7e214a509
| 669,643
|
def get_utm_crs(lon_center, lat_center):
    """Derive CRS for UTM zone matching coordinates

    Parameters
    ----------
    lon_center, lat_center : float
        coordinates of center of image

    Returns
    -------
    dict
        init: epsg code
    """
    # EPSG 327xx = UTM south, 326xx = UTM north.
    if lat_center < 0:
        base = 32700
    else:
        base = 32600
    # Zones are 6 degrees wide, numbered 1..60 starting at lon -180.
    zone = int((180 + lon_center) / 6) + 1
    return {'init': 'epsg:%d' % (base + zone)}
|
54a068e9786a870f9eef9c7c82270f776acf0bb1
| 186,664
|
def nset(dic, keys, val):
    """ Set a nested key-value pair - in-place.
    No-op when a value already exists.
    Example:
      x = {}
      nset(x, ['a', 'b', 'c'], 0)
      print(x)
      > {'a': {'b': {'c': 0}}}
    """
    node = dic
    # Walk (creating as needed) down to the dict holding the final key.
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    leaf = keys[-1]
    # setdefault keeps any pre-existing value (the documented no-op).
    node[leaf] = node.setdefault(leaf, val)
    return node
|
943dd923e0c6b6232d33dd0238f1e8af05c74c36
| 217,576
|
def strfdelta(tdelta, fmt):
    """Utility function. Formats a `timedelta` into human readable string."""
    # timedelta normalizes itself to days + seconds; derive h/m/s from seconds.
    hours, remainder = divmod(tdelta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return fmt.format(days=tdelta.days, hours=hours,
                      minutes=minutes, seconds=seconds)
|
0057311e65dc8517a46ff4bbaff53b4e99417187
| 633,847
|
def request_is_from_scholarship_head(request):  # pylint: disable=invalid-name
    """
    Returns whether the request from the scholarship head user.
    Returns: bool
    """
    required_perm = "Scholarship.scholarship_head"
    return request.user.has_perm(required_perm)
|
f4044f92c5fc88497fbd19c7a3b7da0b33f2abb8
| 231,982
|
def keyword_filter(keys, **kwargs):
    """
    Find first instance of a key in kwargs and return the matching key/value pair.

    :param keys: Iterable containing keys we which is search kwargs for.
    :param kwargs: Mapping which contains key/value pairs we're filtering down.
    """
    for key in keys:
        value = kwargs.get(key)
        # Only truthy values count as a match (falsy/missing are skipped).
        if value:
            return key, str(value)
    return None, None
|
b7ea0a078cb04556076a449aed1744267a62ba50
| 238,888
|
def is_coloring(G, coloring):
    """Determine if the coloring is a valid coloring for the graph G."""
    # Proper coloring: no edge may join two vertices of the same color.
    return all(coloring[u] != coloring[v] for (u, v) in G.edges)
|
b10a934f1c9f229c85dcf7ba4c5e5304c9c1afed
| 596,612
|
def get_resolution(wofls):
    """Get the resolution for a WOfLs product."""
    if wofls == 'ga_ls_wo_3':
        return (-30, 30)
    if wofls == 'wofs_albers':
        return (-25, 25)
    if wofls == 'ga_s2_wo_3':
        return (-10, 10)
    # Match the dict-lookup behavior of the original for unknown products.
    raise KeyError(wofls)
|
45d45585f0ae1f60e3089a6e2f4fe771327b3abe
| 79,588
|
def round_updown_to_x(num_in, x, direction="up"):
    """
    Rounds a given value (num_in) to the nearest multiple of x, in a given
    direction (up or down)
    :param num_in: Input value
    :param x: Value to round to a multiple of
    :param direction: Round up or down. Default: 'up'
    :return: Rounded number
    """
    step = int(x)
    if direction == "down":
        truncated = int(num_in)
        return truncated - truncated % step
    # NOTE: raw x (not step) in the numerator matches original semantics.
    return num_in + (x - num_in) % step
|
206b582843eea234b5b655dddd54eea6e32eec42
| 701,128
|
def calc_precision(tp: int, fp: int) -> float:
    """Calculate Precision.

    Args:
        tp (int): amount of TP.
        fp (int): amount of FP.

    Returns:
        float: precision for the given amounts of TP and FP.
    """
    total = tp + fp
    # Guard against a zero division when there are no positives at all.
    if total == 0:
        return 0
    return float(tp / total)
|
60a5908da00f20c718c17bd5d3cd1306751256e0
| 122,992
|
import csv
def get_rows(csv_file):
    """Open csv file and return contents as list of lists."""
    with open(csv_file, newline='') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row (no-op on empty files)
        return list(reader)
|
3020f124645bb12bc475abaa9e7ce271cfda8a74
| 182,210
|
def framework_monitor_context_switch(client, enabled=None):
    """Query or set state of context switch monitoring.

    Args:
        enabled: True to enable monitoring; False to disable monitoring; None to query (optional)

    Returns:
        Current context switch monitoring state (after applying enabled flag).
    """
    # Omitting 'enabled' entirely turns the call into a pure query.
    params = {} if enabled is None else {'enabled': enabled}
    return client.call('framework_monitor_context_switch', params)
|
977e4372ba93f299f63aeeceec94084333e6d360
| 254,519
|
def get_side_nav_menu(html_soup):
    """
    Returns -> [list] list of side navigation menus[type dict]
    Menu -> [Dict] attrs:
            'name' -> [str] name of menu
            'link' -> [str] link for the menu
            'image' -> [str] URL for menu image
    Params -> [requests.HTML object] HTML of web page.
    """
    # The <ul id="suckertree1"> element holds every navigation entry.
    nav_container = html_soup.find("ul#suckertree1", first=True)
    menus = []
    for item in nav_container.find("li"):
        anchor = item.find("a", first=True)
        menus.append({
            # hrefs are relative, so prefix the page URL to build full links;
            # image srcs are kept as-is.
            "name": anchor.text,
            "link": html_soup.url + anchor.attrs.get("href"),
            "image": item.find("img", first=True).attrs.get("src"),
        })
    return menus
|
bf222548b9b55dd032c76e52cfce73570b12f9fe
| 574,322
|
def dedup(records):
    """Remove any identical records from the list.

    Args:
        records (list(dict)): the list of dicts to be filtered.

    Returns:
        list(dict): the list of records with any duplicates removed.

    The list returned contains records in the same order as the original list.
    """
    unique = []
    seen_keys = set()
    for rec in records:
        # A sorted item tuple is a hashable, order-independent fingerprint.
        fingerprint = tuple(sorted(rec.items()))
        if fingerprint in seen_keys:
            continue
        seen_keys.add(fingerprint)
        unique.append(rec)
    return unique
|
f3aadddf1458a08d36331a74722e12057d8ab8f9
| 12,689
|
def is_ethiopia_dataset(name):
    """Names with 'TR' at start or Ethiopia"""
    if name.upper().startswith('TR'):
        return True
    return 'ethiopia' in name.lower()
|
81f9981a4389f9c9fe7a19f23f8a66541116b04d
| 673,676
|
def cmd_opts(request):
    """Returns pytest request options object"""
    config = request.config
    return config.option
|
61c9b2f14d9efc90a648dc3a0ce485c7dd18a7ec
| 534,773
|
def clean_outcomert(row):
    """
    Intended for use with DataFrame.apply()
    Returns an array with 'outcomert' converted from milliseconds to seconds if the 'study' variable is '3'
    """
    # Studies numbered 3 and above stored reaction times in milliseconds;
    # earlier studies are passed through untouched.
    if int(row['study']) < 3:
        return row['outcomert']
    return float(row['outcomert'] * .001)
|
a6f2ea1d688555796e56caf3be9f1390fea83cea
| 524,762
|
def strip_plus1(number: str) -> str:
    """
    Strip leading "+1-" if present. NANP numbers on the platform seem to be stored as 10D only

    :param number: phone number string; may be None or empty (returned unchanged).
    :return: the number with any leading "+1-" removed.
    """
    # Explicit conditional replaces the `a and b or c` idiom, which
    # mis-handled the pathological input "+1-": the empty tail was falsy,
    # so the expression fell through and returned the prefix unstripped.
    if number and number.startswith('+1-'):
        return number[3:]
    return number
|
298872bccd693f3d67df09c9d77c02dce72e5711
| 51,528
|
import re
def regex_replacement_stack(replacements):
    """
    Make a function that applies a sequence of regex replacements to text.
    """
    # Compile every pattern once, up front, rather than on each call.
    stack = [(re.compile(pattern), repl) for pattern, repl in replacements]

    def _replace(text):
        for pattern, repl in stack:
            text = pattern.sub(repl, text)
        return text

    return _replace
|
5075749cd78263a7805e1820280fadba819c75cb
| 505,182
|
def replace_header(headers, name, value):
    """
    Updates the headers replacing the first occurance of the given name
    with the value provided; asserting that no further occurances
    happen. Note that this is _not_ the same as remove_header and then
    append, as two distinct operations (del followed by an append) are
    not atomic in a threaded environment. Returns the previous header
    value for the provided name, if any. Clearly one should not use
    this function with ``set-cookie`` or other names that may have more
    than one occurance in the headers.
    """
    name = name.lower()
    result = None
    found = False
    for i, (key, val) in enumerate(headers):
        if key.lower() == name:
            # Track "seen" with an explicit flag: the previous
            # `assert not result` mistook an empty-string header value
            # for "not found", which both defeated the duplicate
            # assertion and caused a duplicate header to be appended.
            assert not found, "two values for the header '%s' found" % name
            found = True
            result = val
            headers[i] = (name, value)
    if not found:
        headers.append((name, value))
    return result
|
11ea82f897e6a080a643da8bd36ee1aa9fa9b9c2
| 146,903
|
def apply_filter(filter_conf: dict, name: str) -> bool:
    """Given a filter configuration, determine if the name should
    be included or excluded.

    Confer the default_filter function for the structure of a
    filter configuration.

    Returns:
        A flag indicating if the table is ok to be included.
    """
    mode = filter_conf["type"]
    if mode == "include":
        return name in filter_conf["items"]
    if mode == "exclude":
        return name not in filter_conf["items"]
    # Any other filter type is a configuration error.
    raise ValueError
|
66256ce98085e7d83f0bf6565baddfc859de2c80
| 371,771
|
def read_input(path: str) -> list:
    """
    Read game board file from path.

    Return list of str.
    """
    with open(path, 'r', encoding='utf-8') as board_fl:
        # Strip the trailing newline from each line; the final line of the
        # file may legitimately lack one.
        return [ln[:-1] if '\n' in ln else ln for ln in board_fl]
|
c0b22624c3b313f81b347cf8d6469044364e8676
| 435,426
|
import zlib
def complexity(s):
    """Compress string and return the size of the compression."""
    # Encode first: zlib works on bytes, not str (Python 3).
    compressed = zlib.compress(s.encode("utf-8"))
    return float(len(compressed))
|
11986b3f56761fa50e90005093d1b205bd2841f5
| 574,599
|
def num_coin_changes_dp(amount, coins):
    """Number of coin changes by bottom-up dynamic programming.

    Counts combinations of coins (order does not matter).

    Time complexity: O(a*n), where a is amount, and n is number of coins.
    Space complexity: O(a). Each row of the classical (n+1)x(a+1) table
    depends only on the previous row and on earlier entries of the same
    row, so a single rolling row suffices.
    """
    # ways[a] = number of ways to make amount `a` with the coins seen so far.
    ways = [0] * (amount + 1)
    ways[0] = 1  # exactly one way to make 0: use no coins
    for coin in coins:
        # Iterating amounts upward lets each coin be reused unlimited times.
        for a in range(coin, amount + 1):
            ways[a] += ways[a - coin]
    return ways[amount]
|
b472e0ff2e43a26cb16d0dade972d27ae4eec508
| 436,590
|
def chao1_var_no_doubletons(singles, chao1):
    """Calculates chao1 variance in absence of doubletons.

    From EstimateS manual, equation 7.
    chao1 is the estimate of the mean of Chao1 from the same dataset.
    """
    s = float(singles)
    term1 = s * (s - 1) / 2
    term2 = s * (2 * s - 1) ** 2 / 4
    term3 = s ** 4 / (4 * chao1)
    return term1 + term2 - term3
|
6b93743a35c70c9ed5b9f3fc9bece1e9363c5802
| 708,619
|
def _sort_dict_by_keys(dictionary: dict) -> dict:
"""
Utility method to recursively sort a dictionary by it's keys.
Keys are sorted alphabetically in ascending order.
:type dictionary: dict
:param dictionary: input dictionary
:rtype dict
:return dictionary sorted by keys
"""
return {k: _sort_dict_by_keys(v) if isinstance(v, dict) else v for k, v in sorted(dictionary.items())}
|
bfd7a7a4166947509bb34f8c713accd595d8ec9d
| 434,994
|
def get_passive_action(df, trial, sample):
    """Get data for a replay.

    Parameters
    ----------
    df : pandas.DataFrame
        Data to be replayed
    trial, sample : int
        Indices into the data for which to fetch actions

    Returns
    -------
    keys_rts : list of tuples
        each entry in the list contains a tuple of the pressed key and the
        reaction time associated with the keypress.
    """
    admissible = ['sample', 'stop', 'forced_stop', 'premature_stop']
    # Restrict to this trial's rows with a replayable action type.
    subset = df[(df['trial'] == trial) & (df['action_type'].isin(admissible))]
    idx = int(sample)
    action_code = int(subset['action'].tolist()[idx])
    rt = float(subset['response_time'].tolist()[idx])
    # Map the numeric action code to its keyboard key.
    key_names = {0: 'left', 1: 'right', 2: 'down'}
    return [(key_names[action_code], rt)]
|
9985fa075b0864c9707aff5d234e93512df23424
| 256,531
|
import torch
def place_fourth_atom(a_coord: torch.Tensor, b_coord: torch.Tensor,
                      c_coord: torch.Tensor, length: torch.Tensor,
                      planar: torch.Tensor,
                      dihedral: torch.Tensor) -> torch.Tensor:
    """
    Given 3 coords + a length + a planar angle + a dihedral angle, compute a fourth coord

    Places point D relative to the frame defined by A, B, C using the C-D
    distance (``length``), the planar angle at C (``planar``), and the
    dihedral angle about the B-C axis (``dihedral``).  Coordinates appear
    to use a trailing dimension of size 3 (the cross products and
    ``dim=-1`` norms rely on it — TODO confirm against callers); all
    inputs are expected to broadcast against each other.
    """
    # Unit vector along the B->C bond (the dihedral rotation axis).
    bc_vec = b_coord - c_coord
    bc_vec = bc_vec / bc_vec.norm(dim=-1, keepdim=True)
    # Unit normal of the A-B-C plane; `expand` broadcasts the A->B vector
    # to bc_vec's shape before taking the cross product.
    n_vec = (b_coord - a_coord).expand(bc_vec.shape).cross(bc_vec)
    n_vec = n_vec / n_vec.norm(dim=-1, keepdim=True)
    # Orthonormal frame at C: bond direction, in-plane perpendicular, normal.
    m_vec = [bc_vec, n_vec.cross(bc_vec), n_vec]
    # Components of the C->D displacement expressed in that local frame.
    d_vec = [
        length * torch.cos(planar),
        length * torch.sin(planar) * torch.cos(dihedral),
        -length * torch.sin(planar) * torch.sin(dihedral)
    ]
    d_coord = c_coord + sum([m * d for m, d in zip(m_vec, d_vec)])
    return d_coord
|
928fb14497e8856a8edcfdd7ac233207ee32ee4b
| 527,467
|
def diff(it1, it2):
    """Find the differences between two iterables"""
    # Materialize it2 as a set for O(1) membership tests.
    exclude = set(it2)
    return [item for item in it1 if item not in exclude]
|
163a445679bcaee0ce8aa7db320456ecee49d794
| 43,472
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.