content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import math
def atan_deg(value):
    """Return the arctangent of *value* as an angle in degrees."""
    radians = math.atan(value)
    return math.degrees(radians)
|
ad7f79e4ebbfc364bddb5f20afd01b8cf70f9c08
| 30,400
|
def guardian(targetstr, ng_patterns):
    """Return the NG patterns that occur inside *targetstr*.

    @param targetstr The string to scan.
    @param ng_patterns A string list of forbidden substrings.
    @return List of matched patterns, or None if no pattern matched.
    """
    # `pattern in targetstr` is the idiomatic (and faster) substring test
    # compared to the original targetstr.find(pattern) != -1.
    contains = [pattern for pattern in ng_patterns if pattern in targetstr]
    return contains or None
|
cbcf1006c323d2b98ef5b825981de9d87315d8f8
| 30,401
|
def _add_extension_assets(client, customer_id, language_code):
    """Creates new assets for the hotel callout.
    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID.
        language_code: the language of the hotel callout feed item text.
    Returns:
        a list of asset resource names.
    """
    operations = []
    # Create a hotel callout asset operation for each of the below texts.
    for text in ["Activities", "Facilities"]:
        operation = client.get_type("AssetOperation")
        asset = operation.create
        asset.hotel_callout_asset.text = text
        asset.hotel_callout_asset.language_code = language_code
        operations.append(operation)
    asset_service = client.get_service("AssetService")
    # Issues the create request to create the assets.
    # All operations are sent in one mutate call to the API.
    response = asset_service.mutate_assets(
        customer_id=customer_id, operations=operations
    )
    resource_names = [result.resource_name for result in response.results]
    # Prints information about the result.
    for resource_name in resource_names:
        print(
            "Created hotel callout asset with resource name "
            f"'{resource_name}'."
        )
    return resource_names
|
bc213f9be915845b9ebb977a8b10ec941a640089
| 30,402
|
import math
import numpy
def vector_from_angle(degrees):
    """Returns a unit vector in the xy-plane rotated by the given degrees from
    the positive x-axis.

    Args:
        degrees: rotation angle in degrees (counter-clockwise).

    Returns:
        3-tuple (x, y, z); z is always 0.0.
    """
    radians = math.radians(degrees)
    # Applying a z-axis rotation matrix to the unit x vector yields exactly
    # (cos, sin, 0); computing it directly avoids the 4x4 homogeneous
    # matrix product the original built with numpy.
    return (math.cos(radians), math.sin(radians), 0.0)
|
7fa35acc01a4cd61665cfa03cfa8e89e354ab4a6
| 30,403
|
def file_location():
    """Prompt the user for the complete location of a file and return it.

    Returns:
        str: the raw path string entered by the user.

    Note: the original wrapped input() in a SyntaxError retry loop. That
    guarded against Python 2's input(), which *evaluated* the typed text;
    in Python 3, input() returns the raw string and cannot raise
    SyntaxError, so the handler was dead code and the loop always exited
    on the first entry.
    """
    print('\nPlease enter the COMPLETE location of your file in one of these formats:')
    return input('C:\\\\Users\\\\Admin\\\\file.txt OR C:/Users/Admin/file.txt: ')
|
aedb23e683aecba1767968accb43913e20143b19
| 30,404
|
import math
def subtended_angle(size, distance):
    """
    Small-angle approximation of the angle subtended by a sphere of the
    given size at the given distance:
    https://en.wikipedia.org/wiki/Subtended_angle. Used to determine the
    size of the bounding box located at a certain position in the image.

    Returns the angle in degrees.
    """
    ratio = math.fabs(size) / math.fabs(distance)
    return math.degrees(ratio)
|
46b74d3fa14321e3f7004ec0fdcc6b8875abc50e
| 30,405
|
import csv
def read(path):
    """
    Read a tab-separated file into a list of dictionaries.

    All returned values will be strings.
    Will assume that first row of file contains column names.

    Args:
        path: path of the UTF-8 encoded, tab-delimited file.

    Returns:
        list of dicts, one per data row, keyed by the header columns.
    """
    result = []
    # `with` guarantees the file handle is closed even when parsing raises
    # (the original leaked the handle on any mid-read exception).
    with open(path, 'r', encoding='utf-8') as file:
        reader = csv.reader(file, delimiter='\t', quotechar='', quoting=csv.QUOTE_NONE)
        header = next(reader)  # idiomatic next() instead of reader.__next__()
        for values in reader:
            entry = {}
            # Indexing by header length preserves the original behavior of
            # raising IndexError on rows shorter than the header.
            for i in range(len(header)):
                entry[header[i]] = values[i]
            result.append(entry)
    return result
|
0506a59d28c9ee750dd6045217319097bfa9c662
| 30,406
|
def lab_monthly(dataframe):
    """
    Extract monthly data from a labstat file.

    Adds an integer ``month`` column parsed from ``period`` (dropping its
    leading letter), casts ``value`` to int, and drops annual-summary rows
    (period M13).

    Parameters:
        dataframe (dataframe): A dataframe containing labstat data
    Returns:
        dataframe
    """
    dataframe['month'] = dataframe['period'].str[1:].copy().astype(int)
    dataframe['value'] = dataframe['value'].astype(int)
    monthly_only = ~dataframe['month'].isin([13])
    return dataframe.loc[monthly_only]
|
440c8502558c7b3746814706b2c4267b87e9d399
| 30,410
|
def find_empty_square(board):
    """
    Return a (row, column) tuple for the first square equal to 0, scanning
    in row-major order; return False when no square is empty.
    """
    empties = (
        (r, c)
        for r, row in enumerate(board)
        for c, value in enumerate(row)
        if value == 0
    )
    return next(empties, False)
|
98b4b50527a33abec3134a22c5a9cff5a874b171
| 30,412
|
import json
import requests
def post(key, url, contents):
    """Updates contents of an entry in Stache.
    Arguments:
        key: The stache X-STACHE-KEY.
        url: The stache endpoint.
        contents: The body of the post to update stache entry with.
    Returns:
        Response to post message.
    Requirements:
        Url must be full endpoint, including https://stache...
        Contents must be a dictionary, containing at least a 'secret' key.
    """
    # Stache expects the secret as a JSON-encoded string, so serialize it
    # in place before sending.
    contents["secret"] = json.dumps(contents["secret"])
    response = requests.post(url, json=contents, headers=key)
    # NOTE(review): if this raises, `contents` is left with the serialized
    # secret — the caller's dict is mutated without being restored.
    response.raise_for_status()
    # Restore the caller's dict to its original deserialized shape.
    contents["secret"] = json.loads(contents["secret"])
    return response
|
93128fc330943d476b298e3ecb9d28ebfe75e1cf
| 30,413
|
from functools import reduce
def pair_up(rows, fieldname):
    """ Combine rows to get all fields in one record
    Due to the structure of the ADSB protocol, rows have EITHER position or speed/steer/etc.
    This combines rows so that the new fake rows have both.

    Args (NOTE(review): inferred from usage here — confirm against callers):
        rows: iterable of dict-like rows with 'lon', 'lat', 'parsed_time',
            'corrected_time' and *fieldname* keys.
        fieldname: the non-position field to pair with each position row.

    Returns:
        list of synthetic rows carrying lat/lon/corrected_time plus fieldname.
    """
    def _pair_up (memo, row):
        # Remember the most recent position row and the most recent row
        # carrying the field of interest.
        if row["lon"]:
            memo["last_lon"] = row
        if row[fieldname]:
            memo["last_of_interest"] = row
        # Emit a combined row only when both halves exist and their
        # timestamps are within 10 seconds; emitting consumes both halves.
        if memo["last_lon"] and memo["last_of_interest"] and abs(memo["last_lon"]["parsed_time"] - memo["last_of_interest"]["parsed_time"]).total_seconds() < 10:
            memo["pairs"].append({"lat": memo["last_lon"]["lat"], "lon": memo["last_lon"]["lon"], "corrected_time": memo["last_lon"]["corrected_time"], fieldname: memo["last_of_interest"][fieldname]})
            memo["last_lon"] = None
            memo["last_of_interest"] = None
        return memo
    viable_rows = reduce(_pair_up, rows, {"last_lon": None, "last_of_interest": None, "pairs": []})["pairs"]
    return viable_rows
|
1df3a0573674e38bf9f359db8480b11a44534835
| 30,417
|
def user_can_edit_setting_type(user, model):
    """Return True when *user* holds the change permission for *model*."""
    permission = "%s.change_%s" % (model._meta.app_label, model._meta.model_name)
    return user.has_perm(permission)
|
cfc2cde620718635493374ef62966539f25ba362
| 30,418
|
import sys
import traceback
def format_exception_info():
    """
    Format the exception currently being handled.

    Returns the list of traceback lines produced by
    traceback.format_exception for sys.exc_info().
    """
    return traceback.format_exception(*sys.exc_info())
|
64d818a11868652dac6fd2a896a6aa3030a6c6b6
| 30,420
|
def verify_notebook_name(notebook_name: str) -> bool:
    """Verification based on notebook name.

    :param notebook_name: Notebook name by default keeps convention:
        [3 digit]-name-with-dashes-with-output.rst,
        example: 001-hello-world-with-output.rst
    :type notebook_name: str
    :returns: Return if notebook meets requirements
    :rtype: bool
    """
    prefix = notebook_name[:3]
    suffix = notebook_name[-4:]
    return prefix.isdigit() and suffix == ".rst"
|
87a839ddffc32613d74775bf532953c8263093cd
| 30,421
|
import os
def cleanup_path(x):
    """Clean up a relative path.

    Converts any os.path.sep to '/' (relevant on Windows), strips trailing
    separators, maps '.' to '', and appends a trailing '/' to any
    non-empty result. Falsy inputs are returned unchanged.
    """
    if not x:
        return x
    x = x.rstrip(os.path.sep).replace(os.path.sep, '/')
    if x == '.':
        return ''
    return x + '/' if x else x
|
3a401ddad4ef54299cc0ae7c25946cf92c989a7a
| 30,422
|
import json
def str_to_json(s):
    """
    Deserialize JSON formatted string 's' to a dict.

    Args:
        s: JSON document to be deserialized, as str, bytes or bytearray.

    Examples:
        >>> str_to_json('{}')
        {}
        >>> str_to_json('{"a": 1, "c": {"d": 3}, "b": 2}') == {'a': 1, 'c': {'d': 3}, 'b': 2}
        True

    Raises:
        json.JSONDecodeError (a ValueError) for invalid documents.
    """
    # The original called s.decode() unconditionally, which raises
    # AttributeError for str input in Python 3; json.loads accepts both
    # str and bytes directly.
    return json.loads(s)
|
95feed74f9ddda9f3db8b5e03d52bf19fe1538fe
| 30,423
|
def can_auto_accept_record_request(user_request, user, action):
    """
    Return True if `user_request` (RecordRequest or Record DeleteRequest) being
    done by `user` can be auto accepted for `action`.
    Skipping corner cases (included in code below) this checks:
    - if user has access to Record AND
    - if user has access to Domain (which Record belongs to)
    """
    def _validate_domain(domain):
        # Guard: auto-acceptance cannot be evaluated without a domain.
        if not domain:
            raise Exception(
                "Can't check auto acceptance without domain set"
            )
    can_auto_accept = False
    # Delete requests keep their domain on the target record; create and
    # update requests carry the domain directly.
    domain = (
        user_request.domain
        if action != 'delete' else user_request.target.domain
    )
    _validate_domain(domain)
    if action == 'create':
        can_auto_accept = (
            user_request.domain.can_auto_accept(user) and
            not user_request.is_sec_acceptance_required()
        )
    elif action == 'update':
        # Updates additionally require access to the existing record.
        can_auto_accept = (
            user_request.domain.can_auto_accept(user) and
            user_request.record.can_auto_accept(user) and
            not user_request.is_sec_acceptance_required()
        )
    elif action == 'delete':
        # NOTE(review): delete checks is_seo_acceptance_required() while
        # create/update check is_sec_acceptance_required() — confirm this
        # sec/seo difference is intentional and not a typo.
        can_auto_accept = (
            user_request.target.domain.can_auto_accept(user) and
            user_request.target.can_auto_accept(user) and
            not user_request.is_seo_acceptance_required()
        )
    return can_auto_accept
|
5703aa7a6fa5cb0d597a27c98ccbb43be5aa071a
| 30,425
|
import re
def valid_mac(mac=None):
    """Validates Ethernet MAC Address.

    Accepts six pairs of hex digits, optionally separated by ':', '.' or '-'.

    :param mac: address to validate
    :type mac: str
    :return: True if MAC is properly formatted, else False
    :rtype: bool
    """
    if mac is None or not mac.strip():
        return False
    mac = mac.strip()
    # bool() so the function matches its documented bool return type
    # instead of leaking the re.Match object (or None) to callers.
    return bool(re.match(r"^([0-9A-Fa-f]{2}[:.-]?){5}([0-9A-Fa-f]{2})$", mac))
|
356f8ce8e16fb25271aa2f1dea845febd19864cb
| 30,426
|
def quantize_tensor(tensor, tensor_details):
    """
    Scale a float tensor into the integer range described by a tflite
    tensor-details dict.

    :param tensor: array of float values to quantize.
    :param tensor_details: dict with 'quantization' (scale, zero_point)
        and the target integer 'dtype'.
    :return: quantized tensor cast to the specified dtype.
    """
    scale, zero_point = tensor_details['quantization']
    quantized = tensor / scale + zero_point
    return quantized.astype(tensor_details['dtype'])
|
aff9f27d0ce726d44884e20af3c793b08b29c75f
| 30,427
|
def getPathValues(d, path):
    """
    Given a nested structure, return all the values referenced by the given
    path. Always returns a list; if the value is not found, the list is
    empty.

    Supports dotted paths ('a.b.c'), '*' wildcards over dict values, and
    '[n]' list indexing. NOTE: processing a list is its own recursion.

    (Fixed for Python 3: dict.iteritems() no longer exists — replaced with
    dict.items(), which raised AttributeError on every wildcard lookup.)
    """
    pos = path.find('.')
    currentpath = path[0:pos] if pos > 0 else path
    nextpath = path[pos + 1:] if pos > 0 else None
    lbracket = path.find('[')
    itemnum = None
    if lbracket >= 0 and (pos < 0 or lbracket < pos):
        rbracket = path.find(']')
        itemnum = int(path[lbracket + 1:rbracket])
        currentpath = path[0:lbracket]
        # keep the bracket for the next recursive depth
        nextpath = path[lbracket:] if lbracket > 0 else nextpath
    if type(d) is list:
        result = []
        if itemnum is not None:
            result.extend(getPathValues(d[itemnum], nextpath))
        else:
            for item in d:
                # still on the current path node
                result.extend(getPathValues(item, path))
        return result
    if pos < 0:
        if currentpath == '*':
            result = []
            for k, v in d.items():
                result.append(v)
            return result
        return [d[currentpath]] if currentpath in d and d[currentpath] else []
    else:
        if currentpath == '*':
            result = []
            for k, v in d.items():
                result.extend(getPathValues(v, nextpath))
            return result
        return getPathValues(d[currentpath], nextpath) if currentpath in d else []
|
0d6cd9a0d07b38eea1e3b2a16bdc7321f961794f
| 30,428
|
def hasmethod(obj, methodname):
    """Does ``obj`` have a callable attribute named ``methodname``?"""
    # getattr with a None default collapses the hasattr + getattr pair:
    # a missing attribute yields None, which is not callable.
    return callable(getattr(obj, methodname, None))
|
a71dcdac40d97de91bc789068a885cab467a6f48
| 30,429
|
def _batch_name(split_name, offset, dataset):
"""Returns the file name for a given batch.
Args:
split_name: "train"|"test"
offset: batch index (0-indexed)
dataset: "cifar10"|"cifar100"
"""
if dataset == 'cifar10':
if split_name == 'train':
return 'data_batch_%d' % (offset + 1) # 1-indexed.
else:
return 'test_batch'
else:
return split_name
|
b360268ef38bb5fee5fa02b36b19c7d0ae6a1ab7
| 30,432
|
def bisection(f, xl, xr, tol):
    """
    Solve f(x) = 0 on [xl, xr] using the bisection method.

    Input:
        f   - Object exposing an Eval(x) method
        xl  - Left bound
        xr  - Right bound
        tol - Tolerance on the bracket width

    Raises NameError when the root is not bracketed, when a non-ordered
    midpoint value appears (probably a NaN), or when the iteration limit
    is exceeded.
    """
    max_iter = 100
    fl = f.Eval(xl)
    if fl == 0:
        return xl
    fr = f.Eval(xr)
    if fr == 0:
        return xr
    if fl * fr > 0:
        # Dump the bracket diagnostics before giving up.
        print(f'xl = {xl}')
        print(f'xr = {xr}')
        print(f'fl = {fl}')
        print(f'fr = {fr}')
        raise NameError("Root not bracketed")
    iteration = 0
    xm = 0.5 * (xl + xr)
    while abs(xl - xr) > tol:
        xm = 0.5 * (xl + xr)
        fm = f.Eval(xm)
        if fm == 0:
            return xm
        # Keep the half-interval whose endpoints still bracket the root;
        # the endpoint values fl/fr only matter for their signs.
        if fm * fl > 0:
            xl = xm
        elif fm * fr > 0:
            xr = xm
        else:
            raise NameError('Something bad happened. Probably a NaN')
        if iteration > max_iter:
            raise NameError('Maximum number of iterations exceeded in bisection')
        iteration += 1
    return xm
|
1f1c6a808f5d548b8d9c07349b36ee3badbf1df5
| 30,433
|
import argparse
def get_cli_parser():
    """Build the argument parser for the color-extraction CLI."""
    parser = argparse.ArgumentParser(
        description='Find N most common colors in images.')
    parser.add_argument(
        'file', metavar='<INPUT FILE>', type=str, nargs=1,
        help='File with list of urls separated by new line')
    parser.add_argument(
        '--output_file', metavar='<OUTPUT FILE>', type=str, nargs=1,
        help=('CSV file that will contain a list of urls '
              'with hex encoded colors. Default is '
              'INPUT FILE-colors.csv'))
    parser.add_argument(
        '--number-of-colors', dest='num_colors', action='store', default=3,
        help='Number of colors to output. Default is 3')
    return parser
|
e9826c6be2210c4e7d5de6452119d28ebf1faa32
| 30,434
|
from typing import Dict
from typing import List
def create_dirs_list(all_confs: Dict[int, Dict[str, list]]) -> List[dict]:
    """Compute the directories required for each scene of a project.

    Uses the information provided by `config_info()` to decide which
    directories to create according to the user's configuration file.

    Args:
        all_confs (Dict[int, Dict[str, list]]): configuration info keyed by
            scene number; each value maps content types to their
            instructions, as created by `config_info()`.

    Returns:
        List[dict]: one dict per scene, mapping the scene name
        ("scene_<n>") to the list of directory names `good-bot` will
        record or create.

    Raises:
        TypeError: when *all_confs* is not a dict.
    """
    if not isinstance(all_confs, dict):
        raise TypeError(
            "create_dirs_list(): This function takes a dictionnary as an input."
        )
    dirs_list: List[dict] = []
    for scene_number, contents in all_confs.items():
        # Directories every scene gets, regardless of configuration.
        to_create: List[str] = ["asciicasts", "embeds", "gifs", "videos"]
        # One extra directory per content type that has instructions
        # (things like the editor are added here).
        to_create.extend(
            content_type
            for content_type, instructions in contents.items()
            if instructions
        )
        if "read" in to_create:
            to_create.append("audio")  # narration MP3 files for read scenes
        dirs_list.append({f"scene_{scene_number}": to_create})
    return dirs_list
|
3ca08e48a5e27bfcb9c3d0d751cf7efc553ea34b
| 30,435
|
def github_action(pytestconfig):
    """Return True/False depending on the --github command line option.

    NOTE(review): the original docstring referenced a --cleanse argument,
    but the code reads the "github" option; documented per the code.
    """
    return pytestconfig.getoption("github").lower() == "true"
|
b1eafe50fb0664b9791269968f7d4611091c7e47
| 30,436
|
from datetime import datetime
def parse_time(line):
    """
    All logging lines begin with a timestamp; parse it and return the datetime it represents.
    Example: 2016-04-14 14:50:33,325 (the milliseconds after the comma are ignored).
    :param line: the log line containing time
    :return: naive datetime
    """
    # %Y (4-digit year) and %H (24-hour clock): the original format
    # '%y-%m-%d %h:%M:%S' used the 2-digit-year directive plus the invalid
    # '%h', so it could never parse the documented example.
    return datetime.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
|
40d0e755693bd93a0d1ecf1969b39a5c001b1cca
| 30,438
|
def reconstruct_abstract(inverted):
    """Reconstruct a string from an inverted index {word: [positions]}."""
    position_to_word = {
        position: word
        for word, positions in inverted.items()
        for position in positions
    }
    ordered = (position_to_word[p] for p in sorted(position_to_word))
    return " ".join(ordered)
|
6e2f752387e7e697a5de10ce5a4afe5f920f839f
| 30,441
|
def seatid(code):
    """Decode a boarding-pass code into its seat id.

    The code is a binary number in disguise: B and R mean 1, F and L mean 0.

    >>> seatid("BFFFBBFRRR")
    567
    >>> seatid("FFFBBBFRRR")
    119
    >>> seatid("BBFFBBFRLL")
    820
    """
    bits = code.translate(str.maketrans("BFRL", "1010"))
    return int(bits, 2)
|
487e1fa358c30309539413a0a06b53dc26cb8d9f
| 30,442
|
import sys
def get_func_name():
    """
    Return the name of the function that called this one.

    Inspects the caller's stack frame — a helper for building function
    call constructs without hard-coding names.
    """
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
|
d892d5335a73a518b84715d89e1a88aa461f2f27
| 30,443
|
def code() -> str:
    """
    Example G-code module, a drawing of a flower.
    Please simulate first, before milling.

    The program uses relative positioning (G91) in the XY plane (G17);
    G0 are rapid moves, G2/G3 are clockwise/counter-clockwise arcs with
    I/J center offsets.
    """
    return """
G91 G17
G0 Y10 X-10
G0 Y0 X-5
G0 Y5 X0
G0 Y0 X5
G0 Y0 X-5
G0 Y-5 X0
G3 Y-5 X5 J0 I5
G0 Y0 X5
G0 Y5 X0
G3 Y5 X-5 J0 I-5
G0 Y-5 X0
G0 Y-10 X10
G0 Y0 X-5
G0 Y-15 X-15
G0 Y0 X5
G0 Y5 X0
G0 Y0 X-5
G0 Y-5 X0
G0 Y5 X0
G2 Y5 X5 J0 I5
G0 Y0 X5
G0 Y-5 X0
G2 Y-5 X-5 J0 I-5
G0 Y5 X0
G0 Y10 X10
G0 Y0 X-30
G3 Y0 X-10 J0 I-5
G3 Y0 X10 J0 I5
G0 Y0 X5
G3 Y5 X5 J5 I0
G3 Y10 X-10 J0 I-10
G3 Y-5 X-5 J-5 I0
G0 Y-5 X0
G0 Y5 X0
G3 Y5 X-5 J0 I-5
G3 Y-10 X-10 J-10 I0
G3 Y-5 X5 J0 I5
G0 Y0 X5
G0 Y0 X-5
G3 Y-5 X-5 J-5 I0
G3 Y-10 X10 J0 I10
G3 Y5 X5 J5 I0
G0 Y5 X0
G0 Y-5 X0
G3 Y-5 X5 J0 I5
G3 Y10 X10 J10 I0
G3 Y5 X-5 J0 I-5
G0 Y0 X-5
"""
|
1f2859dcb09b7e0155e340d77d3dba38cd38b5f5
| 30,444
|
def format_size(bsize):
    """
    Pretty formatting of byte count.

    :param bsize: size in bytes
    :return: human-readable string such as '2.00KiB'
    """
    postfixes = ["bytes", "KiB", "MiB", "GiB"]  # don't even bother with terabytes
    i = 0
    # Stop at the last postfix so huge inputs don't run off the list —
    # the original raised IndexError for anything above 1024 GiB.
    while bsize > 1024 and i < len(postfixes) - 1:
        bsize = bsize / 1024
        i += 1
    return "{0:.2f}{1}".format(bsize, postfixes[i])
|
87e751cb09c0f85779809461295fad9bcc0def52
| 30,445
|
import subprocess
def genxlc(file, xl_dir, xlc_dir):
    """Generate .xlc from .xl by shelling out to the cargo-built compiler.

    Compiler stdout is redirected into <xlc_dir>/<file>.xlc.

    NOTE(review): the arguments are interpolated into a shell command
    string — callers must not pass untrusted file/dir names (shell
    injection risk).

    :return: exit code of the cargo process (0 on success).
    """
    cmd = 'cargo run -q -- --verbose --no-output {}/{}.xl > {}/{}.xlc'\
        .format(xl_dir, file, xlc_dir, file)
    # shell=True is required here for the '>' output redirection.
    process = subprocess.run(cmd, shell=True)
    return process.returncode
|
46d9b8492ec19e0d81c3eb35f416c427c7d5652b
| 30,446
|
from typing import List
from typing import Dict
def construct_rows(header: list, rows: list) -> List[Dict]:
    """Construct a list of csv row dicts.

    Arguments:
        header {list} -- csv header (column names)
        rows {list} -- csv contents; wrap a single row as [row]

    Returns:
        List[Dict] -- one dict per row, keyed by the header columns
    """
    return [dict(zip(header, row)) for row in rows]
|
771b2dfde99a8b517331695d160eb9f809e4933c
| 30,447
|
def to_android_abi(arch):
    """Convert a conan-style architecture name into an Android-NDK ABI.

    Unknown architectures pass through unchanged (as strings).
    """
    # https://cmake.org/cmake/help/latest/variable/CMAKE_ANDROID_ARCH_ABI.html
    abi_map = {
        'armv5el': 'armeabi',
        'armv5hf': 'armeabi',
        'armv5': 'armeabi',
        'armv6': 'armeabi-v6',
        'armv7': 'armeabi-v7a',
        'armv7hf': 'armeabi-v7a',
        'armv8': 'arm64-v8a',
    }
    arch = str(arch)
    return abi_map.get(arch, arch)
|
2d66ce02b6547026498ab4fbabb4e8d38311e7a2
| 30,448
|
def verror_msg(pos: int, prog: str, msg: str) -> str:
    """Format an error message pointing at a position inside *prog*.

    Args:
        pos: The position of where the error is.
        prog: The program where the error is.
        msg: The error message.
    """
    caret_line = " " * pos + "^ " + msg
    return f"{prog}\n{caret_line}"
|
ad7483823df859f9498e33157ad1127922a30c62
| 30,451
|
import requests
import zipfile
import io
def get_zip_file(url):
    """
    Get zip file from provided URL

    Returns a zipfile.ZipFile built from the fully-downloaded response body.

    NOTE(review): stream=True is requested, but accessing f.content reads
    the entire body into memory anyway, so the flag is effectively a no-op
    here.
    """
    with requests.get(url, stream=True) as f:
        z = zipfile.ZipFile(io.BytesIO(f.content))
        return z
|
2db522396d4ffde212c858b3544cfaa99b03bba0
| 30,452
|
def parse_rule(l, cats):
    """Parses a sound change rule or category.
    Given a line l with the format 'a > b / c_d ! e_f', produces a dict of
    the form
    {
        'from': 'a',
        'to': 'b',
        'before': 'c',
        'after': 'd',
        'unbefore': 'e',
        'unafter': 'f'
    }.
    If l is of the form 'a > b / c_d ! e_f | g > h / i_j ! k_l', the output is
    a list of the '|' delimited rules.
    If l is of the form 'a = b c d', the output is a dict of the form
    {
        'cat_name': 'a',
        'category': ['b', 'c', 'd']
    }. Category names in curly brackets are expanded.
    In environments, '#' is expanded to a word-boundary lookaround.
    Args:
        l: The line of text to parse.
        cats: The dict of categories to use in the rule.
    Returns:
        A dict representing either a sound change rule or category, or a list
        of several sound changes.
    """
    word_start = r'(?<=^|\s)'
    word_end = r'(?=$|\s)'
    out = {}
    if len(l.split(' = ')) == 2:
        # If there is an equals sign, it's a category
        out['cat_name'] = l.split(' = ')[0].strip()
        category = l.split(' = ')[1]
        # expand categories
        for c in cats:
            category = category.replace('{' + c + '}', ' '.join(cats[c]))
        out['category'] = category.split()
    else:
        if len(l.split(' | ')) > 1:
            # It's a list of sound changes
            return [parse_rule(ll, cats) for ll in l.split(' | ')]
        # Otherwise, it's a sound change rule
        try:
            # Attempt to set 'from' and 'to'. If there isn't a ' > ', it will
            # raise an IndexError when trying to set 'to', so 'from' will be
            # set, but 'to' will not. This could be used when parsing a rule to
            # be used as a search pattern, and not as a sound change. Need to
            # split on ' / ' and ' ! ' in case it is being used in this way.
            out['from'] = l.split(' > ')[0].split(' / ')[0].split(' ! ')[0]
            # Treat '0' like ''
            if out['from'] == '0':
                out['from'] = ''
            out['to'] = l.split(' > ')[1].split(' / ')[0].split(' ! ')[0]
            # Treat '0' like ''
            if out['to'] == '0':
                out['to'] = ''
        except IndexError:
            pass
        try:
            # Attempt to set 'before' and 'after'. If there isn't a ' / ', it
            # will raise an IndexError, and neither will be set. If there isn't
            # a '_', it will raise an IndexError when trying to set 'after', so
            # 'before' will be set, but 'after' will not.
            out['before'] = l.split(' / ')[1].split('_')[0].split(' ! ')[0]
            out['before'] = out['before'].replace('#', word_start)
            out['after'] = l.split(' / ')[1].split('_')[1].split(' ! ')[0]
            out['after'] = out['after'].replace('#', word_end)
        except IndexError:
            pass
        try:
            # Attempt to set 'unbefore' and 'unafter'. Same comments apply as
            # for 'before' and 'after'. Note that the negative conditions must
            # come after the positive conditions, if both exist.
            out['unbefore'] = l.split(' ! ')[1].split('_')[0]
            out['unbefore'] = out['unbefore'].replace('#', word_start)
            out['unafter'] = l.split(' ! ')[1].split('_')[1]
            # BUGFIX: 'unafter' is a right-hand environment and must use the
            # word_end lookahead; the original substituted word_start here,
            # breaking '#' anchors in negative after-environments.
            out['unafter'] = out['unafter'].replace('#', word_end)
        except IndexError:
            pass
    return out
|
db9239fcf8d13d8884dd57a351e312de4caf2c61
| 30,455
|
import re
from typing import Counter
def get_term_frequencies(series, min_str_len=1, ngram=1):
    """
    Count how often each term occurs in the given pandas series.

    Parameters
    ----------
    series: pandas series
        Series to extract the term frequencies from
    min_str_len: int, optional (default value=1)
        Minimum string length for a term to be counted
    ngram: int, optional (default value=1)
        Number of consecutive words that make up one term

    Return
    -------
    Python dict ({word:frequency})
    """
    joined = " ".join(series.values.astype(str)).lower()
    pattern = re.compile(r"\w+" + r" \w+" * (ngram - 1))
    terms = [t for t in pattern.findall(joined) if len(t) >= min_str_len]
    return dict(Counter(terms))
|
560247a4fa3a6a1321caf9eadd45ba889eb517cc
| 30,456
|
from typing import List
from typing import Dict
def ids_to_models(model_ids: List[str]) -> Dict[str, List[str]]:
    """
    Convert model IDs (MODEL_NAME-MODEL_VERSION) to a dictionary with its keys being
    the model names and its values being lists of the associated versions for each given model name.
    """
    models: Dict[str, List[str]] = {}
    for model_id in model_ids:
        # rsplit on the last '-' so model names containing dashes survive.
        name, version = model_id.rsplit("-", maxsplit=1)
        models.setdefault(name, []).append(version)
    return models
|
5a2b3bdc1361954a78630ca8bac4ebd0aefe8293
| 30,459
|
def _flatten(list_of_lists):
"""
Turns a nested list of lists ([a,[b,c]]) into a flat list ([a,b,c]).
"""
out = []
if type(list_of_lists) is int:
return [list_of_lists]
for item in list_of_lists:
if type(item) is int:
out.append(item)
elif type(item) is list:
out += _flatten(item)
return out
|
8b2489d18a277e15e6f8c67baad70ff874c6e2bf
| 30,460
|
def cleanse_data(data):
    """Return an empty string when *data* is None, otherwise *data* unchanged.

    (The previous docstring described the conversion backwards; None -> ""
    is what the code does.)
    """
    # `is None` is the identity test; `== None` could invoke an arbitrary
    # __eq__ on data objects.
    if data is None:
        return ""
    return data
|
45b2332cad4d7f2bf0bc9c5dc92118e7b163b4a3
| 30,461
|
def get_nested(dct, key_pattern):
    """ Get value(s) from a nested dict

    Args:
        dct: dict having str keys and values which may be other `dct`s
        key_pattern: str giving dotted key (e.g. "a.b.c")

    Returns the value. Note that if key_pattern does not go to full depth
    then a dict is returned.

    Raises:
        KeyError: when any component of the path is missing.
    """
    current = dct
    for part in key_pattern.split('.'):
        if part not in current:
            raise KeyError("No such key '%s'" % key_pattern)
        current = current[part]
    return current
|
4e7de6f56a6bcc62fc41b377f9ba2b144ac5627d
| 30,462
|
def DeltaT(year):
    """
    Difference between ephemeris time and universal time, in days.

    Quadratic fit from
    https://eclipse.gsfc.nasa.gov/SEhelp/deltatpoly2004.html, centered on
    the year 2000; the leading minus sign sets the sign convention used by
    the callers.

    :param year: (fractional) calendar year
    :return: delta-T in days
    """
    t = year - 2000
    seconds = 26.92 + 0.32217 * t + 0.005589 * t ** 2
    return -seconds / 86400.0
|
a21e58a7edf43ab3b819131a9920eafb703e0dd5
| 30,463
|
def _get_data(mp, table=None):
"""
Task to avoid serialization of lambdas
"""
if table:
return mp.data[table]
else:
return mp.data
|
cf509d3ec556117d625ba80488a05c86ef1f0770
| 30,464
|
import zipfile
def dictionnaire(fichier):
    """Return every non-empty line of the single file inside a zip archive.

    Args:
        fichier (str): name of the compressed file holding the dictionary;
            the archive must contain exactly one UTF-8 text file with one
            word per line.

    Returns:
        list[str]: the words, with empty lines dropped.

    Raises:
        ValueError: if the archive does not contain exactly one file.
    """
    with zipfile.ZipFile(fichier, 'r') as archive:
        membres = archive.namelist()
        if len(membres) != 1:
            raise ValueError("L'archive devrait contenir exactement 1 fichier mais en contient {}".format(len(membres)))
        lignes = archive.read(membres[0]).decode(encoding="UTF-8", errors="strict").split("\n")
    return [mot for mot in lignes if len(mot) != 0]
|
1b65eaeed81c92ead518aed42f360b026a1d8436
| 30,465
|
def brute_force(my_in):
    """
    Don't joke! :-)

    Quadratic scan over all index pairs; returns the product of the first
    pair of entries summing to 2020, or None when no such pair exists.
    """
    for i, first in enumerate(my_in):
        for second in my_in[i + 1:]:
            if first + second == 2020:
                return first * second
|
23bdb20396f9ad94f34ee0d0458e2dd5b63414d9
| 30,466
|
def sw_update_strategy_db_model_to_dict(sw_update_strategy):
    """Convert a sw-update-strategy DB model into a plain dictionary."""
    strategy = sw_update_strategy
    return {
        "id": strategy.id,
        "type": strategy.type,
        "subcloud-apply-type": strategy.subcloud_apply_type,
        "max-parallel-subclouds": strategy.max_parallel_subclouds,
        "stop-on-failure": strategy.stop_on_failure,
        "state": strategy.state,
        "created-at": strategy.created_at,
        "updated-at": strategy.updated_at,
    }
|
93e8da29292998a37d031d6087bb0afba73fc995
| 30,468
|
from typing import Dict
from typing import Tuple
import random
def train_test_split(chip_dfs: Dict, test_size=0.2, seed=1) -> Tuple[Dict, Dict]:
    """Split chips into a training and a test set.

    Args:
        chip_dfs: Dictionary containing key (filename of the chip) value
            (dataframe with geometries for that chip) pairs.
        test_size: Relative number of chips put in the test dataset;
            1 - test_size is the size of the training dataset.
        seed: RNG seed, making the shuffle reproducible.

    Returns:
        (train_chip_dfs, val_chip_dfs); test keys have 'train' renamed to
        'val', and both dicts are built in sorted-key order.
    """
    names = list(chip_dfs.keys())
    random.seed(seed)
    random.shuffle(names)
    cut = round(len(names) * test_size)
    val_names, train_names = names[:cut], names[cut:]
    train_chip_dfs = {name: chip_dfs[name] for name in sorted(train_names)}
    val_chip_dfs = {
        name.replace('train', 'val'): chip_dfs[name] for name in sorted(val_names)
    }
    return train_chip_dfs, val_chip_dfs
|
baaa6c1f929d12def3c4611c99a1c694bb52fc29
| 30,469
|
def is_decimal_amount(string: str) -> bool:
    """Checks if string is a fractional amount (e.g. 1/2, 1/4, etc..)

    Args:
        string: string to be checked.

    Returns:
        True if string is exactly numerator/denominator with both parts
        numeric, False otherwise.
    """
    parts = string.split("/")
    if len(parts) != 2:
        return False
    numerator, denominator = parts
    return numerator.isnumeric() and denominator.isnumeric()
|
5b820eb8586b0eee94c9b122a20f0a7442111777
| 30,470
|
def std_input(text, default):
    """Prompt with *text* (formatted with *default*) and return the user's
    reply, or *default* when the reply is empty.

    Note: *text* is expected to contain a '{}' placeholder that displays
    the default value in the prompt.
    """
    return input(text.format(default)) or default
|
d0b38f828dec28cd9ae7d251fcb3bb0bbf324264
| 30,471
|
def custom_func_average(populator, *args, **kwargs):
    """Get an Excel formula to calculate the average of multiple values.

    Parameters
    ----------
    args : list
        Values to calculate the average of; empty ("") and "nan" entries
        are ignored.

    Returns
    -------
    str
        An AVERAGE() formula, or "" when no usable values remain.
    """
    usable = [value for value in args if value not in ("", "nan")]
    if not usable:
        return ""  ##DIV/0!"
    return "AVERAGE({})".format(",".join(usable))
|
a013f910448df761e3d6d05ef837910bf64b7aa7
| 30,472
|
def document_fraction_for_viewport_position(document_size, viewport_size, viewport_position):
    """Relative scroll position of the viewport inside the document.

    The top of the viewport ranges from 0 (document top) to
    document_size - viewport_size (viewport flush with the document
    bottom); the returned fraction is the position within that range, so
    0.0 means top and 1.0 means fully scrolled.

    Returns None when the whole document fits inside the viewport, since
    no meaningful scroll fraction exists then.
    """
    scrollable_range = document_size - viewport_size
    if scrollable_range <= 0:
        return None
    return viewport_position / scrollable_range
|
80b7af9ff36ca19cb4a91c82a24a60aa964f13b8
| 30,473
|
def institutions(cur):
    """Return prototype and agentid of institution agents.

    Parameters
    ----------
    cur: sqlite cursor
        sqlite cursor

    Returns
    -------
    sqlite query result: list of (prototype, agentid) tuples
    """
    query = ('SELECT prototype, agentid FROM agententry '
             'WHERE kind = "Inst"')
    return cur.execute(query).fetchall()
|
e02233b997cba0a14f1bdcfcecf720deb9b4a494
| 30,474
|
import os
def make_path(path):
    """
    For a given path, create the folders if they do not exist
    :param path: (str) The path
    :return: None — os.makedirs returns nothing, so the original
        ":return: (bool)" claim was wrong. exist_ok=True makes this a
        no-op (no exception) when the directory already exists.
    """
    return os.makedirs(path, exist_ok=True)
|
31c8a860b35c16f96cecd5b5a75fbdf64d596a1b
| 30,475
|
def key(space, w_arr):
    """ Fetch a key from an array

    Delegates to the array wrapper's _key method. NOTE(review): the w_
    prefix and `space` argument suggest an interpreter object-space API —
    confirm the w_arr contract against its class definition.
    """
    return w_arr._key(space)
|
0c454c897ad6f7e6f3e35af7127d2f644f4556a2
| 30,476
|
import socket
def is_internet_connected() -> bool:
    """
    check if connected to internet or not

    NOTE(review): this only resolves the local hostname and compares the
    result to the loopback address — it detects whether the host has a
    non-loopback address configured, not actual internet reachability.
    It can also raise socket.gaierror when the hostname does not resolve.
    """
    return socket.gethostbyname(socket.gethostname()) != '127.0.0.1'
|
a15eb5ca5f6b728e6f7fae309655049a531b7bea
| 30,477
|
def format_string(indices):
    """Generate a format string using indices.

    Each index is rendered as ', <index>'; note the result keeps the
    leading ', ' (e.g. [1, 2] -> ', 1, 2').
    """
    return ''.join(', %s' % index for index in indices)
|
5bc61727f74b97648961e7b9ccf8299a9680648b
| 30,479
|
def element_wise_product(X, Y):
    """Return the element-wise product of equal-length vectors X and Y."""
    assert len(X) == len(Y)
    result = []
    for x, y in zip(X, Y):
        result.append(x * y)
    return result
|
e01ec3720ac6b2fa06cca1186cf1a5a4d8703d38
| 30,480
|
import os
def validate_path_exists(s):
    """Return *s* when it is an existing path (or None); raise otherwise.

    Raises:
        RuntimeError: when *s* is non-None but does not exist on disk.
    """
    if s is not None and not os.path.exists(s):
        raise RuntimeError('"%s" should be a path but it does not exist' % s)
    return s
|
0632568887071d67b83ac09f717dec5a1b668c7e
| 30,482
|
def wrap(string, max_width):
    """
    Break *string* into max_width-sized chunks, one per line.

    :param string: text to wrap
    :param max_width: maximum characters per line
    :return: the wrapped text; every chunk (including the last) is
        followed by a newline
    """
    chunks = (
        string[start:start + max_width]
        for start in range(0, len(string), max_width)
    )
    return ''.join(chunk + '\n' for chunk in chunks)
|
324b2e76ccf44f71745983faa4e2548f86dacca0
| 30,483
|
def get_schedule_config(config):
    """Read the scheduling configuration, which includes:
    - Active Days
    - Start Time
    - End Time
    The hour and minute parts of the start/end times are converted to ints.

    Args:
        config (ConfigParser): ConfigParser object used to get the config.

    Returns:
        dict: Containing the active days, start and end times.
    """
    def _parse_clock(option):
        # "HH:MM" -> {"hour": H, "minute": M}
        hour, minute = config.get("schedule", option).split(":")
        return {"hour": int(hour), "minute": int(minute)}

    days = config.get("schedule", "active_days").split(",")
    return {
        "active_days": [day.strip() for day in days],
        "start": _parse_clock("start_time"),
        "end": _parse_clock("end_time"),
    }
|
d362a44e80f5ae8bca5ab5ac7fe164777beae1ed
| 30,486
|
def prop_FC(csp, newVar=None):
    """Do forward checking. That is check constraints with
    only one uninstantiated variable. Remember to keep
    track of all pruned variable,value pairs and return

    Returns:
        (bool, list): False plus the pruned (Variable, value) pairs when a
        domain wipe-out (DWO) occurs; True plus the pruned pairs otherwise.
        When newVar is None all constraints are checked, otherwise only
        the constraints involving newVar.
    """
    # book-keeping for pruned variable-value pair for future restoring
    pruned_pairs = []
    # forward check helper function for a single unary constraint C
    def fc_check(C, X):
        """return True if DWO doesn't occur, False otherwise"""
        for d in X.cur_domain():
            # assign to X
            X.assign(d)
            # current assignment to variables in scope C (including X)
            curr_val = []
            for var in C.get_scope():
                curr_val.append(var.get_assigned_value())
            # if assigning d to X falsifies C, prune d
            if not C.check(curr_val):
                X.prune_value(d)
                pruned_pairs.append((X, d))
            # unassign for future looping
            X.unassign()
        # DWO: every value of X was pruned
        if X.cur_domain_size() == 0:
            return False
        return True
    # get constraints
    if not newVar:
        cons = csp.get_all_cons()
    else:
        cons = csp.get_cons_with_var(newVar)
    # forward check for unary constraints
    for c in cons:
        if c.get_n_unasgn() == 1:
            x = c.get_unasgn_vars()[0]
            if not fc_check(c, x):
                # DWO detected — caller restores using pruned_pairs
                return False, pruned_pairs
    return True, pruned_pairs
|
cd3b410fc53123216b5f7b726e3ef624ea4395b1
| 30,488
|
def p_empty_or_comment(line, comment='#'):
    """Predicate matching strings that are not empty and don't start
    with a comment character. For best results, the line should no
    longer contain whitespace at the start.

    Fix: always return a proper bool. The original expression
    ``len(line) and ...`` returned the int 0 for empty lines instead
    of False, which is surprising for a predicate.
    """
    return bool(line) and not line.startswith(comment)
|
d0eef18237b0cac9f019d866d48d02813b1c2e95
| 30,489
|
def rowcol_to_index(row, col, width):
    """Flatten (row, col) coordinates of a row-major grid of the given
    width into a single 1-D index."""
    return row * width + col
|
cc9bce2ba17337025e9982865abdc63f4ad90658
| 30,490
|
def accuracy_boolean(data, annotation, method, instance):
    """
    Score whether the method's detection *presence* matches the annotation.
    Parameters
    ----------
    data: color_image, depth_image
    annotation: Annotation from blob_annotator (XML element tree)
    method: function[instance, data] -> BoundingBox
    instance: instance of benchmark
    Returns
    -------
    found, missed, extra : found is 1 when "predicted anything" equals
    "annotated anything", missed is 1 - found, extra is always 0.
    """
    # Did the method predict any bounding box at all?
    predicted_any = bool(len(method(instance, *data)))
    # Does the annotation contain any labelled object?
    expected_any = bool(len(annotation.findall('object')))
    found = int(predicted_any == expected_any)
    return found, 1 - found, 0
|
c28ef0b521895d4eef2a996e376cf0853b4a809a
| 30,491
|
import struct
def hex_to_float(hex_number):
    """
    Reinterpret a 32-bit integer (e.g. 0x3f800000) as an IEEE-754
    single-precision float via a little-endian pack/unpack round trip.
    """
    raw = struct.pack('<I', hex_number)
    (value,) = struct.unpack('<f', raw)
    return value
|
8bd94e1d64709bd0b8f93d81778f1dd18e19d320
| 30,492
|
import random
def dummy_decision(board):
    """Pick a uniformly random legal move from *board* — a baseline
    "player" that does no evaluation at all."""
    candidates = list(board.legal_moves)
    return random.choice(candidates)
|
8c44acf1aff245ed532e48c35e440440b6ddcaef
| 30,493
|
def leia_int(txt):
    """
    Prompt repeatedly until the user types a numeric value.

    :param txt: prompt message shown to the user
    :return: the value typed, as a *string* — despite the name, no int
             conversion is performed; note str.isnumeric also rejects
             signs, so negative numbers are refused
    """
    while True:
        num = input(txt)
        if num.isnumeric():
            break
        else:
            # Red ANSI error message (Portuguese: "ERROR! Type a number.")
            print('\033[31;1mERRO! Digite um número.\033[m')
    return num
|
f6a1c7104706e5a181a73aacd43f91a75df3e3f1
| 30,496
|
import os
def get_pinfo():
    """
    Return (parent_pid, current_pid) for the running process.

    Bug fix: the original returned ``ppid, ppid`` — the parent pid
    twice — instead of ``ppid, pid``.
    """
    ppid = os.getppid()
    pid = os.getpid()
    return ppid, pid
|
c0cacbd76b11a72cbec7712e4d6ae3d75253fecb
| 30,497
|
def alive(p):
    """Return True if the process-like object *p* is still running.

    Delegates to p.is_alive() (multiprocessing/threading style) —
    despite the original docstring, *p* is a process object, not a
    unix pid.
    """
    return p.is_alive()
|
1c76cadaa2d1c05c52589a39041e9eb1b15542c2
| 30,498
|
import os
def _get_template_dirs():
"""existing directories where to search for jinja2 templates. The order
is important. The first found template from the first found dir wins!"""
return filter(lambda x: os.path.exists(x), [
# user dir
os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'),
# system wide dir
os.path.join('/', 'usr', 'share', 'py2pack', 'templates'),
# usually inside the site-packages dir
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'),
])
|
a70c0d7af83b588f8f92f993c6cf5d507ae1fd33
| 30,499
|
def lazyload(loadopt, attr):
    """Indicate that the given attribute should be loaded using "lazy"
    loading.
    This function is part of the :class:`.Load` interface and supports
    both method-chained and standalone operation.
    .. seealso::
        :ref:`loading_toplevel`
        :ref:`lazy_loading`
    """
    # "select" is the lazy-load strategy key: the related rows are fetched
    # with a separate SELECT on first attribute access.
    return loadopt.set_relationship_strategy(attr, {"lazy": "select"})
|
31ca5f0a6958aaa037b19535730c7f1d790cd4d3
| 30,500
|
def address_column_split(a):
    """
    Split an address string into (city, state) columns.

    Assumes the city is the last word before the first comma and the
    state is the first word after it, e.g.
    "123 Main St Springfield, IL 62704" -> ("Springfield", "IL").
    """
    parts = a.split(",")
    *_, city = parts[0].split()
    state, *_ = parts[1].split()
    return city, state
|
539123969beb28f2623466db193077f4351d04cb
| 30,502
|
def derivative(f, h):
    """(function, float) -> function
    Build the forward-difference approximation of f's derivative using
    step size <h>. The closer <h> is to zero, the better the estimate.
    """
    def approx(x):
        return (f(x + h) - f(x)) / h
    return approx
|
1dbb4035580f0bbb16339ee38ba863a2af144b63
| 30,503
|
def get_ltm_cells(cells):
    """Normalize matrix indices so every cell lies below the diagonal.

    cells: list of (i, j) indices into a 2D integer-indexable object
    (typically a list of lists or array of arrays).
    Diagonal entries are dropped; duplicates are removed; the result is
    sorted.
    """
    below_diagonal = set()
    for row, col in cells:
        if row == col:
            # diagonal entries carry no lower-triangle information
            continue
        lo, hi = sorted((row, col))
        below_diagonal.add((hi, lo))
    return sorted(below_diagonal)
|
b45c63b67ec3666faa54ae49e994c883a20905ac
| 30,508
|
import os
def get_delimiter():
    """finds path delimiter
    Returns:
        delimiter: the platform path separator ('/' on POSIX,
        '\\' elsewhere)
    """
    # os.sep already encodes exactly the posix-vs-other check the
    # original hand-rolled, so delegate to it.
    return os.sep
|
f4bfaf92aac3578ba2c9b0c00bc8a1025dcbf49c
| 30,509
|
def nextDWord(offset):
    """
    Align `offset` up to the next 4-byte (DWORD) boundary; values that
    are already aligned are returned unchanged.
    """
    # Adding 3 then clearing the two low bits rounds up to a multiple of 4
    # (equivalent to the shift-down/shift-up form).
    return (offset + 3) & ~3
|
4c51fdf80b2f684645fee85755549c46208361ae
| 30,510
|
import math
def dist(p1, p2):
    """
    Euclidean distance between two points of any (matching) dimension,
    each given as a tuple/sequence of coordinates.
    """
    squared_sum = sum((a - b) ** 2 for a, b in zip(p1, p2))
    return math.sqrt(squared_sum)
|
6c57680ae32ea33aba591ab2fb75aabe41af1df5
| 30,512
|
import math
def default_if_invalid(value, default):
    """Return *default* when value is None or a float NaN; otherwise
    return value unchanged."""
    invalid = value is None or (isinstance(value, float) and math.isnan(value))
    return default if invalid else value
|
32c3eb119909a1a14f583bc922487d9e98aadd6c
| 30,513
|
import os
def count_files(folder):
    """
    Count the files under *folder*, recursively, using `os.walk`.
    """
    return sum(len(files) for _, _, files in os.walk(folder))
|
f20db6d6b91e541e4ef5248c79d82523f9f8ac68
| 30,515
|
def waitpid(pid, options):
    """Wait for completion of a child process given by process id pid, and
    return a tuple containing its process id and exit status indication.

    NOTE: this is a stub (typeshed-style signature documentation) — it
    always returns (0, 0) and performs no actual waiting.
    :type pid: int
    :type options: int
    :rtype: (int, int)
    """
    return 0, 0
|
b4800f7bab1f1296e421c7fb20f9a6666fcb0883
| 30,517
|
def func(a, b):
    """
    Concatenate the two inputs with `+`.
    :params: a,b
    :ptypes: String,String
    :returns: out
    :rtype: String
    """
    out = a + b
    return out
|
d1b7996f19501337b6181a5500a564ed79ecb6b4
| 30,518
|
def consultar_saldo(contas, numero_conta):
    """
    Return the balance of the account identified by 'numero_conta',
    or False when the account does not exist in 'contas'.
    """
    return contas.get(numero_conta, False)
|
081ad50aa4c561dc32f68afaaa88baefb6143ed2
| 30,519
|
import os
def supertouch(path: str) -> str:
    """Creates paths and touches files.

    :param path: Path to create, including file, if any. To create a
        directory only, end the path with a '/'.
    :return: the input path (whether freshly created or pre-existing).
    """
    # Split off the file component; a trailing '/' means "directory only",
    # so the whole path is treated as the directory part.
    file, *dirs = (None, path) if path.endswith("/") else path.split("/")[::-1]
    dir_path = "/".join(dirs[::-1])
    if dirs:
        # exist_ok replaces the original try/except FileExistsError dance.
        os.makedirs(dir_path, exist_ok=True)
    if file and not os.path.isfile(path):
        # Touch only when the file does not already exist; the context
        # manager fixes the original's unclosed-handle-on-error leak.
        with open(path, 'w'):
            pass
    return path
|
f8be68d9f50964a2788b6dc7953833f8d8bb05ed
| 30,520
|
def choose_optimal(estimator, observation, epsilon, nA):
    """Hand-crafted near-optimal policy over a (position, velocity)
    observation with three actions (0=left, 1=neutral, 2=right);
    ignores every input except the observation.

    Push right when far left (pos < -1.0); otherwise follow the
    velocity sign (right while vel >= 0, left while vel < 0), staying
    neutral only at exactly pos == 0.
    (Looks like a MountainCar-style heuristic — confirm against caller.)
    """
    LEFT, NEUTRAL, RIGHT = 0, 1, 2
    pos, vel = observation[0], observation[1]
    if pos < -1.0:
        return RIGHT
    if pos == 0:
        return NEUTRAL
    return RIGHT if vel >= 0 else LEFT
|
7799f2e95b700b9818d60137d5c153dba48a341e
| 30,522
|
def pad_bytes(b, multiple, padding=b'\x00'):  # type: (bytes, int, bytes) -> bytes
    """ Pad 'b' with *padding* bytes until its length is divisible by 'multiple' """
    # (-len) % multiple is the number of bytes missing to the next multiple.
    shortfall = -len(b) % multiple
    return b + padding * shortfall
|
93cbe68d4ce1351c9f3af0f4d66b64d690d577f5
| 30,523
|
def range_count(start, stop, count):
    """Generate a list of *count* linearly spaced values from start to
    stop, both endpoints included.
    e.g. range_count(1, 3, 5) = [1., 1.5, 2., 2.5, 3.]

    Fix: count == 1 now returns [start] instead of raising
    ZeroDivisionError from the (count - 1) step computation.
    """
    if count == 1:
        return [start]
    step = (stop - start) / float(count - 1)
    return [start + i * step for i in range(count)]
|
41d4e73e1cf85655a0df06c976d9187c9964dc59
| 30,524
|
def load(filename):
    """
    Load a file as an `h5py.File`-like object.
    Format supported:
    - h5 files, if `h5py` module is installed
    - Spec files if `SpecFile` module is installed
    .. deprecated:: 0.4
        Use :meth:`open`, or :meth:`silx.io.open`. Will be removed in
        Silx 0.5.
    :param str filename: A filename
    :raises: IOError if the file can't be loaded as an h5py.File like object
    :rtype: h5py.File
    """
    # Deprecated thin alias: delegates to open() — presumably the
    # module-level silx open() referenced above, which shadows the
    # builtin; verify against the full module.
    return open(filename)
|
7fe6b48432f1f3751da2c6d8334ef1c73cbdcf6f
| 30,525
|
import os
def _strip_resource_hash(relpath):
"""Removes the first part of relpath, assuming it's s resource hash.
Asserts length of hash to validate this assumption.
"""
parts = relpath.split(os.path.sep)
# If this assertion fails, the scheme used for resource caching
# hash possibly changed (e.g. overridden to use another scheme or
# the hash algorithm changed, etc.)
assert parts and len(parts[0]) == 56, parts
return os.path.sep.join(parts[1:])
|
365b520598e471e1a0d25fe1209b7f3baaad1964
| 30,526
|
def dens(gamma, pres, eint):
    """
    Invert the gamma-law relation p = rho * e * (gamma - 1) to obtain
    the density from pressure and specific internal energy.
    Parameters
    ----------
    gamma : float
        The ratio of specific heats
    pres : float
        The pressure
    eint : float
        The specific internal energy
    Returns
    -------
    out : float
        The density rho = p / (e * (gamma - 1))
    """
    gamma_minus_one = gamma - 1.0
    return pres / (eint * gamma_minus_one)
|
ae37519718d3019546a4775cf65ca6697397cf5f
| 30,527
|
def pyeval(*expression):
    """Python-eval 'expression' and return the result (coerced to string
    unless it is already a str, int, or float).
    Multiple 'expression' arguments will be concatenated first.
    Returns None when evaluation raises.

    Fixes: the old ``isinstance(...) and val or str(val)`` idiom
    stringified falsy results (pyeval('0') returned '0' instead of 0);
    the bare except also swallowed KeyboardInterrupt/SystemExit.

    WARNING: eval() on untrusted input is arbitrary code execution —
    only call this with trusted expressions.
    """
    try:
        val = eval(''.join(expression))
    except Exception:
        # Deliberately best-effort: evaluation errors yield None.
        return None
    return val if isinstance(val, (str, int, float)) else str(val)
|
c83f8cc2db978c2bfb97127c994c0ab9ca35f67d
| 30,529
|
import random
def genQuestion(amount):
    """ Validate *amount* and return (ok, payload): on success payload is
    a string of *amount* distinct random digits; on failure it is an
    error message. """
    if not isinstance(amount, int):
        return (False, "The number you entered is not specified.")
    if amount > 10:
        return (False, "The number you entered is out of range.")
    digits = random.sample('0123456789', amount)
    return (True, ''.join(digits))
|
1fd0d6876f2c91321e6b647eb11a4a5bf21f7ea4
| 30,530
|
def dodrawdown(df):
    """Maximum drawdown of a time series: the largest drop of the
    cumulative sum below its running maximum, returned as a positive
    number (0 for a monotonically rising series)."""
    cumulative = df.cumsum()
    running_peak = cumulative.cummax()
    return -min(cumulative - running_peak)
|
e45dbdd19238e04975d5d14beb6348b0a744ada9
| 30,531
|
def zc_mock_get_source_ip(mock_get_source_ip):
    """Enable the mock_get_source_ip fixture for all zeroconf tests.

    Pytest fixture pass-through: simply returns the received fixture
    value (the @pytest.fixture/autouse decorator presumably sits just
    above this chunk — verify in the full file).
    """
    return mock_get_source_ip
|
3640e7508ec4b8e4ab0fedcc184b703e1fd39d11
| 30,532
|
import xxhash
def xxh128(data):
    """
    Build a 128-bit key by concatenating two byte-reversed xxh64 digests
    of *data* (seeds 0 and 1), hex-encoded — the key format used by
    several Substrate storage items.
    Parameters
    ----------
    data : input passed straight to xxhash.xxh64
    Returns
    -------
    str : 32-hex-character concatenation of both reversed digests
    """
    def reversed_digest_hex(seed):
        # Reverse the digest bytes before hex-encoding.
        return xxhash.xxh64(data, seed=seed).digest()[::-1].hex()
    return reversed_digest_hex(0) + reversed_digest_hex(1)
|
1ed367b5926e80219b74ba89a003e296cf44a810
| 30,533
|
import torch
def quaternion_linear_rotation(input, zero_kernel, r_weight, i_weight, j_weight, k_weight, bias=None,
                               quaternion_format=False, scale=None):
    """
    Applies a quaternion rotation transformation to the incoming data:
    The rotation W*x*W^t can be replaced by R*x following:
    https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
    Works for unitary and non unitary weights.
    The initial size of the input must be a multiple of 3 if quaternion_format = False and
    4 if quaternion_format = True.

    Parameters (as used below):
    - input: tensor multiplied on the right by the built rotation kernel
      (addmm/mm for 2-D input, matmul otherwise).
    - zero_kernel: zero block used to pad the kernel when
      quaternion_format is True.
    - r_weight, i_weight, j_weight, k_weight: quaternion components of
      the weight; they are normalized internally.
    - bias: optional additive bias.
    - scale: optional elementwise scaling of the rotation entries.
    """
    # Squared components, used only for the quaternion norm below.
    square_r = (r_weight*r_weight)
    square_i = (i_weight*i_weight)
    square_j = (j_weight*j_weight)
    square_k = (k_weight*k_weight)
    # 0.0001 guards against division by zero for (near-)zero quaternions.
    norm = torch.sqrt(square_r+square_i+square_j+square_k + 0.0001)
    # Normalize so the construction also works for non-unitary weights.
    r_n_weight = (r_weight / norm)
    i_n_weight = (i_weight / norm)
    j_n_weight = (j_weight / norm)
    k_n_weight = (k_weight / norm)
    norm_factor = 2.0
    # Doubled squares and pairwise products that form the entries of the
    # quaternion-derived rotation matrix (square_i/j/k are reused here).
    square_i = norm_factor*(i_n_weight*i_n_weight)
    square_j = norm_factor*(j_n_weight*j_n_weight)
    square_k = norm_factor*(k_n_weight*k_n_weight)
    ri = (norm_factor*r_n_weight*i_n_weight)
    rj = (norm_factor*r_n_weight*j_n_weight)
    rk = (norm_factor*r_n_weight*k_n_weight)
    ij = (norm_factor*i_n_weight*j_n_weight)
    ik = (norm_factor*i_n_weight*k_n_weight)
    jk = (norm_factor*j_n_weight*k_n_weight)
    if quaternion_format:
        # 4-block layout: a zero row/column pads the 3x3 rotation block.
        if scale is not None:
            rot_kernel_1 = torch.cat([zero_kernel, scale * (1.0 - (square_j + square_k)), scale *(ij-rk), scale *(ik+rj)], dim=0)
            rot_kernel_2 = torch.cat([zero_kernel, scale *(ij+rk), scale *(1.0 - (square_i + square_k)), scale *(jk-ri)], dim=0)
            rot_kernel_3 = torch.cat([zero_kernel, scale *(ik-rj), scale *(jk+ri), scale *(1.0 - (square_i + square_j))], dim=0)
        else:
            rot_kernel_1 = torch.cat([zero_kernel, (1.0 - (square_j + square_k)), (ij-rk), (ik+rj)], dim=0)
            rot_kernel_2 = torch.cat([zero_kernel, (ij+rk), (1.0 - (square_i + square_k)), (jk-ri)], dim=0)
            rot_kernel_3 = torch.cat([zero_kernel, (ik-rj), (jk+ri), (1.0 - (square_i + square_j))], dim=0)
        zero_kernel2 = torch.cat([zero_kernel, zero_kernel, zero_kernel, zero_kernel], dim=0)
        global_rot_kernel = torch.cat([zero_kernel2, rot_kernel_1, rot_kernel_2, rot_kernel_3], dim=1)
    else:
        # 3-block layout: plain 3x3 rotation built from the same entries.
        if scale is not None:
            rot_kernel_1 = torch.cat([scale * (1.0 - (square_j + square_k)), scale *(ij-rk), scale *(ik+rj)], dim=0)
            rot_kernel_2 = torch.cat([scale *(ij+rk), scale *(1.0 - (square_i + square_k)), scale *(jk-ri)], dim=0)
            rot_kernel_3 = torch.cat([scale *(ik-rj), scale *(jk+ri), scale *(1.0 - (square_i + square_j))], dim=0)
        else:
            rot_kernel_1 = torch.cat([1.0 - (square_j + square_k), (ij-rk), (ik+rj)], dim=0)
            rot_kernel_2 = torch.cat([(ij+rk), 1.0 - (square_i + square_k), (jk-ri)], dim=0)
            rot_kernel_3 = torch.cat([(ik-rj), (jk+ri), (1.0 - (square_i + square_j))], dim=0)
        global_rot_kernel = torch.cat([rot_kernel_1, rot_kernel_2, rot_kernel_3], dim=1)
    if input.dim() == 2 :
        # 2-D input: use the fused addmm when a bias is present.
        if bias is not None:
            return torch.addmm(bias, input, global_rot_kernel)
        else:
            return torch.mm(input, global_rot_kernel)
    else:
        # Higher-rank input: broadcasted matmul, bias added separately.
        output = torch.matmul(input, global_rot_kernel)
        if bias is not None:
            return output+bias
        else:
            return output
|
729c783c9976712df8a107c2e57002f40d51f63b
| 30,534
|
def check_mol_only_has_atoms(mol, accept_atom_list):
    """Return True iff every atom symbol in the rdkit.Mol *mol* appears
    in accept_atom_list."""
    return all(atom.GetSymbol() in accept_atom_list
               for atom in mol.GetAtoms())
|
472f0c263df1c5306176a5e40e321513f72e7697
| 30,535
|
import re
def apply_and_filter_by_regex(pattern, list_of_strings, sort=True):
"""Apply regex pattern to each string and return result.
Non-matches are ignored.
If multiple matches, the first is returned.
"""
res = []
for s in list_of_strings:
m = re.match(pattern, s)
if m is None:
continue
else:
res.append(m.groups()[0])
if sort:
return sorted(res)
else:
return res
|
26aa766dbd211ca04ec33ace5d0823b059005690
| 30,536
|
def merge_shapes(shape, merged_axis: int, target_axis: int) -> list:
    """
    Fold `merged_axis` into `target_axis`: the target axis's size is
    multiplied by the merged axis's size and the merged axis is removed.
    :param shape: The full shape, tuple or tf.TensorShape.
    :param merged_axis: The index of the axis to remove.
    :param target_axis: The index of the axis to add to.
    :return: A list representing the merged shape.
    """
    result = list(shape)
    # Multiply before deleting so both indices refer to the original layout.
    result[target_axis] = result[target_axis] * result[merged_axis]
    del result[merged_axis]
    return result
|
d2ca4f91f7a99f9cef232d163c36ccac0e89fa18
| 30,537
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.