content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def unmatched_places(w, open, close):
    """
    Return the positions of unmatched parentheses in the word ``w``.

    The letters ``open`` and ``close`` are treated as opening and
    closing parentheses respectively, and the standard parenthesis
    matching procedure is applied.  The result is a pair ``(xs, ys)``
    where ``xs`` lists all indices ``i`` such that ``w[i]`` is an
    unmatched closing parenthesis and ``ys`` lists the indices of
    unmatched opening parentheses.  Both lists are increasing.

    EXAMPLES::
        sage: from sage.combinat.tableau import unmatched_places
        sage: unmatched_places([2,2,2,1,1,1],2,1)
        ([], [])
        sage: unmatched_places([1,1,1,2,2,2],2,1)
        ([0, 1, 2], [3, 4, 5])
        sage: unmatched_places([3,1,1,1,2,1,2], 2, 1)
        ([1, 2, 3], [6])
    """
    unmatched_open = []
    unmatched_close = []
    for pos, letter in enumerate(w):
        if letter == open:
            unmatched_open.append(pos)
        elif letter == close:
            # A closing letter cancels the most recent open one, if any;
            # otherwise it is itself unmatched.
            if unmatched_open:
                unmatched_open.pop()
            else:
                unmatched_close.append(pos)
    return unmatched_close, unmatched_open
|
cfd2a50c50cecc6fcc08d4fd95df56ad503fa4d2
| 699,290
|
def _clip(n: float) -> float:
"""
Helper function to emulate numpy.clip for the specific
use case of preventing math domain errors on the
acos function by "clipping" values that are > abs(1).
e.g. _clip(1.001) == 1
_clip(-1.5) == -1
_clip(0.80) == 0.80
"""
sign = n / abs(n)
if abs(n) > 1:
return 1 * sign
else:
return n
|
78308e70a405b3b3d94827c00705e8842b242cde
| 699,291
|
import os
def skip_files(root):
    """Get a list of filenames to skip.

    :param root: Root directory of the package template
    :type root: str
    :return: first token of every non-empty ``.gitattributes`` line,
        plus any dot-prefixed entries directly under *root*
    :rtype: list

    Bug fix: blank lines in ``.gitattributes`` used to raise
    IndexError on ``line.split()[0]``; they are now skipped.
    """
    # We can use .gitattributes as a quick starting point
    gitattributes = os.path.join(root, ".gitattributes")
    fnames = []
    with open(gitattributes, "r") as file_:
        for line in file_:
            tokens = line.split()
            if tokens:  # guard against blank lines
                fnames.append(tokens[0])
    # Hidden entries (".git", ".tox", ...) are always skipped.
    fnames.extend([dir_ for dir_ in os.listdir(root) if dir_[0] == "."])
    return fnames
|
22394d690040d1e70a18aea85a99b11ae8ff540d
| 699,292
|
import shutil
def hpc_batch_detect() -> str | None:
"""
Assuming a known job batching system, we will create a template for the user
to verify and then the user will run.
"""
batcher = None
if shutil.which("qsub"):
batcher = "qsub"
return batcher
|
99c3b6d7dc9fe332b7cf027d53ca172c1776890f
| 699,293
|
import os
import json
def load_json(base_path):
    """
    Load the processing log from ``<base_path>/processing_log.json``.

    Args:
        base_path (str): base directory
    Returns:
        The deserialized JSON content of the log file.
    """
    log_path = os.path.join(base_path, 'processing_log.json')
    with open(log_path, 'r') as handle:
        return json.load(handle)
|
2821b70cfafe6fc352695eeffd63248bee350605
| 699,294
|
import math
def line_intersect(pt1, pt2, ptA, ptB):
    """
    Intersection of Line(pt1, pt2) with Line(ptA, ptB).

    Adapted from https://www.cs.hmc.edu/ACM/lectures/intersections.html

    Returns a tuple (xi, yi, valid, r, s) where:
      (xi, yi)  the intersection point
      r         scalar with (xi, yi) = pt1 + r*(pt2 - pt1)
      s         scalar with (xi, yi) = ptA + s*(ptB - ptA)
      valid     0 when there are 0 or infinitely many intersections
                (parallel lines), 1 for a unique intersection
    """
    TOL = 0.00000001
    # First line in parametric form: pt1 + r * (pt2 - pt1)
    x1, y1 = pt1
    x2, y2 = pt2
    r_dx = x2 - x1
    r_dy = y2 - y1
    # Second line in parametric form: ptA + s * (ptB - ptA)
    ax, ay = ptA
    bx, by = ptB
    s_dx = bx - ax
    s_dy = by - ay
    # Solve the 2x2 system
    #   [ r_dx  -s_dx ] [ r ]   [ ax - x1 ]
    #   [ r_dy  -s_dy ] [ s ] = [ ay - y1 ]
    # A vanishing determinant means the lines are parallel.
    det = -r_dx * s_dy + r_dy * s_dx
    if math.fabs(det) < TOL:
        return (0, 0, 0, 0, 0)
    inv = 1.0 / det
    # Scalar position along the first line.
    r = inv * (-s_dy * (ax - x1) + s_dx * (ay - y1))
    # Scalar position along the second line.
    s = inv * (-r_dy * (ax - x1) + r_dx * (ay - y1))
    # Average the two (numerically identical) descriptions of the point.
    xi = (x1 + r * r_dx + ax + s * s_dx) / 2.0
    yi = (y1 + r * r_dy + ay + s * s_dy) / 2.0
    return (xi, yi, 1, r, s)
|
73ffd9674302f3be7b0edd9cdddd95df198919cb
| 699,295
|
def render_icon(icon):
    """Render a Bootstrap glyphicon ``<span>`` element for *icon*."""
    return f'<span class="glyphicon glyphicon-{icon}"></span>'
|
4fcc501cbea07d3dbe99f35f63dc39c8f629c4c4
| 699,296
|
import binascii
def hex_to_bytes(hex_repn: str) -> bytes:
    """
    Convert a hexadecimal string representation of a block of bytes
    into a Python ``bytes`` object; inverse of ``bytes_to_hex``.

    hex_to_bytes('F803') -> b'\\xf8\\x03'
    """
    return binascii.unhexlify(hex_repn)
|
2c456a353e1cce75998d1b2dd9b077af962ee41d
| 699,297
|
import re
def get_text_url_base(content):
    """Return base URL to full text based on the content of the landing page.

    Parameters
    ----------
    content : str
        The content of the landing page for an rxiv paper.

    Returns
    -------
    str or None
        The base URL if available, otherwise None.
    """
    pattern = '(?:.*)"citation_html_url" content="([^"]+).full"'
    found = re.match(pattern, content, re.S)
    return found.group(1) if found else None
|
c679e9e1e8b074f7fea3aaf9eb256542e20a4d7d
| 699,298
|
def calculateMatchValue(incompleteObject, knownObject):
    """Calculate an integer match value: +1 for every key/value pair of
    *incompleteObject* whose value equals the one in *knownObject*."""
    return sum(
        1 for key, value in incompleteObject.items() if knownObject[key] == value
    )
|
bd0efa089fa9edc2e290a46ca7e18b3d22bd1594
| 699,299
|
def LCS1(a, b, i, j):
    """Length of the longest common subsequence of a[i:] and b[j:],
    computed by plain (exponential) recursion."""
    if i >= len(a) or j >= len(b):
        return 0
    if a[i] == b[j]:
        return 1 + LCS1(a, b, i + 1, j + 1)
    return max(LCS1(a, b, i + 1, j), LCS1(a, b, i, j + 1))
|
0b1819ee67dd6fc60e66b8c2b83f79c062a149db
| 699,300
|
import os
def noxcptRmDir(sDir, oXcptRet = False):
    """
    No exceptions os.rmdir wrapper.

    Returns True on success, oXcptRet when the removal fails.  The
    original bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; only OSError (what os.rmdir actually raises) is
    suppressed now.
    """
    oRet = True;
    try:
        os.rmdir(sDir);
    except OSError:
        oRet = oXcptRet;
    return oRet;
|
e1611ecd30927ef73d53a773d8ccc4bcb29df8cb
| 699,301
|
def get_contents_dir(node):
    """Return content signatures and names of all children of *node*,
    one "<csig> <name>\\n" line each, sorted by child name."""
    ordered = sorted(node.children(), key=lambda child: child.name)
    return ''.join('%s %s\n' % (child.get_csig(), child.name) for child in ordered)
|
147d26ae5fca9540a081feef495dfa4bb0fca8eb
| 699,302
|
import os
import sys
def _prepend_path(env):
"""
Make sure the PATH contains the location where the Python binary
lives. This makes sure cli tools installed in a virtualenv work.
"""
if env is None:
env = os.environ
env = dict(env)
new = os.path.dirname(sys.executable)
path = env.get('PATH')
if path is not None:
new = new + ':' + path
env['PATH'] = new
return env
|
7e5f8c59a5f24fc5b603e437db80f11464715aea
| 699,303
|
def two_way_merge(array1, array2):
    """
    Merge two sorted arrays into a single sorted array.

    Classic precursor to k-way merge: repeatedly take the smaller head
    element (ties favour array2, matching the original strict ``<``
    comparison) until one side is exhausted, then append the remainder
    of the other side.

    Improvements over the original implementation:
    - the inputs are no longer mutated (``list.pop(0)`` emptied them);
    - ``pop(0)`` is O(n) per call, making the merge O(n^2); index-based
      traversal restores the documented O(n) time.

    Time Complexity: O(n).  Auxiliary Space: O(n) for the output,
    where n is the combined length of the inputs.

    :param array1: A sorted array of values.
    :param array2: Another sorted array of values.
    :return: A single sorted array.
    """
    i = j = 0
    merged_arr = []
    while i < len(array1) and j < len(array2):
        if array1[i] < array2[j]:
            merged_arr.append(array1[i])
            i += 1
        else:
            merged_arr.append(array2[j])
            j += 1
    # At most one of these extends with anything.
    merged_arr.extend(array1[i:])
    merged_arr.extend(array2[j:])
    return merged_arr
|
6d1a4779634809b7ff63159928af668596f5c00e
| 699,304
|
import torch
def fake_image_tensor(H, W, C) -> torch.Tensor:
    """ Fake image tensor

    Returns a deterministic (seed 0) uniform-random tensor of shape
    ``(C, H, W)`` — channels-first, as produced by ``torch.rand``.

    :param H: image height
    :param W: image width
    :param C: number of channels
    :return: tensor of shape (C, H, W) with values in [0, 1)
    """
    torch.manual_seed(0)
    return torch.rand(C, H, W)
|
3cf09924a856ff46240804c6f4ceea65ce70410c
| 699,305
|
def wavelength_to_energy(wavelength: float):
    """Convert a photon wavelength in angstroms to photon energy in eV
    (E = 12398 / lambda)."""
    hc_factor = 1.2398  # eV * angstrom, scaled by 1e-4
    return hc_factor / wavelength * 1e4
|
fc067b9fd9cae68103742e8998b6aa0117d906d0
| 699,306
|
import os
def makedirs(name, exist_ok=False, **kwargs):
    """
    Make the directories in the given path and return it.  Backport of
    the ``exist_ok`` argument that os.makedirs gained in Python 3.2+.

    Fix: the original looked before it leaped (``os.path.exists`` then
    ``os.makedirs``), which is racy — the directory could be created
    or removed between check and call.  We now attempt the creation
    and suppress the failure only when *exist_ok* is set and the path
    does exist.
    """
    try:
        os.makedirs(name, **kwargs)
    except OSError:
        # Re-raise unless the caller tolerates a pre-existing path.
        if not (exist_ok and os.path.exists(name)):
            raise
    return name
|
aabea356ed392eb26117f25b0918e80b1dd6a5eb
| 699,307
|
import threading
def concurrent_map(func, data):
    """
    Like the builtin map(), but each element is processed in its own
    thread, concurrently.

    Note: unlike map(), `data` must be an indexable sequence (not an
    arbitrary iterable), so each thread can write its result into the
    right slot.
    """
    results = [None] * len(data)

    def worker(idx):
        # Each thread owns exactly one slot, so no locking is needed.
        results[idx] = func(data[idx])

    workers = [
        threading.Thread(target=worker, args=(idx,)) for idx in range(len(data))
    ]
    for thread in workers:
        thread.start()
    for thread in workers:
        thread.join()
    return results
|
5fb482055699087ca7f7b3b95b7d8c425894882f
| 699,308
|
def pack_quotes(quotes, **kwargs):
    """
    :param quotes: a list of BeautifulSoup tags containing quotes, and quote details
    :return: dict keyed by integer rank, each value holding the
        "Quote", title-cased "Movie" and "Year" strings
    """
    packed_quotes = {}
    for group in quotes:
        quote_tag = group.select_one(kwargs.get('quotetag'))
        text = quote_tag.string.strip()
        # Quote lines look like "1. Some quote": split rank from text.
        rank_part, quote = text.split(" ", 1)
        rank = int(rank_part.rstrip("."))
        movie_tag = group.select_one(kwargs.get('movietag'))
        # Exactly two strings are expected: the title and "(year)".
        movie_part, year_part = movie_tag.strings
        movie = movie_part.strip().title()
        year = year_part.lstrip('(').rstrip(')')
        packed_quotes[rank] = {"Quote": quote, "Movie": movie, "Year": year}
    return packed_quotes
|
011382f79572fedd0c40f10952d71c77ba25ae37
| 699,309
|
def van_vleck_weisskopf(νeval, ν, γ, δ):
    """Complex Van Vleck-Weisskopf line shape function.

    νeval GHz frequency at which line shape is evaluated
    ν     GHz center frequency of line
    γ     GHz width parameter of line
    δ     -   overlap parameter of line

    Returned value is unitless.
    """
    # Resonant and anti-resonant contributions.
    resonant = (1. - 1j*δ)/(ν - νeval - 1j*γ)
    antiresonant = (1. + 1j*δ)/(ν + νeval + 1j*γ)
    return νeval * (resonant - antiresonant)
|
f2f221196fbe911d4582cfc74bf260bb87ce7db0
| 699,310
|
def exists_management_key_with_priority_zero(
    active_management_keys, new_management_keys, management_keys_to_revoke
):
    """
    Check whether a priority-zero management key would remain after
    the management keys are updated according to the given parameters.

    Parameters
    ----------
    active_management_keys: dict
        The currently active management keys
    new_management_keys: dict
        The management keys to be added
    management_keys_to_revoke: set
        The management keys to be revoked

    Returns
    -------
    bool
    """
    # Work on a copy so the caller's dict stays untouched.
    updated_keys = dict(active_management_keys)
    for alias in management_keys_to_revoke:
        del updated_keys[alias]
    updated_keys.update(new_management_keys)
    lowest = min(key.priority for key in updated_keys.values())
    return lowest == 0
|
e4bff322bf4cb9d4b6ccc002f277a5af63e993e4
| 699,311
|
import os
def get_files_recursively(directory):
    """
    Recursively list the files under *directory*.

    Doc fix: despite the original docstring's claim of "absolute
    paths", the returned paths have always been RELATIVE to
    *directory* (the leading prefix is stripped); the documentation
    now matches the behavior.

    :param directory: directory to walk (with or without trailing "/")
    :return: list of file paths relative to *directory*
    """
    # Characters to strip from each joined path: the directory itself,
    # plus the separator when the argument doesn't already end in "/".
    prefix_len = len(directory) if directory.endswith("/") else len(directory) + 1
    files = []
    for base_path, _, sub_files in os.walk(directory):
        files += [os.path.join(base_path, name)[prefix_len:] for name in sub_files]
    return files
|
03ff3967b8061232386910c36f6a80a806fca50a
| 699,312
|
import re
def is_valid_username(field):
    """
    Check that *field* is a lowercase alphanumeric + underscore
    string, at least two characters long, starting with a letter and
    ending with a letter or digit.

    Fix: use ``re.fullmatch`` instead of ``re.match`` with a trailing
    ``$`` — ``$`` also matches just before a final newline, so e.g.
    "ab\\n" used to be accepted.
    """
    return re.fullmatch(r"[a-z][0-9a-z_]*[a-z0-9]", field) is not None
|
b4c870951715e103d58d1187703276dbb21e84cf
| 699,313
|
import os
def create_path(path):
    """
    Ensure the parent directory of *path* exists and return the
    absolute path of the file.

    Arg:
        path (Path): path to a file
    Return:
        created_path: absolute path to the file

    Fix: use ``os.makedirs(..., exist_ok=True)`` instead of an
    exists-check followed by makedirs — the old pattern was racy when
    two processes created the same directory concurrently.
    """
    abs_path = os.path.abspath(path)
    # Split into the directory part and the file name.
    dir_alone, file_alone = os.path.split(abs_path)
    os.makedirs(dir_alone, exist_ok=True)
    return os.path.join(dir_alone, file_alone)
|
dee201e7df29779a986230d897a17402777c0dd5
| 699,314
|
def rowSum_A(X, norm = False):
    """
    [Added 22/10/2018]
    Accumulate the elementwise squares of the rows of *X* (the sum of
    ``X[i]**2`` over all rows), optionally square-rooted when *norm*
    is True.  Intended as an einsum-free rowSum**2 for dense arrays.
    """
    total = 0
    for row in X:
        total = total + row ** 2
    if norm:
        total = total ** 0.5
    return total
|
cd6b147fb171094d3a4a2e77c1d5e63b07a6e560
| 699,315
|
import math
def find_divisors(n):
    """
    Count the divisors of *n*.

    Bug fix: the original looped over ``range(1, sqrt_n)``, which
    skipped the divisor pair containing ``sqrt_n`` itself whenever *n*
    is not a perfect square — e.g. it reported 4 divisors for 12
    instead of 6, and 2 for 6 instead of 4.  We now loop up to and
    including the integer square root, counting each divisor pair
    once, and subtract one when *n* is a perfect square so its root is
    not double-counted.
    """
    count = 0
    sqrt_n = int(math.sqrt(n))
    for d in range(1, sqrt_n + 1):
        if n % d == 0:
            count += 2  # d and n // d
    # A perfect square's root was counted twice above.
    if sqrt_n ** 2 == n:
        count -= 1
    return count
|
8d6d0406024dae195c0f13db60e0a65bee93dd6c
| 699,317
|
import sys
def map_annotations(experiment, sample, meta_dir, cv_map):
    """
    Maps NCBI SRA annotations to controlled vocabularies.

    Builds a nested annotation dictionary that maps metadata from NCBI
    to known controlled vocabulary terms, to help ensure uniformity in
    metadata details between runs of GEMmaker and between different
    sample sets.

    :param experiment: A dictionary containing the experiment metadata.
        (not referenced in this function body — presumably kept for
        interface symmetry; TODO confirm)
    :param sample: A dictionary containing the sample metadata (parsed
        SRA XML: keys such as "@accession", "TITLE", "SAMPLE_NAME",
        "SAMPLE_ATTRIBUTES").
    :param meta_dir: metadata directory (not referenced in this
        function body — verify whether still needed)
    :param cv_map: pandas DataFrame indexed by SRA tag, with a
        'CV_IDs' column of comma-separated nested annotation keys.
    :return: a dictionary of experiment annotations
    """
    # For case-insensitivity, convert all SRA tags to lower-case
    tags = [x.lower() for x in cv_map.index.values]
    # Now that we have all the metadata loaded create the non-nested
    # annotation dictionary
    annots = {}
    # Build info about the biological sample.
    # Term: biological sample, sep:00195
    # Term: data:2091, Accession
    # Term: schema:title
    # Term: schema:name
    annots["sep:00195"] = {}
    annots["sep:00195"]["data:2091"] = sample.get("@accession", "")
    annots["sep:00195"]["schema:title"] = sample.get("TITLE", "")
    annots["sep:00195"]["schema:name"] = sample.get("@alias", "")
    # Add the organism and its child terms.
    # Term: organism, obi:0100026
    # Term: rdfs:label
    # Term: Scientific Name, NCIT:C43459
    # Term: NCBI Taxonomy ID, data:1179
    annots["sep:00195"]["obi:0100026"] = {}
    annots["sep:00195"]["obi:0100026"]["rdfs:label"] = sample["SAMPLE_NAME"].get("SCIENTIFIC_NAME", "")
    annots["sep:00195"]["obi:0100026"]["NCIT:C43459"] = sample["SAMPLE_NAME"].get("SCIENTIFIC_NAME", "")
    annots["sep:00195"]["obi:0100026"]["data:1179"] = sample["SAMPLE_NAME"].get("TAXON_ID", "")
    # Iterate through the sample attributes
    if "SAMPLE_ATTRIBUTES" in sample and "SAMPLE_ATTRIBUTE" in sample["SAMPLE_ATTRIBUTES"]:
        attrs = sample["SAMPLE_ATTRIBUTES"]["SAMPLE_ATTRIBUTE"]
        # A single attribute parses as a dict rather than a list; normalize.
        if not isinstance(attrs, list):
            attrs = [attrs]
        for attr in attrs:
            # Skip tags with missing values.
            if attr["VALUE"].lower() == "missing":
                continue
            # Handle the cultivar
            elif attr["TAG"].lower() == "cultivar":
                annots["sep:00195"]["obi:0100026"]["GEMmaker:infraspecific_type"] = "cultivar"
                annots["sep:00195"]["obi:0100026"]["TAXRANK:0000045"] = attr["VALUE"]
            # Handle tags in the mapping file.
            elif attr["TAG"].lower() in tags:
                cvs = cv_map.loc[attr["TAG"].lower()]['CV_IDs'].split(',')
                # SECURITY NOTE: exec() builds a nested dict assignment
                # from cv_map keys and the attribute value — only run
                # with trusted mapping files and metadata, since quotes
                # in VALUE would break (or inject into) the statement.
                code = "annots['%s'] = '%s'" % ("']['".join(cvs), attr["VALUE"])
                exec(code)
                print(code)
            else:
                sys.stderr.write("Unhandled sample attribute: \"%s\": \"%s\"\n" % (attr["TAG"], attr["VALUE"]))
    return annots
|
cca25408225658cbdca30c5e738acaa6d5c2c707
| 699,318
|
import importlib
def load_plugin(name):
    """
    Import the ``plugins.<name>`` module and return its ``run``
    attribute.

    :param name: plugin module name under the ``plugins`` package
    """
    module = importlib.import_module(f"plugins.{name}", ".")
    return module.run
|
5631f51672dd8a4597a49b4336584e769577ff09
| 699,319
|
def exchange_ecYeast(s1, subystem):
    """
    Mark exchange reactions in a subsystem label list.

    For each reaction string in ``s1`` (e.g. 'a --> b', 'a <=> c'),
    the matching entry of ``subystem`` is overwritten with
    'Exchange reaction' when one side of the arrow is empty (i.e. an
    exchange with the environment); otherwise the label is kept.

    Example inputs:
        s1 = ['a --> b', 'a <=> c',
              'H+ [extracellular] + L-citrulline [extracellular] <=> H+ [cytoplasm] L-citrulline [cytoplasm]',
              ' a--> ']
        subystem = ['a', 'a', 'b', '']

    :param s1: list of reaction equation strings
    :param subystem: list of subsystem labels, same length as s1;
        modified in place and also returned
    :return: the (modified) subystem list
    """
    for i, x in enumerate(s1):
        print(i)  # debug output: current reaction index
        if ' --> ' in x:
            x0 = x.split(' --> ')
            # Both sides non-empty -> ordinary reaction, keep the label.
            if len(x0[1]) >=1 and len(x0[0]) >=1:
                #subystem.append('General') # exchange
                subystem[i] = subystem[i]  # no-op: label kept as-is
            else:
                subystem[i] ='Exchange reaction' #exchange
                print(subystem[i])
        # NOTE(review): the 'else' below belongs to this '<=>' check,
        # so a ' --> ' reaction also runs the no-op else branch, and
        # arrows without surrounding spaces (' a--> ') are never
        # detected — confirm this is intended.
        if ' <=> ' in x:
            x0 = x.split(' <=> ')
            if len(x0[1]) >=1 and len(x0[0]) >=1:
                #subystem.append('General') # exchange
                subystem[i] = subystem[i]  # no-op: label kept as-is
            else:
                subystem[i] ='Exchange reaction' #exchange
                print(subystem[i])
        else:
            subystem[i] = subystem[i]  # no-op
    return subystem
|
0f8eb84b3270c910acd673491238d52b2744f68c
| 699,320
|
def readResults(file_name, _start_time, _end_time):
    """Read micro-data from *file_name*, one line per year: the first
    comma-separated field is the year, the rest are per-household float
    values.  All values on lines whose year satisfies
    ``_start_time <= year <= _end_time`` (both inclusive) are returned
    as a single flat list of floats."""
    values = []
    with open(file_name, "r") as handle:
        for line in handle:
            fields = line.split(',')
            if _start_time <= int(fields[0]) <= _end_time:
                values.extend(float(field) for field in fields[1:])
    return values
|
4ca6bb102b0605d3472430725a8349fa891dd247
| 699,321
|
import subprocess
import os
def run_nodejs(js_filename):
    """Run the given file in nodejs and capture its stdout as text.

    Fix: the original used ``Popen`` + ``communicate(timeout=10)``;
    when the timeout fired, TimeoutExpired was raised but the node
    process was left running.  ``subprocess.run`` kills the child on
    timeout before re-raising.

    :param js_filename: path of the JavaScript file to execute
    :return: stdout decoded as ascii (undecodable bytes ignored), with
        platform line separators normalized to '\\n'
    """
    completed = subprocess.run(
        ['node', js_filename], stdout=subprocess.PIPE, timeout=10)
    outs = completed.stdout.decode('ascii', errors='ignore')
    return outs.replace(os.linesep, '\n')
|
623c4201638272dfb40a35d7ce1ddb6949eed39a
| 699,322
|
def array_pack(img):
    """
    Serialize a numpy array (e.g. an OpenCV image) into a
    (shape, dtype, raw-bytes) tuple suitable for transport or storage.

    :param img: numpy array
    :return: (shape, dtype, bytes) tuple
    """
    return (img.shape, img.dtype, img.tobytes())
|
e01398caf1018000d0dabd776945a223e39b130b
| 699,323
|
def select_number(list_of_numbers):
    """Prompt until the player types *one* number that belongs to
    *list_of_numbers*; return it as an int."""
    answer = ""
    # Keep asking while the input is not a decimal string or is not
    # one of the offered numbers.
    while not answer.isdecimal() or int(answer) not in list_of_numbers:
        answer = input("Please type selected number and press ENTER: ")
    return int(answer)
|
af91b18508ff361b0524a551949aac7d93f9afc6
| 699,324
|
def welcome():
    """Simplest possible endpoint: return a greeting string."""
    return 'Hello World'
|
5ae25fb699343bb48cdf113dc783a37290b3c38a
| 699,325
|
def cast(s):
    """ Clarify the implicit type of strings, a priori coming from
    csv-like files: integer-looking strings become int, strings with a
    decimal point or exponent become float, everything else is
    returned lower-cased.

    Fixes over the original:
    - scientific notation with a lowercase 'e' (e.g. '1e+1') was
      returned as a string because only uppercase 'E' was checked;
    - the bare ``except:`` is narrowed to ValueError.

    Example
    -------
    >>> cast('1')
    1
    >>> cast('1.')
    1.0
    >>> cast('1E+0')
    1.0
    >>> cast('1e+1')
    10.0
    >>> cast('one')
    'one'
    >>> cast('One')
    'one'
    """
    s = str(s)
    try:
        float(s)
        if '.' in s or 'e' in s.lower():
            return float(s)
        # No decimal point or exponent: treat as an integer.  Note
        # that e.g. 'inf' reaches this int() call and falls through to
        # the string branch via ValueError, as before.
        return int(s)
    except ValueError:
        return s.lower()
|
5e2c9b3b913a733608cab614ae0f08421e489196
| 699,326
|
def is_macosx_sdk_path(path):
    """
    Returns True if 'path' can be located in an OSX SDK
    (under /usr/ but not /usr/local, or under /System/).
    """
    under_usr = path.startswith('/usr/') and not path.startswith('/usr/local')
    return under_usr or path.startswith('/System/')
|
59b60fa00bf6dd10cf207e6421ea3e4828de5b9c
| 699,327
|
import math
def _mean_standard(dist):
"""Find the mean and standard deviation."""
# In the dist dictionary, the key is the value of the metric and
# the value is the number of times it appears. So, the sample
# value is the key and the number of samples for the value is the
# value in dist for that key.
total_samples = sum(dist.values())
total_values = sum(key*value
for key, value in dist.items())
mean = total_values/total_samples
std_squared = sum((value/total_samples) * (key - mean)**2
for key, value in dist.items())
std = math.sqrt(std_squared)
return mean, std
|
3d90807a619b6fe090571a3f07db807e60c3849c
| 699,328
|
def shift2d(x, w, a, b):
    """Shift a 2d quadrature rule (points *x*, weights *w*) from the
    unit square to the box [a[0], b[0]] x [a[1], b[1]] defined by the
    tuples *a* and *b*."""
    scale = [[b[0] - a[0]], [b[1] - a[1]]]
    shifted_points = [[a[0]], [a[1]]] + x * scale
    # Weights scale with the area of the target box.
    shifted_weights = w * (b[0] - a[0]) * (b[1] - a[1])
    return shifted_points, shifted_weights
|
84c2090cc0e1689a79aa2c7fa40fb399bc03f535
| 699,329
|
def weight_on_edge(data, zxing):
    """
    Fractional position of the zero crossing between data[zxing] and
    data[zxing + 1], found by linear interpolation.  The result is
    independent of the cell width, so no width estimate is needed even
    when one is already available.
    """
    before = data[zxing]
    after = data[zxing + 1]
    # Fraction of the cell traversed before the value reaches zero.
    return -before / (after - before)
|
3af14f15589dd5227a21b53598b896a8cca4c79a
| 699,330
|
import os
def get_logging_config():
    """
    Return the path of the '__logging__.ini' configuration file that
    sits next to this module.

    :return: str
    """
    here = os.path.dirname(__file__)
    return os.path.normpath(os.path.join(here, '__logging__.ini'))
|
a2322151c261f7f89bbdc004d128acbad701645f
| 699,331
|
import dill
def hashable(a) -> int:
    """
    Hash many kinds of objects, including some the builtin ``hash``
    rejects.  Dicts are hashed from their (recursively hashed) items,
    lists/tuples from their elements; unhashable objects fall back to
    a dill pickle, then to their ``__dict__``, then to ``id()``.
    """
    if isinstance(a, dict):
        # Hash keys and values recursively, then hash the tuple of
        # int pairs with the builtin.
        return hash(tuple((hashable(k), hashable(v)) for k, v in a.items()))
    if isinstance(a, (tuple, list)):
        # Lists are mutable and unhashable by default; for memoization
        # the hash must depend on the items.
        return hash(tuple(hashable(item) for item in a))
    try:
        return hash(a)
    except TypeError:
        pass
    # Not hashable: serialized bytes, then attributes, then identity.
    try:
        return hash(dill.dumps(a))
    except Exception:
        return hashable(a.__dict__) if hasattr(a, "__dict__") else id(a)
|
29f55a4586f09156618a70e8443dc8b847260e14
| 699,332
|
def effective_kernel(kernel_shape, dilations):
    """
    Args:
        kernel_shape: tuple[int] representing the kernel shape in each
            given dimension.
        dilations: tuple[int] representing the dilation of the kernel
            in each given dimension.  Must be the same length as
            kernel_shape, in the same dimension order.

    Returns: list[int] with the effective extent of the kernel in each
        dimension, taking dilation into account.  (Doc fix: the
        original docstring said tuple, but a list has always been
        returned.)  A dilation of 1 is equivalent to no dilation.

    Raises:
        ValueError: if the two arguments differ in length.

    See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions
    """
    if len(kernel_shape) != len(dilations):
        raise ValueError(
            "kernel_shape ({}) and dilations ({}) must be the same length".format(
                len(kernel_shape), len(dilations)
            )
        )
    # A dilated kernel spans (k - 1) gaps of size d plus the final tap.
    return [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)]
|
a955d185c924514bf981c2512a8e78646d523144
| 699,333
|
def int_from_32bit_array(val):
    """Convert a big-endian byte sequence into an integer.

    :param val: the bytes to convert — anything ``bytearray`` accepts,
        e.g. ``bytes``, ``bytearray`` or a list of ints.  (Doc fix:
        the original declared ``:type val: int``, but ``bytearray(n)``
        for an int produces *n* zero bytes and the result would always
        be 0.)
    :rtype: int
    """
    rval = 0
    # Fold bytes in most-significant-first order.
    for fragment in bytearray(val):
        rval <<= 8
        rval |= fragment
    return rval
|
ca2f32c7eac6f42aeed688510fa57e717edfacdf
| 699,334
|
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
    """Form an evaluation as a dictionary. Usually suited to report on configuration change rules.

    Keyword arguments:
    configuration_item -- the configurationItem dictionary in the invokingEvent
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    annotation -- an annotation to be added to the evaluation (default None)
    """
    eval_ci = {}
    # Annotation is optional and only attached when truthy.
    if annotation:
        eval_ci['Annotation'] = annotation
    eval_ci.update({
        'ComplianceResourceType': configuration_item['resourceType'],
        'ComplianceResourceId': configuration_item['resourceId'],
        'ComplianceType': compliance_type,
        'OrderingTimestamp': configuration_item['configurationItemCaptureTime'],
    })
    return eval_ci
|
05769fc549acdcec7ed611abeff5f0b4f695e57e
| 699,335
|
import os
import json
def load_json_config(app, config_filename):
    """ Adds support for eventador's config.json format

    Reads *config_filename* (relative to the Flask app's root path),
    mirrors every key under an upper-cased alias, loads the result
    into ``app.config``, stores the raw dict as ``EV_CONFIG`` and
    derives SQLAlchemy settings from the ``db_config`` section (the
    DB_* environment variables take precedence over file values).

    :param app: Flask application instance
    :param config_filename: config file name, relative to
        ``app.config.root_path``
    :return: the parsed configuration dict
    """
    filename = os.path.join(app.config.root_path, config_filename)
    with open(filename) as json_file:
        obj = json.load(json_file)
    # Duplicate each key in upper case: Flask config keys are
    # conventionally UPPER_SNAKE_CASE.
    keys = list(obj.keys())
    for key in keys:
        obj[key.upper()] = obj[key]
    # NOTE(review): from_object() reads *attributes*; a plain dict has
    # no uppercase attributes, so this call may effectively be a no-op
    # — confirm against Flask's Config.from_object behavior.
    app.config.from_object(obj)
    # save off the config as well
    app.config['EV_CONFIG'] = obj
    def setup_db_configs(app, config):
        # Each DB setting prefers the environment variable, falling
        # back to the config-file value (or None).
        host = os.getenv('DB_HOST', config.get('host', None))
        port = os.getenv('DB_PORT', config.get('port', None))
        database = os.getenv('DB_DATABASE', config.get('database', None))
        user = os.getenv('DB_USER', config.get('user', None))
        passwd = os.getenv('DB_PASSWORD', config.get('password', None))
        app.config['SQLALCHEMY_DATABASE_URI'] = f"postgresql://{user}:{passwd}@{host}:{port}/{database}"
        app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    setup_db_configs(app, obj.get('db_config', {}))
    return obj
|
f3156d41b075cabc04628f1ad2e8f5ca7bb8551b
| 699,336
|
import functools
def requires_attr(attr_name, raiser):
    """
    Decorator factory: methods wrapped by the returned decorator call
    ``raiser(self, meth, attr_name)`` when ``self.<attr_name>`` is
    unset (missing or None) before the method body runs.
    """
    def decorator(meth):
        @functools.wraps(meth)
        def wrapper(self, *args, **kwargs):
            value = getattr(self, attr_name, None)
            if value is None:
                raiser(self, meth, attr_name)
            return meth(self, *args, **kwargs)
        return wrapper
    return decorator
|
e88d5ecfc39688683c7c29fbfc68db79cc760216
| 699,337
|
def _pretty_print_label(d):
"""Internal utility to pretty print point label info."""
s = " %s: "%repr(d[0])
entry_keys = list(d[1].keys())
ki = 0
kimax = len(entry_keys)
for k in entry_keys:
keys = list(d[1][k].keys())
if len(keys) == 0:
s += "{%s: {}}"%k
else:
s += "{%s: {keys=%s}}"%(k,",".join(keys))
if ki < kimax-1:
s += ', '
ki += 1
return s
|
4972b54d2402d87e6fe276605c59d21cfdc91533
| 699,338
|
def dict2obj(target, change_dict=True):
    """
    Convert a dict into an object whose attributes are the dict keys.

    When *change_dict* is True, nested dicts — including dicts found
    inside lists or tuples — are converted recursively (tuples come
    back as lists); otherwise nested values are attached unchanged.

    :param target: dict
    :param change_dict: bool
    :return: obj
    """
    class Obj(object):
        def __init__(self, d, change_dict):
            for key, value in d.items():
                if change_dict is not True:
                    setattr(self, key, value)
                elif isinstance(value, (list, tuple)):
                    converted = [Obj(item, change_dict) if isinstance(item, dict) else item
                                 for item in value]
                    setattr(self, key, converted)
                else:
                    setattr(self, key,
                            Obj(value, change_dict) if isinstance(value, dict) else value)
    return Obj(target, change_dict=change_dict)
|
4beec4d45528e2939a0c63333b8a401e34df3b3d
| 699,339
|
def splitbycharset(txt, charset):
    """Split a string at the first occurrence of a character in a set.

    Args:
        txt: Text to split.
        charset: Chars to look for (specified as string).

    Returns:
        (char, before, after) where char is the first character from
        *charset* found in *txt*, and before/after are the substrings
        around it.  If none of the characters occurs, char and after
        are empty strings and before is the entire text.
    """
    for position, character in enumerate(txt):
        if character in charset:
            return txt[position], txt[:position], txt[position + 1:]
    return '', txt, ''
|
c23577eb96c9621909acaa816e8791e95dbf493d
| 699,340
|
def process_meta_review(meta_review):
    """
    Do something with the meta-review JSON!

    In a real integration this is where you would map
    ``meta_review["ty_id"]`` to your own hotel ID, traverse the
    structure documented at
    http://api.trustyou.com/hotels/documentation.html, and store the
    data for reports or a live website.  Here, for demonstration
    purposes, a small custom summary sentence is printed.
    """
    ty_id = meta_review["ty_id"]
    trust_score = meta_review.get("summary", {}).get("score")
    badges = meta_review.get("badge_list", [])

    def strip_markup(text):
        """
        Badge texts contain some formatting hints. Remove them for display on the terminal.
        """
        for tag in ("<strong class=\"label\">", "</strong>",
                    "<span class=\"hotel-type\">", "</span>", "<strong>"):
            text = text.replace(tag, "")
        return text

    # Skip the overall badge, which is always in first place.
    badge_texts = [strip_markup(badge["text"]) for badge in badges[1:]]
    sentence = "Hotel {ty_id} has a score of {trust_score}, and got awarded these badges: {badge_texts}".format(
        ty_id=ty_id,
        trust_score=trust_score,
        badge_texts=(", ".join(badge_texts) or "No badges!")
    )
    print(sentence)
|
681ace9b9685f4bfd284ac6e41efde12bafbb1b9
| 699,341
|
def find_natural_number(position: int) -> tuple:
    """Return the digit (as int) at 1-based *position* of the digit
    sequence "123456789101112..." together with the natural number
    that digit belongs to.

    Bug fix: the original returned ``num_range[-1]`` — the LAST digit
    generated — so any position that falls inside a multi-digit number
    but is not its final digit gave the wrong answer (e.g. position 10
    returned 0 instead of the '1' of "10").  Index ``position - 1``
    always lies within the most recently appended number, so the
    counter is still the owning number.
    """
    digits = ""
    counter = 0
    while len(digits) < position:
        counter += 1
        digits += str(counter)
    return int(digits[position - 1]), counter
|
62aff0c400ac007fc67dafd25eb4cacf63262105
| 699,342
|
def camel_case(snake_case: str) -> str:
    """Convert a snake_case string to camelCase.

    Args:
        snake_case (str): raw snake case string, eg `sample_text`
    Returns:
        str: camel cased string, eg `sampleText`
    """
    head, *tail = snake_case.split("_")
    return head + "".join(part.title() for part in tail)
|
b3080a3e3520209581cbd381ffaff24045343115
| 699,343
|
def _guess_bounds(cube, coords):
"""Guess bounds of a cube, or not."""
# check for bounds just in case
for coord in coords:
if not cube.coord(coord).has_bounds():
cube.coord(coord).guess_bounds()
return cube
|
44cdcdc8ee0dd821d3abdef880efe11c7cf8d48e
| 699,344
|
def reduce_repeat_bb(text_list, break_token):
    """
    Collapse ['<b>Local</b>', '<b>government</b>', '<b>unit</b>'] into
    ['<b>Local government unit</b>'] — but only when EVERY entry
    starts with '<b>'; otherwise the list is returned unchanged.
    (An <i>...</i> style could be processed the same way.)

    :param text_list: list of html text fragments
    :param break_token: separator inserted between the merged texts
    :return: single-element merged list, or the original list
    """
    if all(text.startswith('<b>') for text in text_list):
        stripped = [text.replace('<b>', '').replace('</b>', '')
                    for text in text_list]
        return ['<b>' + break_token.join(stripped) + '</b>']
    return text_list
|
1c9d88ca295ab31831773264c2ede597b707f3f6
| 699,345
|
def ferret_result_limits(efid):
    """
    Abstract axis limits for the shapefile_writexyval PyEF: the first
    axis is fixed to a single point, the remaining five are
    unspecified.
    """
    return ((1, 1),) + (None,) * 5
|
ba5c3d3a5760b11384b551c08464c16587e58437
| 699,346
|
import os
def mkdir(path_name):
    """
    Create the directory (and parents) at *path_name* if it does not
    exist.

    Args:
        path_name: dir path
    Returns:
        path

    Fix: delegate the existence check to ``os.makedirs(...,
    exist_ok=True)`` — the previous exists-then-create pattern was
    racy when two processes created the directory concurrently.
    """
    os.makedirs(path_name, exist_ok=True)
    return path_name
|
1a67d78870bed50a59e49a290ff57c6d6e58e2d4
| 699,347
|
def configmap():
    """
    Sample ConfigMap manifest, following
    https://kubernetes.io/docs/tasks/configure-pod-container/configmap/
    """
    game_properties = (
        "enemies=aliens\nlives=3\nenemies.cheat=true\n"
        "enemies.cheat.level=noGoodRotten\n"
        "secret.code.passphrase=UUDDLRLRBABAS\n"
        "secret.code.allowed=true\nsecret.code.lives=30"
    )
    ui_properties = (
        "color.good=purple\ncolor.bad=yellow\n"
        "allow.textmode=true\nhow.nice.to.look=fairlyNice"
    )
    return {
        'apiVersion': 'v1',
        'kind': 'ConfigMap',
        'metadata': {'name': 'testcfgmap-game-config'},
        'data': {
            'game.properties': game_properties,
            'ui.properties': ui_properties,
        },
    }
|
aadc00e8046be12370202f82adca12b222b0696f
| 699,348
|
def identify_contentType(url):
    """
    Given a URL for a content, identify the type of the content.

    :param url(str): URL
    :returns: "youtube", "pdf", "ecml" (known media/archive
        extensions) or "unknown"
    """
    media_extensions = ('mp3', 'wav', 'jpeg', 'zip', 'jpg', 'mp4',
                        'webm', 'ecar', 'png')
    if 'youtu.be' in url or 'youtube' in url:
        return "youtube"
    if url.endswith('pdf'):
        return "pdf"
    if url.endswith(media_extensions):
        return "ecml"
    return "unknown"
|
720a9ea9adaab547309f61b6858b0eb19ddebafe
| 699,349
|
def nice_frequency(frequency):
    """Return the frequency as a string with an SI unit suffix and a
    reasonable number of digits for its magnitude."""
    if frequency < 1e3:
        return "%dHz" % frequency
    # (upper bound, format, divisor) per magnitude band.
    bands = (
        (10e3, "%.3fkHz", 1e3),
        (100e3, "%.2fkHz", 1e3),
        (1e6, "%.1fkHz", 1e3),
        (10e6, "%.3fMHz", 1e6),
        (1e9, "%.2fMHz", 1e6),
    )
    for limit, fmt, divisor in bands:
        if frequency < limit:
            return fmt % (frequency / divisor)
    return "%.2fGHz" % (frequency / 1e9)
|
8fefa58236c57878be9f051e9cbfff85d398f87c
| 699,350
|
def stack(x, filters, num_blocks, block_fn, use_bias, stride1=2, name=None):
    """A set of stacked residual blocks.

    Arguments:
        x: input tensor.
        filters: integer, filters of the bottleneck layer in a block.
        num_blocks: integer, blocks in the stacked blocks.
        block_fn: a function of a block to stack.
        use_bias: boolean, enables or disables all biases in conv layers.
        stride1: default 2, stride of the first layer in the first block.
        name: string, stack label.
    Returns:
        Output tensor for the stacked blocks.
    """
    # The first block may downsample (stride) and needs a projection
    # shortcut; the remaining blocks keep the shape.
    out = block_fn(x, filters, conv_shortcut=True, stride=stride1,
                   use_bias=use_bias, name=f'{name}_block1')
    for block_index in range(2, num_blocks + 1):
        out = block_fn(out, filters, use_bias=use_bias,
                       name=f'{name}_block{block_index}')
    return out
|
1ed366778a5abba4e05c070da05e3c87cbd92560
| 699,351
|
def compute(x, y):
    """Compute the sum of the passed-in arguments.

    :param x: Integer (operand 1)
    :param y: Integer (operand 2)
    :rtype: Integer
    :raises ValueError: if either operand is outside the range 0-100
    """
    for operand in (x, y):
        if not 0 <= operand <= 100:
            raise ValueError("Passed in value out of bounds")
    return x + y
|
beb518f7f9b23f1fa5dc7ac82386a9cbf34a5cd5
| 699,353
|
def pid_info(output_line):
    """
    Take a line of 'ps -o pid,comm' output and return the PID number and name.

    The line looks something like:
        9108 orterun
    or
        10183 daos_server

    Note: there could be leading spaces on the pid.

    :param output_line: one line of ``ps`` output
    :return: tuple ``(name, pid)`` as strings, or ``("", None)`` when the
        line does not contain at least two whitespace-separated fields
    """
    info = output_line.lstrip().split()
    try:
        return info[1], info[0]
    except IndexError:
        # Narrowed from ``except Exception as e`` (e was unused); only an
        # index failure on a short/empty line is possible here.
        print("Unable to retrieve PID info from {}".format(output_line))
        return "", None
|
479c51e562b7bbeb7e812fad4b2cb1f771e2e830
| 699,354
|
from typing import List
from typing import Tuple
def two_columns(pairs: List[Tuple]) -> str:
    """
    Join pairs (or larger tuples) of values into a columnar string.

    :param pairs: List of tuples of strings
    :return: String with one space-joined row per line
    """
    rows = []
    for row in pairs:
        rows.append(' '.join(str(cell) for cell in row))
    return '\n'.join(rows)
|
810fdab52b4641c32f54c6adc772f8352b8f2ae6
| 699,357
|
def get_record_as_json(cur, tablename, row_id):
    """
    Get a single record from the database, by id, as json.

    :param cur: an open DB-API cursor; the SQL uses ``row_to_json`` and
        ``%s`` placeholders, so presumably a PostgreSQL/psycopg2-style
        cursor -- TODO confirm against callers
    :param tablename: table to read; interpolated verbatim into the SQL
    :param row_id: value matched against the table's ``id`` column
    :return: the matching row serialized as JSON, with an extra
        ``tablename`` field naming the source table
    """
    # IMPORTANT NOTE: Only use this function in trusted input. Never on data
    # being created by users. The table name is not escaped.
    q = """
    SELECT row_to_json(new_with_table)
    FROM (SELECT {t}.*, '{t}' AS tablename FROM {t}) new_with_table
    WHERE id=%s;""".format(
        t=tablename,
    )
    # row_id is passed as a bound parameter, so it IS safely escaped.
    cur.execute(q, (row_id,))
    return cur.fetchone()[0]
|
00521ae582a013f97b77494944e0f6e04069ed05
| 699,358
|
def stream_handler_set_stream(stream_handler_instance, stream):
    """
    Backport of StreamHandler.setStream() for Python 2.

    Swaps the handler's underlying stream for *stream* and returns the
    previous one, or ``None`` when *stream* is already the current stream
    (in which case nothing is done).

    :param stream_handler_instance: a ``logging.StreamHandler``-compatible
        object exposing ``stream``, ``acquire``, ``flush`` and ``release``
    :param stream: the new stream to write log records to
    :return: the old stream, or ``None`` if unchanged
    """
    if stream is stream_handler_instance.stream:
        result = None
    else:
        result = stream_handler_instance.stream
        # Hold the handler lock while swapping so no record is emitted
        # mid-swap; flush pending output to the old stream first.
        stream_handler_instance.acquire()
        try:
            stream_handler_instance.flush()
            stream_handler_instance.stream = stream
        finally:
            stream_handler_instance.release()
    return result
|
0da047fc1a4e7c17e91d51769ede549551ab9813
| 699,359
|
def zip_(self, iterable):
    """A list of zipped tuples, stopping when either input is exhausted.

    >>> [1, 2, 3].zip([1, 2, 3])
    [(1, 1), (2, 2), (3, 3)]
    >>> [1, 2, 3].zip([3, 2])
    [(1, 3), (2, 2)]
    """
    return [pair for pair in zip(self, iterable)]
|
b71ea3414a7817959e5b58ad7cd476b7caa3c770
| 699,360
|
def segno(x):
    """
    Sign of a number.

    Input: x, a number
    Return: 1.0 if x > 0, -1.0 if x < 0, 0.0 if x == 0.
    (A NaN input matches no branch and implicitly yields None,
    exactly as before.)
    """
    if x > 0.0:
        return 1.0
    if x < 0.0:
        return -1.0
    if x == 0.0:
        return 0.0
|
5caccfa037b2972755958f647f009660aefae59d
| 699,361
|
def swap_xyz_string(xyzs, permutation):
    """
    Apply an axis permutation to a list of symmetry-operation xyz strings.

    Args:
        xyzs: e.g. ['x', 'y+1/2', '-z']
        permutation: list, e.g., [0, 2, 1] -- the new axis order; only the
            six permutations of [0, 1, 2] are handled
    Returns:
        the new list of xyz strings after transformation; for the identity
        permutation the input list itself is returned unchanged (no copy)
    """
    if permutation == [0,1,2]:
        return xyzs
    else:
        new = []
        for xyz in xyzs:
            # Reorder the comma-separated components, then rename the axis
            # letters so each component refers to its new axis.
            tmp = xyz.replace(" ","").split(',')
            tmp = [tmp[it] for it in permutation]
            if permutation == [1,0,2]: # swap x,y
                tmp[0] = tmp[0].replace('y','x')
                tmp[1] = tmp[1].replace('x','y')
            elif permutation == [2,1,0]: # swap x,z
                tmp[0] = tmp[0].replace('z','x')
                tmp[2] = tmp[2].replace('x','z')
            elif permutation == [0,2,1]: # swap y,z
                tmp[1] = tmp[1].replace('z','y')
                tmp[2] = tmp[2].replace('y','z')
            elif permutation == [1,2,0]: # 3-cycle of the axes
                tmp[0] = tmp[0].replace('y','x')
                tmp[1] = tmp[1].replace('z','y')
                tmp[2] = tmp[2].replace('x','z')
            elif permutation == [2,0,1]: # inverse 3-cycle of the axes
                tmp[0] = tmp[0].replace('z','x')
                tmp[1] = tmp[1].replace('x','y')
                tmp[2] = tmp[2].replace('y','z')
            new.append(tmp[0] + ", " + tmp[1] + ", " + tmp[2])
        return new
|
efd0eb400be0dca26b49c3b2d3a04a385a2fc6e4
| 699,363
|
def speed_2_pace(speed):
    """Convert a speed in km/hr to a pace in s/km."""
    seconds_per_hour = 3600
    return seconds_per_hour / speed
|
25379049e98622ec5888461a9f6455be399d5f5a
| 699,364
|
from pathlib import Path
def get_dictionaries(base: Path) -> list[tuple[str, list[Path]]]:
    """Make a list of available dictionaries and the interesting files.

    Walks ``base/<dir>/AssetData/<dict>/Contents/Resources`` and collects
    every entry whose suffix is not ``.lproj``.

    :param base: The Apple Dictionaries root directory.
    :return: A name-sorted list of (name, files) tuples; dictionaries with
        no interesting files are dropped.
    """
    entries = []
    for dic_path in Path(base).iterdir():
        if not dic_path.is_dir():
            continue
        for dic in (dic_path / "AssetData").iterdir():
            resources = dic / "Contents" / "Resources"
            interesting = [
                entry for entry in resources.iterdir()
                if entry.suffix != ".lproj"
            ]
            entries.append((dic.stem, interesting))
    entries.sort(key=lambda entry: entry[0])
    return [(name, files) for name, files in entries if files]
|
98068ecb847aa6975cc8dc7eed59b8948316f191
| 699,366
|
def generate_recipient_from(nhais_cypher):
    """
    Generate the recipient cypher from the given nhais cypher.

    A 2-character nhais cypher gets "01" appended; a 3-character one gets
    "1" appended. Any other length yields an empty string.

    :param nhais_cypher: The nhais cypher provided. Should be 2-3 characters.
    :return: The recipient cypher
    """
    length = len(nhais_cypher)
    if length == 2:
        return nhais_cypher + "01"
    if length == 3:
        return nhais_cypher + '1'
    return ''
|
ac29a09f246fc38b43301cc1b00a4056fc49e203
| 699,367
|
def _convert_color(color):
    """Convert a 24-bit color to 16-bit.

    The input `color` is assumed to be a 3-component list [R,G,B], each with
    8 bits for color level.

    The components are packed into a 16-bit number laid out as
    (5 bits, 6 bits, 5 bits) from most- to least-significant: the HIGH
    5 bits hold blue, the middle 6 bits hold green, and the LOW 5 bits
    hold red (see the shifts/masks below). Note this is the byte-swapped
    (BGR-style) variant of the common 565 layout.

    :raises ValueError: if any component is outside 0-255.
    """
    for i in color:
        if i not in range(256):
            raise ValueError("Valid color value for R, G, B is 0 - 255.")
    red, green, blue = color
    # Keep the top 5/6/5 bits of each channel and pack them as B|G|R.
    return ((blue & 0xF8) << 8) | ((green & 0xFC) << 3) | ((red & 0xF8) >> 3)
|
6635e30d574288146c5ff07bf3d0d1661cd33b97
| 699,368
|
def MakeFutureReservationMessage(messages, reservation_name, sku_properties,
                                 time_window, share_settings, reservation_zone):
  """Constructs a future reservation message object.

  Args:
    messages: the generated proto message module.
    reservation_name: name for the future reservation.
    sku_properties: specific SKU properties message.
    time_window: time window message.
    share_settings: optional share settings; only attached when truthy.
    reservation_zone: zone for the reservation.

  Returns:
    A populated messages.FutureReservation.
  """
  reservation = messages.FutureReservation(
      name=reservation_name,
      specificSkuProperties=sku_properties,
      timeWindow=time_window,
      zone=reservation_zone)
  if share_settings:
    reservation.shareSettings = share_settings
  return reservation
|
4ca46162638df14977e7292c25926aecc6b8876e
| 699,369
|
import os
def output_unconverted_csv_file(path, sheet_name):
    """
    Write a placeholder CSV for a sheet that could not be converted.

    Creates ``<sheet_name>.csv`` under *path* and writes a (Japanese)
    notice saying the sheet could not be converted to CSV, e.g. because
    it contains pictures/charts or too little usable data.

    :param path: output directory
    :param sheet_name: name of the unconvertible sheet
    :return: None
    """
    target = os.path.join(path, "{}.csv".format(sheet_name))
    with open(target, 'w', newline='', encoding='cp932',
              errors='ignore') as placeholder:
        placeholder.write("シート {} はCSVに変換できませんでした。".format(sheet_name))
        placeholder.write("シートの中に写真・グラフが入ったり、"
                          "シートに入っている有効データが少なかったりするかもしれません。")
    return None
|
87e8cfb56930d7aee2934d7f3154cae7c0b14e08
| 699,370
|
def list_get(lst, index, default=None):
    """
    A safety mechanism for accessing uncharted indexes of a list. Always
    remember: safety first!

    :param lst: list
    :param index: int (negative indexes work as usual)
    :param default: A default value
    :return: Value of list at index -or- default value
    :raises AssertionError: if ``lst`` is not a list
    """
    # isinstance instead of ``type(lst) == list`` (same AssertionError
    # contract, but also accepts list subclasses).
    assert isinstance(lst, list), "Requires a list type"
    try:
        return lst[index]
    except IndexError:
        return default
|
41ca83d6a9b0ba66858617b48dcb6c43ca5a6a54
| 699,371
|
def iterate_items(collection, *args, **kwargs):
    """
    Iterate over the (key, value) items of the given mapping-like object.

    :param type collection: object exposing an ``items(...)`` method
    :rtype: iterator[tuple[object, object]]
    """
    items_view = collection.items(*args, **kwargs)
    return iter(items_view)
|
49fde9f9c027ff1fc64a57ee67b06c91033f8755
| 699,372
|
from pathlib import Path
from typing import List
def read_raw_data(folder: Path, split: str, lang: str) -> List[str]:
    """Load the sentences for one (lang, split) pair as a list of lines.

    Makes sure we are reading each file at most once; there are 1000
    sentences per file, one file per lang, so the memory footprint is ok.
    """
    data_file = folder / f"{lang}.{split}"
    assert data_file.exists(), data_file
    return data_file.read_text().splitlines()
|
451a795b8dcc8d4698f7538b7b55800f3015038d
| 699,373
|
from typing import Tuple
def is_multi_class(output_shape: Tuple[int, ...]) -> bool:
    """Checks if output is multi-class.

    A single-value output (shape of length 1) is not multi-class; any
    other shape length is treated as multi-class.
    """
    return len(output_shape) != 1
|
66726be66cce6f837b40287b75c547d28d709a74
| 699,374
|
def cradmin_instance_url(context, appname, viewname, *args, **kwargs):
    """
    Template tag implementation of
    :meth:`cradmin_legacy.crinstance.BaseCrAdminInstance.reverse_url`.

    Examples:

        Reverse the view named ``"edit"`` within the app named ``"pages"``:

        .. code-block:: htmldjango

            {% load cradmin_legacy_tags %}
            <a href='{% cradmin_instance_url appname="pages" viewname="edit" %}'>
                Edit
            </a>

        Reverse a view with keyword arguments:

        .. code-block:: htmldjango

            {% load cradmin_legacy_tags %}
            <a href='{% cradmin_instance_url appname="pages" viewname="list" mode="advanced" orderby="name" %}'>
                Show advanced pages listing ordered by name
            </a>
    """
    cradmin_instance = context['request'].cradmin_instance
    return cradmin_instance.reverse_url(
        appname=appname, viewname=viewname, args=args, kwargs=kwargs)
|
9cea000ff75b68d536796cbcc323fef1b0a7059b
| 699,375
|
def calculate_atom_symmetry_number(molecule, atom):
    """
    Return the symmetry number centered at `atom` in the structure. The
    `atom` of interest must not be in a cycle.

    The bond orders around `atom` are tallied, the molecule is split into
    the functional groups attached to `atom`, and the groups are compared
    pairwise by graph isomorphism; the multiset of equivalence counts then
    maps to a symmetry contribution (e.g. four identical single-bonded
    groups give 12, four all-different groups give 0.5 for chirality).
    """
    symmetry_number = 1
    single = double = triple = benzene = num_neighbors = 0  # note that 0 is immutable
    # Tally the bond orders around the central atom.
    for bond in atom.edges.values():
        if bond.is_single():
            single += 1
        elif bond.is_double():
            double += 1
        elif bond.is_triple():
            triple += 1
        elif bond.is_benzene():
            benzene += 1
        num_neighbors += 1
    # If atom has zero or one neighbors, the symmetry number is 1
    if num_neighbors < 2:
        return symmetry_number
    # Create temporary structures for each functional group attached to atom
    # (work on a deep copy so the caller's molecule is untouched).
    molecule0 = molecule
    molecule = molecule0.copy(True)
    atom = molecule.vertices[molecule0.vertices.index(atom)]
    molecule.remove_atom(atom)
    groups = molecule.split()
    # Determine equivalence of functional groups around atom
    # (cache both directions of each pairwise isomorphism check).
    group_isomorphism = dict([(group, dict()) for group in groups])
    for group1 in groups:
        for group2 in groups:
            if group1 is not group2 and group2 not in group_isomorphism[group1]:
                group_isomorphism[group1][group2] = group1.is_isomorphic(group2)
                group_isomorphism[group2][group1] = group_isomorphism[group1][group2]
            elif group1 is group2:
                group_isomorphism[group1][group1] = True
    # For each group, count how many groups (including itself) it matches.
    count = [sum([int(group_isomorphism[group1][group2]) for group2 in groups]) for group1 in groups]
    # Collapse duplicate entries: a class of k equivalent groups appears k
    # times in `count`, so remove the redundant repetitions.
    for i in range(count.count(2) // 2):
        count.remove(2)
    for i in range(count.count(3) // 3):
        count.remove(3)
        count.remove(3)
    for i in range(count.count(4) // 4):
        count.remove(4)
        count.remove(4)
        count.remove(4)
    count.sort()
    count.reverse()
    # Map (radical count, bond pattern, equivalence classes) to a factor.
    if atom.radical_electrons == 0:
        if single == 4:
            # Four single bonds
            if count == [4]:
                symmetry_number *= 12
            elif count == [3, 1]:
                symmetry_number *= 3
            elif count == [2, 2]:
                symmetry_number *= 2
            elif count == [2, 1, 1]:
                symmetry_number *= 1
            elif count == [1, 1, 1, 1]:
                symmetry_number *= 0.5  # found chirality
        elif single == 3:
            # Three single bonds
            if count == [3]:
                symmetry_number *= 3
            elif count == [2, 1]:
                symmetry_number *= 1
            elif count == [1, 1, 1]:
                symmetry_number *= 1
        elif single == 2:
            # Two single bonds
            if count == [2]:
                symmetry_number *= 2
        # for resonance hybrids
        elif single == 1:
            if count == [2, 1]:
                symmetry_number *= 2
        elif double == 2:
            # Two double bonds
            if count == [2]:
                symmetry_number *= 2
        # for nitrogen resonance hybrids
        elif single == 0:
            if count == [2]:
                symmetry_number *= 2
    elif atom.radical_electrons == 1:
        if single == 3:
            # Three single bonds
            if count == [3]:
                symmetry_number *= 6
            elif count == [2, 1]:
                symmetry_number *= 2
            elif count == [1, 1, 1]:
                symmetry_number *= 1
        elif single == 1:
            if count == [2, 1]:
                symmetry_number *= 2
            elif count == [1, 1, 1]:
                symmetry_number *= 1
    elif atom.radical_electrons == 2:
        if single == 2:
            # Two single bonds
            if count == [2]:
                symmetry_number *= 2
    return symmetry_number
|
d4b28f2b0cb793a5b6190c85c73bd56196ebec43
| 699,377
|
import torch
def get_discretized_transformation_matrix(matrix, discrete_ratio,
                                          downsample_rate):
    """
    Get disretized transformation matrix.

    Parameters
    ----------
    matrix : torch.Tensor
        Shape -- (B, L, 4, 4) where B is the batch size, L is the max cav
        number.
    discrete_ratio : float
        Discrete ratio.
    downsample_rate : float/int
        downsample_rate

    Returns
    -------
    matrix : torch.Tensor
        Output transformation matrix in 2D with shape (B, L, 2, 3),
        including 2D transformation and 2D rotation.
    """
    # Keep rows x, y and columns x, y, t -> a (B, L, 2, 3) affine matrix.
    # (Advanced indexing returns a copy, so the caller's tensor is safe.)
    top_rows = matrix[:, :, [0, 1], :]
    affine_2d = top_rows[:, :, :, [0, 1, 3]]
    # Normalize the x, y translation into discretized grid units.
    cell_size = discrete_ratio * downsample_rate
    affine_2d[:, :, :, -1] = affine_2d[:, :, :, -1] / cell_size
    return affine_2d.float()
|
fd6ab95dcbb6fca9cab6b35fd5e20998fc12472b
| 699,378
|
import re
def remove_surrogates(s):
    """
    The following is a helper function, used to convert two-character
    surrogate sequences into single characters. This is needed
    because some systems create surrogates but others don't.

    The string is scanned for adjacent decimal character references
    (``&#NNNN;``); whenever a UTF-16 high surrogate (0xD800-0xDBFF) is
    immediately followed by a low surrogate (0xDC00-0xDFFF), the pair is
    replaced by a single reference to the combined code point.
    """
    # re.split with a capturing group keeps the references at odd indices;
    # even indices hold the (possibly empty) text between them.
    pieces = re.split('(&#\d+;)', s)
    for i in range(3, len(pieces)-1, 2):
        # Only merge when the two references are directly adjacent
        # (the text piece between them is empty).
        if pieces[i-1] != '': continue
        high,low = int(pieces[i-2][2:-1]), int(pieces[i][2:-1])
        if 0xd800 <= high <= 0xdbff and 0xdc00 <= low <= 0xdfff:
            # Combine per UTF-16: 10 low bits of each half, plus 0x10000.
            pieces[i-2] = '&#%d;' % (((high&0x3ff)<<10) +
                                     (low&0x3ff) + 0x10000)
            pieces[i] = ''
    return ''.join(pieces)
|
31426a1cc76838dec58d78c8ab8a951f84f4eda0
| 699,379
|
def basic_bn_stem(model, data, dim_in, **kwargs):
    """Add a basic DenseNet stem. For a pre-trained network that used BN.
    An AffineChannel op replaces BN during fine-tuning.

    Returns the output blob and its channel count.
    """
    dim = dim_in
    blob = model.Conv(data, 'conv1', 3, dim, 7, pad=3, stride=2, no_bias=1)
    blob = model.AffineChannel(blob, 'conv1_bn', dim=dim, inplace=True)
    blob = model.Relu(blob, blob)
    # Max pooling intentionally disabled; kept for reference:
    # blob = model.MaxPool(blob, 'pool1', kernel=3, pad=1, stride=2)
    return blob, dim
|
37469f6baccdc9ec0de5228b6398963e8a984a57
| 699,380
|
def fib_modified(a, b, n):
    """Computes the modified Fibonacci-style function.

    Starting from t1 = a and t2 = b, applies t(i+1) = t(i-1) + t(i)**2
    and returns the n-th term (for n <= 2 this is simply b).
    """
    prev, curr = a, b
    for _ in range(n - 2):
        prev, curr = curr, prev + curr * curr
    return curr
|
103433c685d672019b42c1fd1a17a34bd3727764
| 699,381
|
from numpy import asarray
from numpy.testing import assert_equal
def nan_equal(a, b):
    """Are two arrays containing nan identical, assuming nan == nan?

    :param a: array-like
    :param b: array-like
    :return: True when the arrays compare equal elementwise with NaNs in
        matching positions, False otherwise
    """
    a, b = asarray(a), asarray(b)
    try:
        assert_equal(a, b)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed. assert_equal raises AssertionError on a
        # mismatch, but may raise other errors on incomparable inputs, so
        # Exception (not just AssertionError) is the safe net here.
        return False
    return True
|
a5f3186e202e22d79f46781fe3c9e6a1936f4a6a
| 699,382
|
def check_thermodynamic_consistency(tab, spec, *XYf):
    """Relative thermodynamic-consistency error of an EoS table.

    Checks how well the tabulated pressure P(rho, T) and internal energy
    U(rho, T) satisfy rho**2 * dU/drho - P + T * dP/dT = 0, returning the
    residual normalised by the root-sum-square of its three terms.

    :param tab: table container exposing ``get_table(name, spec)`` objects
        that are callable and provide ``dFx`` / ``dFy`` derivatives
        (presumably d/d rho and d/d T respectively -- TODO confirm the
        derivative-axis convention against the table class)
    :param spec: species selector substituted into the ``{s}`` table names
    :param XYf: grid arguments; XYf[0] is density, XYf[1] is temperature
    :return: elementwise relative error
    """
    XYf_DT = XYf[:]
    rho = XYf_DT[0]
    temp = XYf_DT[1]
    Pt = tab.get_table('P{s}_DT', spec)(*XYf_DT)
    dU_rho = tab.get_table('U{s}_DT', spec).dFx(*XYf_DT)
    dP_T = tab.get_table('P{s}_DT', spec).dFy(*XYf_DT)
    # Residual of the consistency relation, squared ...
    num = (rho**2 * dU_rho - Pt + temp*dP_T)**2
    # ... normalised by the sum of squares of its individual terms.
    denum = ((rho**2 * dU_rho)**2 + Pt**2 + (temp*dP_T)**2)
    res = (num/denum)**0.5
    #idx = np.unravel_index(np.nanargmax(res), res.shape)
    return res
|
7dbf10d7a8a0e0742def3c09d20d5a5768b9dab4
| 699,383
|
def message():
    """
    Text message.

    :return: a fixed excuse string
    """
    text = "That's easy to fix, but I can't be bothered."
    return text
|
abff6c119536ffab9a4506d64afe067131e066a2
| 699,384
|
import yaml
def represent_ordered_dict(dumper, data):
    """
    Serializes ``OrderedDict`` to YAML by its proper order.

    Registering this function to ``yaml.SafeDumper`` enables using
    ``yaml.safe_dump`` with ``OrderedDict``s.
    """
    mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
    return dumper.represent_mapping(mapping_tag, data.items())
|
f8c83dd3b3ecf94077b7b465334ffa84df7e16a8
| 699,385
|
def raizCuadrada(n):
    """
    Estimate the square root of ``n`` with 20 Newton-Raphson iterations.

    :param n: non-negative number
    :return: approximation of sqrt(n)
    :raises ValueError: if ``n`` is negative (previously this diverged and
        then crashed with ZeroDivisionError)
    """
    if n < 0:
        raise ValueError("cannot take the square root of a negative number")
    if n == 0:
        # The iteration below would divide by zero for n == 0.
        return 0.0
    raiz = n / 2  # initial guess: half of n
    for _ in range(20):
        raiz = (1/2) * (raiz + (n / raiz))
    return raiz
|
5b3a36dc50d5fa8921dfcbe2a24f7467201c5339
| 699,386
|
def get_dot_size(val, verbose=False):
    """
    Get the size of the marker based on val.

    :param val: the value to test
    :param verbose: more output (currently unused)
    :return: the size of the marker in pixels
    """
    # (upper bound, size); values above every bound get the largest size.
    size_table = ((10, 6), (20, 9), (30, 12), (40, 15))
    for upper_bound, marker_size in size_table:
        if val <= upper_bound:
            return marker_size
    return 18
|
aa84c98a3cb78822dd02a9062f9f9a325907b310
| 699,387
|
import six
def _tuplify_version(version):
    """
    Coerces the version string (if not None), to a version tuple.
    Ex. "1.7.0" becomes (1, 7, 0).
    """
    if version is None:
        return None
    if isinstance(version, six.string_types):
        version = tuple(int(part) for part in version.split("."))
    # Sanity checks: at most 4 components, at least version 1.3, all ints.
    assert len(version) <= 4, version
    assert (1, 3) <= version, version
    assert all(isinstance(part, six.integer_types) for part in version), version
    return version
|
97575c813ce585e7e928b129e67196dcece5db0a
| 699,388
|
import math
def is_hexagon(x):
    """
    Checks if x is a hexagonal number, i.e. x == n*(2n - 1) for some
    positive integer n (obtained by inverting that quadratic).
    """
    root = (1 + math.sqrt(8 * x + 1)) / 4
    return root == round(root)
|
854aa38984d36cd956852a091318dd6edb0a06e1
| 699,389
|
import os
def intersect_files(flist1, flist2):
    """Return the intersection of two sets of filepaths, matched on the
    file name (after the final '/') with the extension ignored.

    Examples
    --------
    >>> flist1 = ['/a/b/abc.lab', '/c/d/123.lab', '/e/f/xyz.lab']
    >>> flist2 = ['/g/h/xyz.npy', '/i/j/123.txt', '/k/l/456.lab']
    >>> sublist1, sublist2 = intersect_files(flist1, flist2)
    >>> print(sublist1)
    ['/e/f/xyz.lab', '/c/d/123.lab']
    >>> print(sublist2)
    ['/g/h/xyz.npy', '/i/j/123.txt']

    Parameters
    ----------
    flist1 : list
        first list of filepaths
    flist2 : list
        second list of filepaths

    Returns
    -------
    sublist1 : list
        subset of filepaths with matching stems from ``flist1``
    sublist2 : list
        corresponding filepaths from ``flist2``
    """
    def stem(abs_path):
        """Extension-less basename of *abs_path*."""
        return os.path.splitext(os.path.split(abs_path)[-1])[0]

    # Map each stem in flist1 to its full path (later entries win,
    # matching the original dict() construction).
    lookup = {stem(path): path for path in flist1}
    sublist1 = []
    sublist2 = []
    for path in flist2:
        key = stem(path)
        if key in lookup:
            sublist1.append(lookup[key])
            sublist2.append(path)
    return [sublist1, sublist2]
|
fe4c6e23ea09c2e1770d3e49967f978615df5160
| 699,391
|
from typing import List
import bisect
def binary_search(elements: List[int], value: int) -> int:
    """Returns the index of the element closest to value.

    Args:
        elements (List): A sorted list.

    Returns -1 for an empty list. On a tie between the bisect position and
    its left neighbour, the bisect position (right) wins, as before.
    """
    if not elements:
        return -1
    # Note the hi bound of len-1, so the candidate never starts past the
    # last index (mirrors the original behaviour).
    best = bisect.bisect_left(elements, value, 0, len(elements) - 1)
    best_distance = abs(elements[best] - value)
    if best_distance == 0:
        return best
    left = best - 1
    if left >= 0:
        left_distance = abs(elements[left] - value)
        if left_distance < best_distance:
            return left
    return best
|
1e1cb3ebe1b897e480aa93747108365f0e8326fd
| 699,392
|
import uuid
def create_record_id():
    """Create record id.

    :return: Record id string (a freshly generated random UUID4).
    :rtype: str
    """
    return "{}".format(uuid.uuid4())
|
a582d8e0823061621e250f257ed4877032792fdc
| 699,393
|
from typing import Type
from typing import Callable
from typing import Any
def is_type(tipe: Type) -> Callable[[Any], bool]:
    """
    :param tipe: A Type
    :return: A predicate that checks if an object is an instance of that Type
    """
    def check(value: Any) -> bool:
        """
        :param value: An object
        :return: Whether the object is an instance of the expected Type
        """
        return isinstance(value, tipe)
    # Give the closure a descriptive name for debugging/repr purposes.
    check.__name__ = f'_{is_type.__name__}_{tipe}'
    return check
|
2b4be636675216c810f9e14f70fe8a5647552e3f
| 699,394
|
import os
def is_root():
    """Check admin permissions (effective UID of 0 means root)."""
    return 0 == os.geteuid()
|
f4d72e1139ef7e59f4c8764e65fb0d571ab8e32b
| 699,395
|
def MSI(record):
    """ "Maximise Severity if Invalid": propagate an alarm state on the linked
    record only if the alarm severity is `INVALID_ALARM`.
    When propagated, the alarm status will become `LINK_ALARM`.

    Example (Python source)
    -----------------------
    `my_record.INP = MSI(other_record)`

    Example (Generated DB)
    ----------------------
    `field(INP, "other MSI")`
    """
    link_modifier = 'MSI'
    return record(link_modifier)
|
872b13eafe3dd76a962f24d2f3108990b5fda40e
| 699,396
|
def strip_constants(df, indices):
    """Remove columns from DF that are constant input factors.

    Parameters
    ----------
    * df : DataFrame, of input parameters used in model run(s).
    * indices : list, of constant input factor index positions.

    Returns
    ----------
    * copy of `df` modified to exclude constant factors
    """
    frame = df.copy()
    # Resolve the positional indices to column labels, then keep the rest.
    constant_names = frame.columns[indices]
    keep_mask = ~frame.columns.isin(constant_names)
    return frame.loc[:, keep_mask]
|
5a68d9beaa39cb2651ccd995af20d25165ccb03e
| 699,397
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.