| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def coeff_modulus_192(poly_modulus_degree):
"""
    Returns the default coefficient modulus (192-bit security) for a given polynomial modulus degree.
    :param poly_modulus_degree: Polynomial modulus degree (1024, 2048, 4096, 8192, 16384, or 32768)
    :return: Default coefficient modulus for the given degree
    """
    return seal.coeff_modulus_192(poly_modulus_degree)
|
fa606e19b0deb92e645fef85058146f91f06b012
| 3,640,300
|
def __add_statement(is_position: bool) -> Statement:
"""
Adds a new statement to the database
:param is_position: True if the statement should be a position
:return: New statement object
"""
db_statement = Statement(is_position=is_position)
DBDiscussionSession.add(db_statement)
DBDiscussionSession.flush()
return db_statement
|
9c5ac1b906ed87961aea50a309e605d7dc28ac38
| 3,640,301
|
def xgcd(a: int, b: int) -> tuple:
"""
Extended Euclidean algorithm.
Returns (g, x, y) such that a*x + b*y = g = gcd(a, b).
"""
x0, x1, y0, y1 = 0, 1, 1, 0
while a != 0:
(q, a), b = divmod(b, a), a
y0, y1 = y1, y0 - q * y1
x0, x1 = x1, x0 - q * x1
return b, x0, y0
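# Minimal usage sketch, assuming only the xgcd() above: the Bezout coefficient x
# is the modular inverse of a when gcd(a, m) == 1.
g, x, y = xgcd(3, 26)
assert g == 1 and (3 * x) % 26 == 1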
|
3889038824447f65f5d99d5d2a6301d9717948fe
| 3,640,302
|
def correct_predictions(output_probabilities, targets):
"""
    Count the predictions in the model output that match the target classes.
    Args:
        output_probabilities: Tensor of probabilities for the different output classes
        targets: Indices of the actual target classes
    Returns:
        The number of correct predictions in 'output_probabilities'
"""
_, out_classes = output_probabilities.max(dim=1)
correct = (out_classes == targets).sum()
return correct.item()
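# Minimal usage sketch (assumes PyTorch tensors, as the function expects):
import torch
probs = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
targets = torch.tensor([0, 1, 1])
assert correct_predictions(probs, targets) == 2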
|
0e39f3bfa00fc20334cf679aa77d89523a34454c
| 3,640,303
|
def get_base_url(url: str) -> str:
"""
    Return the base URL (scheme://netloc/) for a given URL.
    Example:
    Return http://example.com/ for input http://example.com/path/path
    """
    url = format_url(url)
    parsed = parse_url(url)
    return '{uri.scheme}://{uri.netloc}/'.format(uri=parsed)
|
edeb5fa7c2ac1b06ed6f3ed9523e4324f21e6abf
| 3,640,304
|
def setup_tutorial():
"""
Helper function to check correct configuration of tf and keras for tutorial
:return: True if setup checks completed
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
    # Image dimension ordering should follow the TensorFlow convention
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' "
"to 'th', temporarily setting to 'tf'")
return True
|
2310edce037d3f6fa8fd30b3fb28aaddfc9b941d
| 3,640,305
|
import re
def split_value(s, splitters=["/", "&", ","]):
"""Splits a string. The first match in 'splitters' is used as the
separator; subsequent matches are intentionally ignored."""
if not splitters:
return [s.strip()]
values = s.split("\n")
for spl in splitters:
spl = re.compile(r"\b\s*%s\s*\b" % re.escape(spl), re.UNICODE)
        if not any(spl.search(v) for v in values):
continue
new_values = []
for v in values:
new_values.extend([st.strip() for st in spl.split(v)])
return new_values
return values
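# Minimal usage sketch, assuming the first matching splitter is used as the separator:
assert split_value("AC/DC") == ["AC", "DC"]
assert split_value("Simon & Garfunkel") == ["Simon", "Garfunkel"]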
|
a9227a4dcf4c49393e6c784337754d1e2b1d30b4
| 3,640,306
|
def smape(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculates symmetric mean absolute percentage error SMAPE
Args:
y_true (np.ndarray): Actual values Y
y_pred (np.ndarray): Predicted values Y
Returns:
[float]: smape
"""
error = np.abs(y_true - y_pred) / (np.abs(y_true) + np.abs(y_pred))
return 100 * np.mean(error)
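# Minimal usage sketch (assumes numpy):
import numpy as np
y_true = np.array([100.0, 200.0])
y_pred = np.array([110.0, 180.0])
print(round(smape(y_true, y_pred), 2))  # -> 5.01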
|
33948539bfe13c4f9426bf0bf4c95fcea56a1da5
| 3,640,307
|
def dealwithtype( x, t ):
""" return x and t as an array
broadcast values if shape of x != shape of y
and neither x or t are scalar
"""
x = np.asarray( x )
t = np.asarray( t )
if not x.shape and not t.shape:
pass
elif not x.shape:
x = x*np.ones_like( t )
elif not t.shape:
t = t*np.ones_like( x )
else:
x, t = np.meshgrid( x, t )
return x, t
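# Minimal usage sketch (assumes numpy): a scalar is broadcast against an array.
import numpy as np
x, t = dealwithtype(2.0, np.array([0.0, 1.0, 2.0]))
print(x.shape, t.shape)  # both (3,)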
|
5bf440c084d0cf1012e2cdcdf8639d2ff6334e67
| 3,640,308
|
def format_img_size(img, C: FasterRcnnConfiguration):
""" formats the image size based on config """
img_min_side = float(C.resize_smallest_side_of_image_to)
(height, width, _) = img.shape
if width <= height:
ratio = img_min_side / width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side / height
new_width = int(ratio * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
return img, ratio
|
9233c92f48ee8c187695be9342f082d540e02a14
| 3,640,309
|
from collections import defaultdict
def build_tile_count_map(tile_counts):
"""Build a map from a tile key to a count."""
tile_count_map = defaultdict(int)
for tile_count in tile_counts:
tile = tile_count.tile
tile_key = (tile.letter, tile.value, tile.is_blank)
tile_count_map[tile_key] = tile_count.count
return tile_count_map
|
2b4f30e91224db92598925bd4d794d3e96092b07
| 3,640,310
|
import requests
import json
def get_uid_to_user(restful_url):
"""Gets uid -> user mapping from restful url"""
query_url = restful_url + "/GetAllUsers"
resp = requests.get(query_url)
if resp.status_code != 200:
logger.warning("Querying %s failed.", query_url)
return {}
data = json.loads(resp.text)
uid_to_user = {}
for item in data:
try:
uid = int(item[1])
user = item[0]
uid_to_user[uid] = user
except Exception as e:
logger.warning("Parsing %s failed: %s", item, e)
return uid_to_user
|
ebdaad0f129ecfdde3b18df4cd16f8e890879064
| 3,640,311
|
from typing import List
def parse_text(text):
"""
    Parse raw text format playlists. Each line must contain a single
    track, with artist and title separated by a single dash, e.g. Queen - Bohemian Rhapsody
:param str text:
:return: A list of tracks
"""
tracks: List[tuple] = []
for line in text.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
parts = line.split("-", 1)
if len(parts) != 2:
continue
artist, track = list(map(str.strip, parts))
if not artist or not track or (artist, track) in tracks:
continue
tracks.append((artist, track))
return tracks
|
1307d7ced966aa388e570456964c5921ac54ccca
| 3,640,312
|
from sympy import S, exp, factorial, sqrt
def R_nl(n, l, r, Z=1):
"""
Returns the Hydrogen radial wavefunction R_{nl}.
n, l .... quantum numbers 'n' and 'l'
r .... radial coordinate
Z .... atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples::
>>> from sympy.physics.hydrogen import R_nl
>>> from sympy import var
>>> var("r Z")
(r, Z)
>>> R_nl(1, 0, r, Z)
2*(Z**3)**(1/2)*exp(-Z*r)
>>> R_nl(2, 0, r, Z)
2**(1/2)*(Z**3)**(1/2)*(2 - Z*r)*exp(-Z*r/2)/4
>>> R_nl(2, 1, r, Z)
Z*r*6**(1/2)*(Z**3)**(1/2)*exp(-Z*r/2)/12
For Hydrogen atom, you can just use the default value of Z=1::
>>> R_nl(1, 0, r)
2*exp(-r)
>>> R_nl(2, 0, r)
2**(1/2)*(2 - r)*exp(-r/2)/4
>>> R_nl(3, 0, r)
2*3**(1/2)*(3 - 2*r + 2*r**2/9)*exp(-r/3)/27
For Silver atom, you would use Z=47::
>>> R_nl(1, 0, r, Z=47)
94*47**(1/2)*exp(-47*r)
>>> R_nl(2, 0, r, Z=47)
47*94**(1/2)*(2 - 47*r)*exp(-47*r/2)/4
>>> R_nl(3, 0, r, Z=47)
94*141**(1/2)*(3 - 94*r + 4418*r**2/9)*exp(-47*r/3)/27
The normalization of the radial wavefunction is::
>>> from sympy import integrate, oo
>>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo))
1
It holds for any atomic number:
>>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo))
1
"""
# sympify arguments
n, l, r, Z = S(n), S(l), S(r), S(Z)
# radial quantum number
n_r = n - l - 1
# rescaled "r"
a = 1/Z # Bohr radius
r0 = 2 * r / (n * a)
# normalization coefficient
C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n+l)))
# This is an equivalent normalization coefficient, that can be found in
    # some books. Both coefficients seem to be equally fast:
# C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l)))
return C * r0**l * laguerre_l(n_r, 2*l+1, r0) * exp(-r0/2)
|
6102519a8d32e61cbdbb689c02f36b13b8c4b840
| 3,640,313
|
def entity_by_name(name):
"""Adapt Entity.name (not Entity.class_name!) to entity."""
entities = zope.component.getUtility(
icemac.addressbook.interfaces.IEntities).getEntities(sorted=False)
for candidate in entities:
if candidate.name == name:
return candidate
raise ValueError("Unknown name: %r" % name)
|
42f3d2ecf172db6b0a54590d1d983b563e8c4d52
| 3,640,314
|
def export_single_floor(floor):
"""exports a single floor
"""
return mt.Floor(
*export_vertices(floor.Points),
id=str(next_id()),
ep_id=floor.Id,
type=str(id_map(floor.Type.Id)))
|
874df1e1732cdc91092038fe2859ecea45bb836b
| 3,640,315
|
import torch
def tensor_lab2rgb(input):
"""
    Convert a Lab tensor of shape n * 3 * h * w to an RGB tensor of the same shape.
"""
input_trans = input.transpose(1, 2).transpose(2, 3) # n * h * w * 3
L, a, b = input_trans[:, :, :, 0:1], input_trans[:, :, :, 1:2], input_trans[:, :, :, 2:]
y = (L + 16.0) / 116.0
x = (a / 500.0) + y
z = y - (b / 200.0)
neg_mask = z.data < 0
z[neg_mask] = 0
xyz = torch.cat((x, y, z), dim=3)
mask = xyz.data > 0.2068966
mask_xyz = xyz.clone()
mask_xyz[mask] = torch.pow(xyz[mask], 3.0)
mask_xyz[~mask] = (xyz[~mask] - 16.0 / 116.0) / 7.787
mask_xyz[:, :, :, 0] = mask_xyz[:, :, :, 0] * 0.95047
mask_xyz[:, :, :, 2] = mask_xyz[:, :, :, 2] * 1.08883
rgb_trans = torch.mm(mask_xyz.view(-1, 3), torch.from_numpy(rgb_from_xyz).type_as(xyz)).view(
input.size(0), input.size(2), input.size(3), 3
)
rgb = rgb_trans.transpose(2, 3).transpose(1, 2)
mask = rgb > 0.0031308
mask_rgb = rgb.clone()
mask_rgb[mask] = 1.055 * torch.pow(rgb[mask], 1 / 2.4) - 0.055
mask_rgb[~mask] = rgb[~mask] * 12.92
neg_mask = mask_rgb.data < 0
large_mask = mask_rgb.data > 1
mask_rgb[neg_mask] = 0
mask_rgb[large_mask] = 1
return mask_rgb
|
6c9ebdfba0a22661c479296a2be285d82a7ac85b
| 3,640,316
|
import os
def get_absolute_path(file_name, package_level=True):
"""Get file path given file name.
    :param: [package_level] - Whether the file is inside or outside the
`gmail_api_wrapper` package
"""
if package_level:
# Inside `gmail_api_wrapper`
dirname = os.path.dirname(__file__)
else:
# Outside `gmail_api_wrapper`
dirname = os.path.join(os.path.dirname(__file__), os.pardir)
file_path = os.path.abspath(os.path.join(dirname, file_name))
return file_path
|
70206d9f8b94603b3efaf89c1b53573e1e01ca4d
| 3,640,317
|
def thanos(planet: dict, finger: int) -> int:
    """ Thanos can kill half the lives of a world with a snap of the finger """
    # iterate over a copy of the keys so popping does not invalidate the iterator
    for key in list(planet.keys()):
        finger += 1
        if finger & 1 == 1:
            # kill it
            planet.pop(key)
    return finger
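# Minimal usage sketch, assuming the loop above increments the finger once per key:
# alternating snaps remove half of the population.
world = {"alice": 1, "bob": 1, "carol": 1, "dave": 1}
thanos(world, 0)
assert len(world) == 2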
|
5b6325297cb8f259c27b3eb7fa5618edd1486b9c
| 3,640,318
|
from typing import List
def ordered_list_item_to_percentage(ordered_list: List[str], item: str) -> int:
"""Determine the percentage of an item in an ordered list.
When using this utility for fan speeds, do not include "off"
Given the list: ["low", "medium", "high", "very_high"], this
    function will return the following when the item is passed
in:
low: 25
medium: 50
high: 75
very_high: 100
"""
if item not in ordered_list:
raise ValueError
list_len = len(ordered_list)
list_position = ordered_list.index(item) + 1
return (list_position * 100) // list_len
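# Minimal usage sketch:
speeds = ["low", "medium", "high", "very_high"]
assert ordered_list_item_to_percentage(speeds, "medium") == 50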
|
2aa1b0574664e53da6080ae4bc99d1f3c93fad96
| 3,640,319
|
def simple2tradition(line):
"""
    Convert Simplified Chinese to Traditional Chinese.
"""
line = Converter('zh-hant').convert(line)
return line
|
f934bd3c573274b0c2d8345493850335e0d7b6b7
| 3,640,320
|
def normalize_colors(colors):
"""
    If colors are 8-bit integer values, scale them to the 0-1 float range used by OpenGL
:param colors:
:return:
"""
    if colors.dtype != np.float32:
colors = colors.astype(np.float32) / 255.0
return colors
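# Minimal usage sketch (assumes numpy): 8-bit colors are scaled into the 0-1 range.
import numpy as np
rgb8 = np.array([[255, 128, 0]], dtype=np.uint8)
print(normalize_colors(rgb8))  # -> [[1.0, ~0.502, 0.0]]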
|
5212d5678d9a2744fced474b19ee5099ee152158
| 3,640,321
|
import numpy as np
def load_text_data(path, word_dict):
"""
Read the given path, which should have one sentence per line
:param path: path to file
:param word_dict: dictionary mapping words to embedding
indices
:type word_dict: WordDictionary
:return: a tuple with a matrix of sentences and an array
of sizes
"""
max_len = 0
all_indices = []
sizes = []
with open(path, 'rb') as f:
for line in f:
tokens = line.decode('utf-8').split()
this_len = len(tokens)
if this_len > max_len:
max_len = this_len
sizes.append(this_len)
inds = [word_dict[token] for token in tokens]
all_indices.append(inds)
shape = (len(all_indices), max_len)
sizes = np.array(sizes)
matrix = np.full(shape, word_dict.eos_index, np.int32)
for i, inds in enumerate(all_indices):
matrix[i, :len(inds)] = inds
return matrix, sizes
|
bcb58019917b3972a12968cd9b9a563c27356e50
| 3,640,322
|
def dirty(graph):
"""
    Return a dict mapping each dirty node in the graph to its attribute dict.
    """
    # a node is dirty if it is flagged for build or test
    return {n: v for n, v in graph.node.items() if v.get('build') or v.get('test')}
|
06835b52d7741716f1c67d951c0ab74758f476b4
| 3,640,323
|
def hangman(secret_word):
""" secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Remember to make
sure that the user puts in a letter!
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
Follows the other limitations detailed in the problem write-up.
"""
letters_guessed = ''
g_remaining, w_remaining = 6, 3
user_input = ''
def input_validator(user_input):
nonlocal letters_guessed
if len(user_input) == 1 and user_input.encode().isalpha():
if user_input.isupper():
user_input = user_input.lower()
if user_input not in letters_guessed:
letters_guessed += user_input
return True
else:
return False
def invalid_char_penalty():
nonlocal g_remaining, w_remaining
if w_remaining > 0:
w_remaining -= 1
penalty = "You have " + str(w_remaining) + " warnings left:"
else:
g_remaining -= 1
penalty = "You have no warnings left so you lose one guess:"
return penalty
def wrong_guess_penalty():
nonlocal g_remaining
if user_input.lower() in ('a', 'e', 'i', 'o', 'u'):
g_remaining -= 2
else:
g_remaining -= 1
# print welcome message
print(
f"Welcome to the game Hangman!\n"
f"I am thinking of a word that is {len(secret_word)} letters long\n"
f"You have {w_remaining} warnings left.")
while g_remaining >= 1:
# before proceeding into the loop, check if the word has been guessed
if is_word_guessed(secret_word, letters_guessed) == True:
print(
f"----------\n"
f"Congratulations, you won!\n"
f"Your total score for this game is: {len(set(secret_word)) * g_remaining}"
)
return
# printing required statements and take user_input
print(
f"----------\n"
f"You have {g_remaining} guesses left\n"
f"Available Letters: {get_available_letters(letters_guessed)}"
)
user_input = input("Please guess a letter: ")
# if user entered nothing, give him a free pass and loop again.
if user_input == '':
continue
# if it's already been guessed, issue a penalty notice and jump to next iteration
if user_input.lower() in letters_guessed:
print(
f"Oops! You've already guessed that letter. "
f"{invalid_char_penalty()} {get_guessed_word(secret_word, letters_guessed)}"
)
continue
        # validate once: if it's invalid (non-English alphabet, blank, or len > 1),
        # give the user a warning/penalty; otherwise check whether the guess is in the word
        if not input_validator(user_input):
            print(
                f"Oops! That is not a valid letter. "
                f"{invalid_char_penalty()} {get_guessed_word(secret_word, letters_guessed)}"
            )
        else:
if user_input.lower() not in secret_word:
wrong_guess_penalty()
print(f"Oops! That letter is not in my word: {get_guessed_word(secret_word, letters_guessed)}")
elif user_input.lower() in secret_word:
print(f"Good guess: {get_guessed_word(secret_word, letters_guessed)}")
# if g_remaining ran out, print end of game
print(
f"-----------\n"
f"Sorry, you ran out of guesses. The word was {secret_word}."
)
|
91ff0b2ad0168d1c3a8dd15466ca2b15b4a9f557
| 3,640,324
|
def sin_potential(z):
"""Sin-like potential."""
z = tf.transpose(z)
x = z[0]
y = z[1]
# x, y = z
return 0.5 * ((y - w1(z)) / 0.4) ** 2 + 0.1 * tf.math.abs(x)
|
e95db66fc99acc3742e179af0ba557b2a81b4ec3
| 3,640,325
|
def erode_label(image_numpy, iterations=2, mask_value=0):
""" For each iteration, removes all voxels not completely surrounded by
other voxels. This might be a bit of an aggressive erosion. Also I
    would bet it is incredibly inefficient. Also custom erosions in
multiple dimensions look a little bit messy.
"""
    if isinstance(iterations, list):
        if len(iterations) != 3:
            print('The erosion parameter does not have enough dimensions (3). Using the first value in the erosion parameter.')
            iterations = [iterations[0]] * 3
        else:
            iterations = list(iterations)
    else:
        iterations = [iterations, iterations, iterations]
    for i in range(max(iterations)):
kernel_center = 0
edges_kernel = np.zeros((3,3,3),dtype=float)
if iterations[2] > 0:
edges_kernel[1,1,0] = -1
edges_kernel[1,1,2] = -1
iterations[2] -= 1
kernel_center += 2
if iterations[1] > 0:
edges_kernel[1,0,1] = -1
edges_kernel[1,2,1] = -1
iterations[1] -= 1
kernel_center += 2
if iterations[0] > 0:
edges_kernel[0,1,1] = -1
edges_kernel[2,1,1] = -1
iterations[0] -= 1
kernel_center += 2
edges_kernel[1,1,1] = kernel_center
label_numpy = np.copy(image_numpy)
label_numpy[label_numpy != mask_value] = 1
label_numpy[label_numpy == mask_value] = 0
edge_image = signal.convolve(label_numpy, edges_kernel, mode='same')
edge_image[edge_image < 0] = -1
edge_image[np.where((edge_image <= kernel_center) & (edge_image > 0))] = -1
edge_image[edge_image == 0] = 1
edge_image[edge_image == -1] = 0
image_numpy[edge_image == 0] = mask_value
return image_numpy
|
7eb1ff92c8c4e75fa4b6ba88365adf50c9013fc8
| 3,640,326
|
from pathlib import Path
import os
def _resolve_dir_against_charm_path(charm: CharmBase, *path_elements: str) -> str:
"""Resolve the provided path items against the directory of the main file.
Look up the directory of the main .py file being executed. This is normally
going to be the charm.py file of the charm including this library. Then, resolve
the provided path elements and, if the result path exists and is a directory,
    return its absolute path; otherwise, raise ``InvalidAlertRulePathError``.
"""
charm_dir = Path(str(charm.charm_dir))
if not charm_dir.exists() or not charm_dir.is_dir():
# Operator Framework does not currently expose a robust
# way to determine the top level charm source directory
# that is consistent across deployed charms and unit tests
# Hence for unit tests the current working directory is used
        # TODO: update this logic when the following ticket is resolved
# https://github.com/canonical/operator/issues/643
charm_dir = Path(os.getcwd())
alerts_dir_path = charm_dir.absolute().joinpath(*path_elements)
if not alerts_dir_path.exists():
raise InvalidAlertRulePathError(str(alerts_dir_path), "directory does not exist")
if not alerts_dir_path.is_dir():
raise InvalidAlertRulePathError(str(alerts_dir_path), "is not a directory")
return str(alerts_dir_path)
|
cb2462020ccbe14b4841932454b55fca6453b7ce
| 3,640,327
|
def computeBFGridPoint(basis, U, gpi, gps):
"""
Compute the bilinear form for one grid point with the points
stored in gps
@param basis: basis of sparse grid function,
@param U: list of distributions
@param gpi: HashGridPoint
@param gps: list of HashGridPoint
"""
    n = len(gps)
    ans = DataVector(n)
    # run over all grid points
    for j, gpj in enumerate(gps):
        # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
        ans[j] = computeBFPairwise(basis, U, gpi, gpj)
return ans
|
4898e16847c8cb8fc8af3ffa3f793c18f2088d79
| 3,640,328
|
from typing import Optional
from typing import Collection
from typing import Pattern
from pathlib import Path
from typing import List
def list_files(commit: Optional[str] = None,
pathspecs: Collection[PathOrStr] = (),
exclude: Collection[Pattern[str]] = (),
repo: Optional[Path] = None) -> List[Path]:
"""Lists files with git ls-files or git diff --name-only.
Args:
commit: commit to use as a base for git diff
pathspecs: Git pathspecs to use in git ls-files or diff
exclude: regular expressions for Posix-style paths to exclude
repo: repository path from which to run commands; defaults to Path.cwd()
"""
if repo is None:
repo = Path.cwd()
if commit:
files = _diff_names(commit, pathspecs, repo)
else:
files = _ls_files(pathspecs, repo)
git_root = root(repo=repo).resolve()
return sorted(file for file in files if not any(
e.search(file.relative_to(git_root).as_posix()) for e in exclude))
|
8d96a41c5016b78a7e71654015fbfda50aa896d4
| 3,640,329
|
def sum_by_hexagon(df,resolution,pol,fr,to,vessel_type=[],gt=[]):
"""
Use h3.geo_to_h3 to index each data point into the spatial index of the specified resolution.
Use h3.h3_to_geo_boundary to obtain the geometries of these hexagons
    Example: sum_by_hexagon(df, 8, pol, fr, to)
"""
if vessel_type:
df_aggreg=df[((df.dt_pos_utc.between(fr,to))&(df.StandardVesselType.isin(vessel_type)))]
else:
df_aggreg=df[df.dt_pos_utc.between(fr,to)]
if df_aggreg.shape[0]>0:
if gt:
df_aggreg=df_aggreg[df_aggreg.GrossTonnage.between(gt[0],gt[1])]
if resolution==8:
df_aggreg = df_aggreg.groupby(by = "res_8").agg({"co2_t":sum,"ch4_t":sum}).reset_index()
else:
df_aggreg = df_aggreg.assign(new_res=df_aggreg.res_8.apply(lambda x: h3.h3_to_parent(x,resolution)))
df_aggreg = df_aggreg.groupby(by = "new_res").agg({"co2_t":sum,"ch4_t":sum}).reset_index()
df_aggreg.columns = ["hex_id", "co2_t","ch4_t"]
df_aggreg["geometry"] = df_aggreg.hex_id.apply(lambda x:
{ "type" : "Polygon",
"coordinates":
[h3.h3_to_geo_boundary(x,geo_json=True)]
}
)
return df_aggreg
else:
return df_aggreg
|
883abde8562e093d44646e7db3795e22c6c918b8
| 3,640,330
|
def _ibp_sub(lhs, rhs):
"""Propagation of IBP bounds through a substraction.
Args:
lhs: Lefthand side of substraction.
rhs: Righthand side of substraction.
Returns:
out_bounds: IntervalBound.
"""
return lhs - rhs
|
45ed06feea14275ddd64e1ec60727123db52a5cd
| 3,640,331
|
from typing import Mapping
def toil_make_tool(
toolpath_object: CommentedMap,
loadingContext: cwltool.context.LoadingContext,
) -> Process:
"""
Emit custom ToilCommandLineTools.
    This factory function is meant to be passed to cwltool.load_tool().
"""
if (
isinstance(toolpath_object, Mapping)
and toolpath_object.get("class") == "CommandLineTool"
):
return ToilCommandLineTool(toolpath_object, loadingContext)
return cwltool.workflow.default_make_tool(toolpath_object, loadingContext)
|
25998d1a6941b8255e8baa5b83da3ef13c004cd7
| 3,640,332
|
import json
import io
def sentinel_s1(metadata):
""" Parse metadata and return basic Item
with rasterio.open('/Users/scott/Data/sentinel1-rtc/local_incident_angle.tif') as src:
...: metadata = src.profile
...: metadata.update(src.tags())
"""
def get_datetime(metadata):
''' retrieve UTC start time from tif metadata'''
times = []
for i in range(1, int(metadata['NUMBER_SCENES'])+1):
m = json.loads(metadata[f'SCENE_{i}_METADATA'])
times += [m['start_time'], m['end_time']]
return min(times)
def get_orbits(metadata):
''' https://forum.step.esa.int/t/sentinel-1-relative-orbit-from-filename/7042 '''
adjust = {'S1B':27, 'S1A':73}
abs_orbit = int(metadata['ABSOLUTE_ORBIT_NUMBER'])
rel_orbit = ((abs_orbit - adjust[metadata['MISSION_ID']]) % 175) + 1
return abs_orbit, rel_orbit
def get_geometry(metadata):
''' determine valid pixel footprint and bbox '''
# get MGRS grid square footprint
gridfile = op.join(op.dirname(__file__), 'sentinel1-rtc-conus-grid.geojson')
gf = gpd.read_file(gridfile)
gf.rename(columns=dict(id='tile'), inplace=True)
gf_grid = gf[gf.tile == metadata['TILE_ID']]
bbox = list(gf_grid.total_bounds)
# read GRD frame footprints
frames = []
for i in range(1, int(metadata['NUMBER_SCENES'])+1):
m = json.loads(metadata[f'SCENE_{i}_METADATA'])
frames.append(gpd.read_file(io.StringIO(m['footprint'])))
footprints = gpd.pd.concat(frames)
# get valid data footprint
intersection = gpd.overlay(gf_grid, footprints, how='intersection')
valid_footprint = intersection.unary_union.convex_hull
geom = {"type": "Polygon",
"coordinates":[list(valid_footprint.exterior.coords)]}
return bbox, geom
dt = parse(get_datetime(metadata))
abs_orbit, rel_orbit = get_orbits(metadata)
bbox, geom = get_geometry(metadata)
# Item properties
props = {
'datetime': dt.strftime('%Y-%m-%dT%H:%M:%SZ'),
'platform': metadata['MISSION_ID'],
'constellation': 'sentinel-1',
'instruments': ['c-sar'],
'gsd': 20,
'proj:epsg': metadata['crs'].to_epsg(),
'sentinel:utm_zone': metadata['TILE_ID'][:2],
'sentinel:latitude_band': metadata['TILE_ID'][3],
'sentinel:grid_square': metadata['TILE_ID'][4:],
'sentinel:product_id': metadata['SCENES'].split(','),
'sat:orbit_state': metadata['ORBIT_DIRECTION'],
'sat:absolute_orbit': abs_orbit,
'sat:relative_orbit': rel_orbit
}
# match key s3://sentinel-s1-rtc-indigo/tiles/RTC/1/IW/12/S/YJ/2016/S1B_20161121_12SYJ_ASC
DATE = metadata['DATE'].replace('-','')
    orbNames = {'ascending':'ASC', 'descending':'DSC'}
ORB = orbNames[metadata['ORBIT_DIRECTION']]
id = f"{metadata['MISSION_ID']}_{DATE}_{metadata['TILE_ID']}_{ORB}"
item = {
'type': 'Feature',
'stac_version': __stac_version__,
'stac_extensions': ['sar', 'sat', 'proj'],
'id': id,
'bbox': bbox,
'geometry': geom,
'properties':props
}
return item
|
c96b40417bdb68224f72738291386b799325584c
| 3,640,333
|
def get_loc(data, attr={'lr_mult':'0.01'}):
"""
    the localisation network in lenet-stn; it increases accuracy by about 1% or more
    when num-epoch >= 15
"""
loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2,2))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1,1), pad=(1, 1))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
loc = mx.symbol.Flatten(data=loc)
loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
return loc
|
9216080263f5f9dde07eff96109d05ab4d583a08
| 3,640,334
|
from numpy import mean
def meanwave(signals):
""" This function computes the meanwave of various signals.
Given a set of signals, with the same number of samples, this function
returns an array representative of the meanwave of those signals - which is
a wave computed with the mean values of each signal's samples.
Parameters
----------
signals: matrix-like
the input signals.
Returns
-------
mw: array-like
        the resulting meanwave
"""
return mean(signals,0)
|
11a477fed2b3cdf03226545a9a02c4500c6f4634
| 3,640,335
|
def set_difficulty():
"""Ask the difficult level and return the number of turns corresponding"""
if input("Choose a difficulty level. Type 'easy' or 'hard': ").lower() == "easy":
return EASY_TURNS
else:
return HARD_TURNS
|
746b01ca3e9ea22cd32b00fa923709ece2ee6a60
| 3,640,336
|
def delete_event_by_id(id, user_id):
"""Remove one event based on id."""
sql = "DELETE FROM events WHERE id = :id AND host_id = :user_id RETURNING title;"
db.session.execute(sql, {"id": id, "user_id": user_id})
db.session.commit()
return ["Event deleted."]
|
0e49df11f52574b89e96ff434c1e3b40130dbffc
| 3,640,337
|
def get_cmap_colors(cmap='jet',p=None,N=10):
"""
"""
cm = plt.get_cmap(cmap)
if p is None:
return [cm(i) for i in np.linspace(0,1,N)]
else:
normalize = matplotlib.colors.Normalize(vmin=min(p), vmax=max(p))
colors = [cm(normalize(value)) for value in p]
return colors
|
39073608961ab48e7b2ade6666b0107800825170
| 3,640,338
|
def reader_factory(load_from, file_format):
"""Select and return instance of appropriate reader class for given file format.
Parameters
__________
load_from : str or file instance
file path or instance from which to read
file_format : str
format of file to be read
Returns
    -------
Reader instance
"""
if file_format == 'hdf5':
reader = hdf5Reader(load_from)
elif file_format == 'pickle':
reader = PickleReader(load_from)
else:
raise NotImplementedError("Format '{}' has not been implemented.".format(file_format))
return reader
|
b2379a0ff4b360989f68dcc412fa733011d17213
| 3,640,339
|
def scrape_with_selenium(chrome, chrome_webdriver, url, xpath_tup_list, timeout):
"""Scrape using Selenium and Chrome."""
result_dic = {}
with SeleniumChromeSession(chrome=chrome, chrome_webdriver=chrome_webdriver) as driver:
wait_conditions = []
for xpath_tup in xpath_tup_list:
wait_conditions.append(WaitCondition(xpath_tup[0], By.XPATH, xpath_tup[1]))
try:
driver.get(url)
except WebDriverException as error:
logger.error(F'Issue: {error} for url "{url}"')
else:
scraper_wait = ScraperWait(wait_conditions)
try:
WebDriverWait(driver, timeout).until(scraper_wait)
except TimeoutException:
logger.error(F'Timeout waiting for url "{url}"')
else:
result_dic = scraper_wait.found_elements
return result_dic
|
a17a942cfd586765c01bf10af6247145d70a84a5
| 3,640,340
|
def take_element_screenshot(page_screenshot: Image.Image, bbox: Rectangle) -> Image.Image:
"""
Returns the cropped subimage with the coordinates given.
"""
w, h = page_screenshot.size
if bbox.area == 0:
raise ValueError(f"Rectangle {bbox} is degenerate")
if bbox not in Rectangle(Point(0, 0), Point(w, h)):
raise ValueError(f"Rectangle {bbox} not contained in the viewport {(0, 0, w, h)}")
return crop_image(page_screenshot, bbox)
|
2387ecf34c1ae4118e1021b0cf5649b8cd2947ce
| 3,640,341
|
def officeOfRegistrar_forward(request, id):
"""form to set receiver and designation of forwarded file """
context = {"track_id": id}
return render(request, "officeModule/officeOfRegistrar/forwardingForm.html", context)
|
aaa237f75de98b45b477c5fc4542e12e6e257a5c
| 3,640,342
|
def vector_quaternion_arrays_allclose(vq1, vq2, rtol=1e-6, atol=1e-6, verbose=0):
"""Check if all the entries are close for two vector quaternion numpy arrays.
    Quaternions are a way of representing rigid body 3D rotations that is more
numerically stable and compact in memory than other methods such as a 3x3
rotation matrix.
This special comparison function is needed because for quaternions q == -q.
Vector Quaternion numpy arrays are expected to be in format
[x, y, z, qx, qy, qz, qw].
# Params
vq1: First vector quaternion array to compare.
vq2: Second vector quaternion array to compare.
rtol: relative tolerance.
atol: absolute tolerance.
# Returns
True if the transforms are within the defined tolerance, False otherwise.
"""
vq1 = np.array(vq1)
vq2 = np.array(vq2)
q3 = np.array(vq2[3:])
q3 *= -1.
v3 = vq2[:3]
vq3 = np.array(np.concatenate([v3, q3]))
comp12 = np.allclose(np.array(vq1), np.array(vq2), rtol=rtol, atol=atol)
comp13 = np.allclose(np.array(vq1), np.array(vq3), rtol=rtol, atol=atol)
if verbose > 0:
print(vq1)
print(vq2)
print(vq3)
print(comp12, comp13)
return comp12 or comp13
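# Minimal usage sketch (assumes numpy): q and -q encode the same rotation.
import numpy as np
vq_a = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
vq_b = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]
assert vector_quaternion_arrays_allclose(vq_a, vq_b)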
|
d1f1bb82ce5570dce0c18f7c25798c8621badfa2
| 3,640,343
|
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
        Lda = gensim.models.ldamodel.LdaModel
        model = Lda(corpus, num_topics=num_topics, id2word=dictionary, passes=50)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values
|
1f46c1d5960a0d637116d7da847368d30440dd29
| 3,640,344
|
def autocorr_quad(w, f, t, method = 'direct'):
"""
Calculate the vacuum state autocorrelation function
for propagation on a quadratic potential energy surface.
Parameters
----------
w : array_like
The harmonic frequency (in energy units) of each mode.
f : array_like
The derivative array, including up to at least second derivatives.
t : array_like
The time array, in units where :math:`\\hbar = 1`. (Alternatively,
the `t` array can be identified with :math:`t/\\hbar`.)
method : {'direct','integral','integral_log'}
The calculation method. See Notes
Returns
-------
C : ndarray
The autocorrelation function, :math:`C(t)`.
See also
--------
corr_quad_recursion_elements : Calculate quadratic correlator recursion coefficients
~nitrogen.math.spech_fft : Calculate the spectrum of an autocorrelation function
Notes
-----
For `method` = 'direct', a direct expression based on
a discontinuity-free BCH disentangling formula is used.
For `method` = 'integral', an alternative method is
used to first calculate the logarithmic derivative
of :math:`C(t)`. This is numerically integrated
by a cumulative version of Simpson's rule and then
exponentiated.
For `method` = 'integral_log', the integrated logarithm
is returned directly, without exponentiation. That is,
the branch-cut discontinuity-free logarithm of :math:`C(t)`
is returned.
For the integral methods, a sufficiently small time-step
in the `t` array is required for accurate results. The direct
    method does not rely on numerical integration.
"""
n = len(w)
# Extract the gradient and hessian
F,K = _partition_darray(f, n)
h0 = f[0] # The energy offset
t = np.array(t)
if t.ndim != 1:
raise ValueError('t must be 1-dimensional')
if method == 'integral' or method == 'integral_log':
#
# Calculate the correlation function by
# integration of its logarithmic derivative
#
# Check for a valid time vector
if t[0] != 0:
raise ValueError('t[0] must be zero for integral methods')
if np.any( np.abs(np.diff(t) - (t[1]-t[0])) > 1e-8):
raise ValueError('Time vector must be uniformly spaced.')
#
# Calculate the correlator recursion coefficients
r,S,T = corr_quad_recursion_elements(w, f, t)
# Calculate the ODE coefficient sum
sumIH = 0
for i in range(n):
sumIH += 0.25 * ( (w[i] + K[i,i]) - (w[i] - K[i,i])*(r[:,i]**2 - T[:,i,i]))
sumIH += (-np.sqrt(0.5)) * F[i] * r[:,i]
for j in range(i): # j < i
sumIH += 0.5 * K[i,j] * (r[:,i] * r[:,j] - T[:,i,j])
g = (-1j) * sumIH # the derivative of the logarithm
#
# C'(t) = g * C(t)
#
# --> C(t) = exp[ integral of g(t) ]
#
# Integrate the logarithm via
# Simpson's 1/3 rule, cumulatively
#
logC = nitrogen.math.cumsimp(g, t)
# Add the energy offset phase correction
logC += -1j * h0 * t
if method == 'integral_log':
# Return the continuous logarithm of C
return logC
else:
# Return C
C = np.exp(logC)
return C
elif method == 'direct':
#
# Calculate the correlation function by
# the direct method
#
# First, calculate the propagation normal
# modes
rtW = np.diag(np.sqrt(w))
irW = np.diag(1/np.sqrt(w))
z2,L = np.linalg.eigh(rtW @ K @ rtW)
# Force L to have positive determinant!
if np.linalg.det(L) < 0:
L[:,0] *= -1
omega = np.sqrt(np.abs(z2))
sigma = np.array([1 if z2[i] > 0 else -1j for i in range(n)])
rtSO = np.diag(np.sqrt(sigma * omega))
irSO = np.diag(1/np.sqrt(sigma*omega))
LamP = irW @ L @ rtSO + rtW @ L @ irSO
LamM = irW @ L @ rtSO - rtW @ L @ irSO
iLamP = np.linalg.inv(LamP)
C = np.zeros_like(t, dtype = np.complex128)
def eta(x):
#
# eta(x) = (e^x - 1) / x
#
result_small = 1.0 + x/2 + x**2/6 + x**3/24 + x**4/120 + x**5/720 + x**6/5040
result_big = np.expm1(x) / (x + 1e-20)
result = np.choose(abs(x) > 1e-2,
[result_small, result_big])
return result
def zeta(x):
#
# zeta(x) = (e^x - x - 1) / x**2
#
result_small = 1/2 + x/6 + x**2/24 + x**3/120 + x**4/720 + x**5/5040 + x**6/40320
result_big = (np.expm1(x) - x) / (x + 1e-20)**2
result = np.choose(abs(x) > 1e-2,
[result_small, result_big])
return result
# Force all time values to be non-negative.
# Afterward, negative time can be evaluated
# via the hermiticity of C(t)
for i in range(len(t)):
tp = abs(t[i]) # The current time value
# The exp^- diagonal
em = np.diag(np.exp(-1j * tp * sigma*omega))
#
# Calculate det(exp[A'])**1/2:
#
# A factoring and eigendecomposition
# procedure ensures there are no
# branch-cut discontinuities
#
quad_term = np.exp(-1j * tp * sum(sigma*omega) / 2)
quad_term *= np.linalg.det(LamP / 2) ** -1
M = iLamP.T @ em @ iLamP @ LamM @ em @ LamM.T
evs = np.linalg.eigvals(M)
for a in evs:
quad_term *= np.sqrt(1 - a)**-1
#
# Calculate the gradient contributions
#
hp = -1j*tp*h0 # Trivial phase contribution
# The eta^- and zeta^- diagonal matrices
etam = np.diag(eta(-1j*tp*sigma*omega))
zetam = np.diag(zeta(-1j*tp*sigma*omega))
# First term
temp1 = iLamP @ LamM @ em @ LamM.T @ iLamP.T
G1 = -etam @ temp1 @ np.linalg.inv(np.eye(n) - em@temp1) @ etam
# Second term
temp2 = iLamP.T @ em @ iLamP @ LamM @ em @ LamM.T
temp3 = etam @ LamM.T @ np.linalg.inv(np.eye(n) - temp2) @ iLamP.T @ etam
G2 = -(temp3 + temp3.T)
# Third term
G3 = -2*zetam - etam @ LamM.T @ \
np.linalg.inv(np.eye(n) - temp2) @ \
iLamP.T @ em @ iLamP @ LamM @ etam
Gamma = G1 + G2 + G3
Fbar = (LamP - LamM).T @ F
hp += (tp/4)**2 * np.dot(Fbar, Gamma @ Fbar)
C[i] = quad_term * np.exp(hp)
# For negative time values, correct
# for the complex conjugate
if t[i] < 0:
C[i] = np.conjugate(C[i])
return C
else:
raise ValueError('Invalid method option')
|
3844cabc58e1fa2ca4b6f4e0a0709d3e1270b6d3
| 3,640,345
|
def add_project(body):
"""
POST /api/projects
:param body:
:return:
"""
try:
return {
'title': 'Succeed to Create Project',
'detail': svcProject.add_project(body)
}, 200
except Exception as e:
raise DefaultError(title='Failed to Create Project', detail=str(e))
|
e65e72fc5a1702b3fb619012539c6695464fcf93
| 3,640,346
|
def new_client(user_id: str, session=DBSession) -> Client:
""" from user_id get a miniflux client
:param user_id: telegram chat_id
:param session: database session class
:type user_id: Union[int, str]
    :raise UserNotBindError: the user has not bound a miniflux account
"""
session = session()
user = session.query(User).filter(User.id == user_id).first()
session.close()
if user is None:
raise UserNotBindError
return Client(SERBER_ADDR, username=user.username, password=user.password)
|
1330ca2b4ee016a9a3e593ba4668380635a3b8e5
| 3,640,347
|
from typing import Union
def create_utility_meters(
hass: HomeAssistantType,
energy_sensor: Union[VirtualEnergySensor, GroupedEnergySensor],
sensor_config: dict,
) -> list[UtilityMeterSensor]:
"""Create the utility meters"""
utility_meters = []
if not sensor_config.get(CONF_CREATE_UTILITY_METERS):
return []
meter_types = sensor_config.get(CONF_UTILITY_METER_TYPES)
for meter_type in meter_types:
name = f"{energy_sensor.name} {meter_type}"
entity_id = f"{energy_sensor.entity_id}_{meter_type}"
_LOGGER.debug("Creating utility_meter sensor: %s", name)
# Below is for BC purposes. Can be removed somewhere in the future
if AwesomeVersion(__short_version__) < "2021.10":
utility_meter = VirtualUtilityMeterSensor(
energy_sensor.entity_id, name, meter_type, entity_id
)
else:
            if DATA_UTILITY not in hass.data:
hass.data[DATA_UTILITY] = {}
hass.data[DATA_UTILITY][entity_id] = {
CONF_SOURCE_SENSOR: energy_sensor.entity_id,
CONF_METER_TYPE: meter_type,
CONF_TARIFFS: [],
CONF_METER_NET_CONSUMPTION: False,
}
utility_meter = UtilityMeterSensor(
parent_meter=entity_id,
source_entity=energy_sensor.entity_id,
name=name,
meter_type=meter_type,
meter_offset=DEFAULT_OFFSET,
net_consumption=False,
)
hass.data[DATA_UTILITY][entity_id][DATA_TARIFF_SENSORS] = [utility_meter]
utility_meters.append(utility_meter)
return utility_meters
|
5374673ea8b870365b7d690c868818cd065405a1
| 3,640,348
|
def builtin_divmod(a, b):
"""Divide two numbers and take the quotient and remainder."""
aa, bb = BType.commonize(a, b)
dv, mv = divmod(aa.value, bb.value)
d = type(aa)(dv)
m = type(aa)(mv)
return (d, m)
|
2e7af62cd58e7dd647be9650e554d0a7e2896ed9
| 3,640,349
|
def format_adjacency(G: nx.Graph, adj: np.ndarray, name: str) -> xr.DataArray:
"""
Format adjacency matrix nicely.
Intended to be used when computing an adjacency-like matrix
off a graph object G.
For example, in defining a func:
```python
def my_adj_matrix_func(G):
adj = some_adj_func(G)
return format_adjacency(G, adj, "xarray_coord_name")
```
## Assumptions
1. `adj` should be a 2D matrix of shape (n_nodes, n_nodes)
1. `name` is something that is unique amongst all names used
in the final adjacency tensor.
## Parameters
- `G`: NetworkX-compatible Graph
- `adj`: 2D numpy array
- `name`: A unique name for the kind of adjacency matrix
being constructed.
Gets used in xarray as a coordinate in the "name" dimension.
## Returns
- An XArray DataArray of shape (n_nodes, n_nodes, 1)
"""
expected_shape = (len(G), len(G))
if adj.shape != expected_shape:
raise ValueError(
"Adjacency matrix is not shaped correctly, "
f"should be of shape {expected_shape}, "
f"instead got shape {adj.shape}."
)
adj = np.expand_dims(adj, axis=-1)
nodes = list(G.nodes())
return xr.DataArray(
adj,
dims=["n1", "n2", "name"],
coords={"n1": nodes, "n2": nodes, "name": [name]},
)
|
e1ebe0bc1a42df03e5cc0a94cd600f8c937fedb4
| 3,640,350
|
def batch_local_stats_from_coords(coords, mask):
"""
Given neighborhood neighbor coordinates, compute bond distances,
2-hop distances, and angles in local neighborhood (this assumes
the central atom has coordinates at the origin)
"""
    one_hop_ds, two_hop_d_mat = batch_distance_metrics_from_coords(coords, mask)
    angles = batch_angles_from_coords(coords, mask)
    return one_hop_ds, two_hop_d_mat, angles
|
d5268749bc79cc793d3476c66a44b326d96376c8
| 3,640,351
|
def resolve_sender_entities(act, lexical_distance=0):
"""
Given an Archive's activity matrix, return a dict of lists, each containing
    message senders ('From' fields) that have been grouped because they are
probably the same entity.
"""
    # senders ordered by descending total activity
senders = act.sum(0).sort_values(ascending=False)
return resolve_entities(
senders, from_header_distance, threshold=lexical_distance
)
|
5e4a510f5e56d6890168e0f32f8433826914cbee
| 3,640,352
|
from copy import deepcopy
from typing import List
import gym
import torch
from torch import optim
from tqdm import tqdm
def ddpg(
env: gym.Env,
agent: ContinuousActorCriticAgent,
epochs: int,
max_steps: int,
buffer_capacity: int,
batch_size: int,
alpha: float,
gamma: float,
polyak: float,
act_noise: float,
verbose: bool,
) -> List[float]:
"""Trains an agent using Deep Deterministic Policy Gradients algorithm
:param env: The environment to train the agent in
:type env: gym.Env
:param agent: The agent to train
:type agent: ContinuousActorCriticAgent
:param epochs: The number of epochs to train the agent for
:type epochs: int
:param max_steps: The max number of steps per episode
:type max_steps: int
:param buffer_capacity: Max capacity of the experience replay buffer
:type buffer_capacity: int
:param batch_size: Batch size to use of experiences from the buffer
:type batch_size: int
:param gamma: The discount factor
:type gamma: float
:param alpha: The learning rate
:type alpha: float
:param polyak: Interpolation factor in polyak averaging for target networks
:type polyak: float
:param act_noise: Standard deviation for Gaussian exploration noise added to policy at training time
:type act_noise: float
:param verbose: Whether to run in verbose mode or not
:type verbose: bool
:return: The total reward per episode
:rtype: List[float]
"""
pi_optimizer = optim.Adam(agent.pi.parameters(), lr=alpha)
q_optimizer = optim.Adam(agent.q.parameters(), lr=alpha)
target_pi = deepcopy(agent.pi).to(device)
target_q = deepcopy(agent.q).to(device)
experience_buf = Buffer(buffer_capacity)
total_rewards = []
for _ in tqdm(range(epochs), disable=not verbose):
s = torch.from_numpy(env.reset()).float()
done = False
reward = 0.0
steps = 0
while not done and steps < max_steps:
# Collect and save experience from the environment
# Add Gaussian noise to the action for exploration
a = agent.act(s) + torch.normal(mean=0.0, std=act_noise, size=(1,))
s_prime, r, done, _ = env.step(a)
s_prime = torch.from_numpy(s_prime).float()
reward += r
experience_buf.save(Experience(s, a, r, s_prime, done))
# Learn from previous experiences
experiences = experience_buf.get(batch_size)
loss = 0.0
states = torch.stack([e.state for e in experiences]).to(device)
actions = torch.stack([e.action for e in experiences]).to(device)
rewards = [e.reward for e in experiences]
next_states = torch.stack([e.next_state for e in experiences]).to(device)
dones = [e.done for e in experiences]
q_values = agent.q(torch.cat([states, actions], dim=-1))
next_qvalues = target_q(torch.cat([next_states, target_pi(next_states)], dim=-1))
# Keep a copy of the current Q-values to be used for the TD targets
td_targets = q_values.clone()
# Compute TD targets
for index in range(batch_size):
# Terminal states do not have a future value
if dones[index]:
next_qvalues[index] = 0.0
td_targets[index] = rewards[index] + gamma * next_qvalues[index]
# Compute TD error and loss (MSE)
loss = (td_targets - q_values) ** 2
loss = loss.mean()
# Update the value function
q_optimizer.zero_grad()
loss.sum().backward()
q_optimizer.step()
# Update the policy
# We use the negative loss because policy optimization is done using gradient _ascent_
# This is because in policy gradient methods, the "loss" is a performance measure that is _maximized_
loss = -agent.q(torch.cat([states, agent.pi(states)], dim=-1))
loss = loss.mean()
pi_optimizer.zero_grad()
loss.backward()
pi_optimizer.step()
# Update target networks with polyak averaging
with torch.no_grad():
for target_p, p in zip(target_pi.parameters(), agent.pi.parameters()):
target_p.copy_(polyak * target_p + (1.0 - polyak) * p)
with torch.no_grad():
for target_p, p in zip(target_q.parameters(), agent.q.parameters()):
target_p.copy_(polyak * target_p + (1.0 - polyak) * p)
s = s_prime
steps += 1
total_rewards.append(reward)
return total_rewards
|
688bd6ed521e476ac67017dd5781f1d337326e0c
| 3,640,353
|
import os
import sys
def import_module_from_path(full_path, global_name):
"""
Import a module from a file path and return the module object.
Allows one to import from anywhere, something ``__import__()`` does not do.
The module is added to ``sys.modules`` as ``global_name``.
:param full_path:
The absolute path to the module .py file
:param global_name:
The name assigned to the module in sys.modules. To avoid
confusion, the global_name should be the same as the variable to which
you're assigning the returned module.
"""
path, filename = os.path.split(full_path)
module, ext = os.path.splitext(filename)
sys.path.append(path)
try:
mymodule = __import__(module)
sys.modules[global_name] = mymodule
except ImportError:
raise ImportError('Module could not be imported from %s.' % full_path)
finally:
del sys.path[-1]
return mymodule
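# Minimal usage sketch: write a tiny module to a temp dir and import it by path.
import tempfile
tmp_dir = tempfile.mkdtemp()
with open(os.path.join(tmp_dir, "tiny_mod.py"), "w") as fh:
    fh.write("ANSWER = 42\n")
tiny_mod = import_module_from_path(os.path.join(tmp_dir, "tiny_mod.py"), "tiny_mod")
assert tiny_mod.ANSWER == 42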
|
d7f73dce4e51715e79a71616cf509f86c8106f27
| 3,640,354
|
def civic_eid26_statement():
"""Create a test fixture for CIViC EID26 statement."""
return {
"id": "civic.eid:26",
"description": "In acute myloid leukemia patients, D816 mutation is associated with earlier relapse and poorer prognosis than wildtype KIT.", # noqa: E501
"direction": "supports",
"evidence_level": "civic.evidence_level:B",
"proposition": "proposition:001",
"variation_origin": "somatic",
"variation_descriptor": "civic.vid:65",
"disease_descriptor": "civic.did:3",
"method": "method:001",
"supported_by": ["pmid:16384925"],
"type": "Statement"
}
|
bdad5e8d5f6fe063d43bb600bf4158fadc1f38ca
| 3,640,355
|
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# Use a dummy metaclass that replaces itself with the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, '_TemporaryClass', (), {})
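# Minimal usage sketch: give a class a custom metaclass in a 2/3-compatible way.
class Meta(type):
    pass
class Base(object):
    pass
class MyClass(with_metaclass(Meta, Base)):
    pass
assert type(MyClass) is Meta and issubclass(MyClass, Base)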
|
eed3c63b6f86f1f3154449e32d94b396a519d523
| 3,640,356
|
import re
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
"""
Username should not be shorter than minlen
Username should always starts with letter and should consists of letters, numbers, dots and underscore
"""
if (len(username) < minlen):
return False
if not re.match(r'^[a-z][a-z0-9._]*$', username): # made changes in Regex
return False
return True
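# Minimal usage sketch:
assert validate_user("blue.kale", 3)
assert not validate_user("1nvalid", 3)  # must start with a letter
assert not validate_user("ab", 3)       # shorter than minlen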
|
7d7ad86eccba2639158a9f5da9fb093f9f4abff9
| 3,640,357
|
def neg_mae_macro(y_trues, y_preds, labels, topics):
"""
As for absolute error, lower is better
Thus use negative value in order to share the same interface when tuning
dev data with other metrics
"""
return -mae_macro(y_trues, y_preds, labels, topics)
|
4e2a3df557e97dc49e1377d1006c58348c34bdaf
| 3,640,358
|
import os
import stat
import subprocess
import json
def transcribe_from_google(tmp_dir):
"""
Transcribes assets in given tmp directory into text assets via Google Cloud Transcribe
"""
def tmp(path): return os.path.join(tmp_dir, path)
script = "#!/bin/bash\n \
export GOOGLE_APPLICATION_CREDENTIALS=~/.gcloud/gcloud-alexa-cli.json \n \
export ACCESS_TOKEN=`gcloud auth application-default print-access-token` \n \
echo $ACCESS_TOKEN \n \
"
with open(tmp("google-token.sh"), 'w') as fw:
fw.write(script)
# feeling dirty...
st = os.stat(tmp("google-token.sh"))
os.chmod(tmp("google-token.sh"), st.st_mode | stat.S_IEXEC)
p = subprocess.Popen("./google-token.sh", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tmp_dir)
stdout, stderr = p.communicate()
log("transcribe_from_google: executed google-token script, result: %s", p.returncode)
if p.returncode != 0:
log("transcribe_from_google: ERROR with google-token! %s", stderr)
return
token = str(stdout.strip())
token = token[2:len(token)-1]
log("transcribe_from_google: token is %s", token)
request_content = """{
"config": {
"encoding":"FLAC",
"sampleRateHertz": 16000,
"languageCode": "en-US",
"enableWordTimeOffsets": false
},
"audio": {
"content":"%s" } }""" % (open(tmp("result.base64"), 'r').read())
with open(tmp("request-transcribe.json"), 'w') as transcribe_write:
transcribe_write.write(request_content)
script = """#!/bin/bash
curl -s -H "Content-Type: application/json"\\
-H "Authorization: Bearer %s"\\
https://speech.googleapis.com/v1/speech:recognize \\
-d@request-transcribe.json > transcript-output.json
""" % (token)
with open(tmp("google-transcribe.sh"), 'w') as fw:
fw.write(script)
# feeling dirty...
st = os.stat(tmp("google-transcribe.sh"))
os.chmod(tmp("google-transcribe.sh"), st.st_mode | stat.S_IEXEC)
p = subprocess.Popen("./google-transcribe.sh", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tmp_dir)
stdout, stderr = p.communicate()
log("transcribe_from_google: executed google-transcribe script, result: %s", p.returncode)
if not os.path.exists(tmp("transcript-output.json")):
log("transcribe_from_google: Could not find transcript-output.json")
return
transcript = json.load(open(tmp("transcript-output.json"), 'r'))
if transcript == None or "results" not in transcript:
log("transcribe_from_google: No results from transcription")
return
log("transcribe_from_google: returning transcript text from %s", transcript)
text = transcript["results"][0]["alternatives"][0]["transcript"]
log("transcribe_from_google: %s", text)
return text
|
41d1a1fc4ceef9e0e3fa7171d1de3e9df4886d42
| 3,640,359
|
def clean_data(df):
"""Cleans the a dataset provided as a DataFrame and returns the cleaned DataFrame.
Cleaning includes expanding the categories and cleaning them up.
Args:
df (DataFrame): Data, containing categories as a single column, as well as messages
Returns:
DataFrame: Cleaned DataFrame
"""
# Prepare data
categories = df.categories.str.split(';', expand = True)
row = categories.loc[0]
category_colnames = [x[:-2] for x in row]
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].str.slice(start=-1)
categories[column] = categories[column].astype(int)
df.drop(columns='categories', inplace=True)
df = pd.merge(left= df, right=categories, left_on=df.index, right_on=categories.index).drop('key_0', axis=1)
# Remove duplicates
df.drop_duplicates(subset='id', inplace=True)
# Remove rows that have a 2 in related, as this is assumed to be faulty data
implausible_related_count = (df['related'] == 2).sum()
df = df.loc[df.related != 2]
print(f'Dropped {implausible_related_count} faulty messages.')
return df
|
1a8552a0ea99691ea94397737ac64f5c9261f66d
| 3,640,360
|
from typing import Mapping
from typing import Any
from typing import Optional
def _validate_float(mapping: Mapping[str, Any],
ref: str) -> Optional[SchemaError]:
"""
Validate the definition of a float value.
:param mapping: representing the type definition to be validated
:param ref: reference to the type definition
:return: error, if any
"""
if 'minimum' in mapping and 'maximum' in mapping:
minimum = mapping['minimum']
maximum = mapping['maximum']
if minimum > maximum:
return SchemaError(
"minimum (== {}) > maximum".format(minimum), ref=ref)
excl_min = False if 'exclusive_minimum' not in mapping \
else bool(mapping['exclusive_minimum'])
excl_max = False if 'exclusive_maximum' not in mapping \
else bool(mapping['exclusive_maximum'])
if excl_min and excl_max:
if minimum == maximum:
return SchemaError(
message=(
"minimum (== {}) == maximum and "
"both are set to exclusive").format(minimum),
ref=ref)
elif not excl_min and excl_max:
if minimum == maximum:
return SchemaError((
"minimum (== {}) == maximum and "
"maximum is set to exclusive").format(minimum),
ref=ref)
elif excl_min and not excl_max:
if minimum == maximum:
return SchemaError((
"minimum (== {}) == maximum and "
"maximum is set to exclusive").format(minimum),
ref=ref)
elif not excl_min and not excl_max:
# If minimum == maximum it is ok to have
# >= minimum and <= maximum as a constraint.
pass
else:
raise AssertionError("Unexpected code path")
return None
|
41c4725a66621addd6164a97549c35d47b1be27f
| 3,640,361
|
import typing
import nltk
def tokenize_document(document: str) -> typing.List[str]:
"""
Helper method to tokenize the document.
:param document: The input document represented as a string.
:return: A list of tokens.
"""
try:
return nltk.tokenize.word_tokenize(document)
except LookupError:
nltk.download('punkt')
return nltk.tokenize.word_tokenize(document)
|
0380efbb2f243b14135b3232d9ae22158ba14747
| 3,640,362
|
def get_type_name_value(obj):
"""
Returns object type name from LLDB value.
It returns type name with asterisk if object is a pointer.
:param lldb.SBValue obj: LLDB value object.
:return: Object type name from LLDB value.
:rtype: str | None
"""
return None if obj is None else obj.GetTypeName()
|
c87a5acf7d8ef794eab97c90b82bbd9574fb0e2b
| 3,640,363
|
from Bio import SeqIO
def fastqprint(fastq):
    """
    Print the id and sequence of each record in a fastq file
    """
    for record in SeqIO.parse(fastq, "fastq"):
        print("%s %s" % (record.id, record.seq))
|
9197da0e9c73f46b5aee8613de434e173a701fd0
| 3,640,364
|
from typing import List
def encodePartList( part_instance: ObjectInstance,
vh_group_list: List[int]) -> dict:
""" Used for copying and pasting
TODO: unify encodePart and encodePartList
Args:
part_instance: The ``Part`` ``ObjectInstance``, to allow for instance
specific property copying
vh_group_list: List of virtual_helices IDs to encode to
be used with copy and paste serialization
Returns:
        Dictionary representing the virtual helices, with ordered lists of
        properties, strands, etc., so that copied virtual helices can be
        pasted under different IDs
"""
part = part_instance.reference()
vh_group_list.sort()
# max_id_number_of_helices = part.getMaxIdNum()
# vh_insertions = part.insertions()
'''NOTE This SHOULD INCLUDE 'grid_type' key
'''
group_props = part.getModelProperties().copy()
assert('grid_type' in group_props)
if not group_props.get('is_lattice', True):
vh_props, origins, directions = part.helixProperties()
group_props['virtual_helices'] = vh_props
group_props['origins'] = origins
group_props['directions'] = directions
else:
vh_props, origins, directions = part.helixProperties(vh_group_list)
group_props['virtual_helices'] = vh_props
group_props['origins'] = origins
group_props['directions'] = directions
xover_list = []
strand_list = []
prop_list = []
vh_list = []
vh_group_set = set(vh_group_list)
def filter_xovers(x):
return (x[0] in vh_group_set and x[3] in vh_group_set)
def filter_vh(x):
return x[0] in vh_group_set
for id_num in vh_group_list:
offset_and_size = part.getOffsetAndSize(id_num)
if offset_and_size is None:
# add a placeholder
strand_list.append(None)
prop_list.append(None)
else:
offset, size = offset_and_size
vh_list.append((id_num, size))
fwd_ss, rev_ss = part.getStrandSets(id_num)
fwd_idxs, fwd_colors = fwd_ss.dump(xover_list)
rev_idxs, rev_colors = rev_ss.dump(xover_list)
strand_list.append((fwd_idxs, rev_idxs))
prop_list.append((fwd_colors, rev_colors))
# end for
remap = {x: y for x, y in zip(vh_group_list,
range(len(vh_group_list))
)}
group_props['vh_list'] = vh_list
group_props['strands'] = {'indices': strand_list,
'properties': prop_list
}
filtered_insertions = filter(filter_vh, part.dumpInsertions())
group_props['insertions'] = [(remap[x], y, z) for x, y, z in filtered_insertions]
filtered_xover_list = filter(filter_xovers, xover_list)
group_props['xovers'] = [(remap[a], b, c, remap[x], y, z)
for a, b, c, x, y, z in filtered_xover_list]
instance_props = part_instance.properties()
group_props['instance_properties'] = instance_props
vh_order = filter(lambda x: x in vh_group_set, group_props['virtual_helix_order'])
vh_order = [remap[x] for x in vh_order]
group_props['virtual_helix_order'] = vh_order
external_mods_instances = filter(filter_vh,
part.dumpModInstances(is_internal=False))
group_props['external_mod_instances'] = [(remap[w], x, y, z)
for w, x, y, z in external_mods_instances]
""" TODO Add in Document modifications
"""
return group_props
|
367cdab1ff71104655b93744199cea1b4f822bc8
| 3,640,365
|
import numpy as np
import joblib
import os
def get_charges_single_serial(path_to_cif, create_cif=False, path_to_output_dir='.', add_string='_charged',
use_default_model=True, path_to_pickle_obj='dummy_string'):
""" Description
Computes the partial charges for a single CIF file and returns an ASE atoms object updated with the estimated charges
included as atoms.info['_atom_site_charges']. Features for each CIF is calculated in serial using Numpy.
    Options are included for using a different pickled sklearn model and for writing an output CIF with the new charges.
    :type path_to_cif: string
    :param path_to_cif: path to the cif file as input
    :type create_cif: bool
    :param create_cif: whether to output a new CIF file with '_atom_site_charges' added
:type path_to_output_dir: string
:param path_to_output_dir: path to the output directory for creating the new CIF file.
:type add_string: string
:param add_string: A string added to the filename to distinguish the output cif file from the original one.
:type use_default_model: bool
:param use_default_model: whether to use the pre-trained model or not. If set to False you can set path to a different pickle file using 'path_to_pickle_obj'.
:type path_to_pickle_obj: string
:param path_to_pickle_obj: path to a pickle file containing the scikit-learn model one wants to use. Is used only if use_default_model is set to False.
:raises:
:rtype: an ase atoms object with the partial charges added as atoms.info['_atom_site_charges'] and the feature vectors in atoms.info['features']
"""
# * Get the path of the pickle and load the model
print("Loading the model...")
if use_default_model:
this_dir, this_filename = os.path.split(__file__)
path_to_pickle_obj = os.path.join(this_dir, "data", "Model_RF_DDEC.pkl")
# print(path_to_pickle_obj)
model = joblib.load(path_to_pickle_obj)
else:
model = joblib.load(path_to_pickle_obj)
# print("Computing features...")
data = get_features_from_cif_serial(path_to_cif)
features = data.info['features']
print("Estimating charges for {}...".format(path_to_cif))
charges = model.predict(features)
# charges = np.round(charges, decimals=4)
# * Adjust the charges for neutrality
charges_adj = charges - np.sum(charges) * np.abs(charges) / np.sum(np.abs(charges))
data.info['_atom_site_charge'] = charges_adj.tolist()
if np.any(np.abs(charges - charges_adj) > 0.2):
print("WARNING: Some charges were adjusted by more than 0.2 to maintain neutrality!")
# if write_cif==True:
if create_cif:
print('Writing new cif file...')
path_to_cif = os.path.abspath(path_to_cif)
old_name = os.path.basename(path_to_cif)
new_name = old_name.split('.')[-2] + add_string + '.cif'
# data1 = data_all[i]
# new_filename = path_to_cif.split('.')[-2].split('\\')[-1]+add_string+ '.cif'
path_to_output_dir = os.path.abspath(path_to_output_dir)
path_to_output_cif = os.path.join(path_to_output_dir, new_name)
write_cif(path_to_output_cif, data)
return data
|
514ac6a3f1a0cd502761ebad681d24aab5f971ef
| 3,640,366
|
# `Match` is the application's ORM model (not `typing.Match`).
def matchlist(page=1):
"""Respond with view for paginated match list."""
query = Match.query.order_by(Match.id.desc())
paginatedMatches = query.paginate(page, current_app.config['MATCHES_PER_PAGE'], False)
return render_template('matchlist.html', matches=paginatedMatches.items, pagination=paginatedMatches)
|
e9f082e6acb513636b9db98996997991efbb79d8
| 3,640,367
|
import uuid
import os
import json
def post_page_files(current_user, pid):
""" Изменение файлов страницы"""
try:
page = SitePages.query.get(pid)
if request.files.getlist('file[]'):
page_files = request.files.getlist('file[]')
na_files = []
for pfile in page_files:
fsize_b = get_fsize(pfile)
                # Alternatively, the file could be read into memory up to a
                # fixed size limit, but that breaks writing the file later
                # because its stream has already been consumed:
# MAX_FILE_SIZE = 1024 * 1024 + 1
# file_bytes = file.read(MAX_FILE_SIZE)
fsize_mb = formatBytes(fsize_b, power=2)['number']
if ((pfile.content_type not in [
'application/vnd.openxmlformats-officedocument'
'.wordprocessingml.document',
'application/vnd.oasis.opendocument.text',
'application/pdf',
'application/zip',
'application/msword'
]) or (fsize_mb > 10)):
na_files.append("«" + pfile.filename + "»")
else:
fsize = formatBytes(fsize_b)
extension = pfile.filename.split(".")[-1]
separator = ' '
new_file_name = uuid.uuid1().hex + '.' + extension
ud = {
"fid": str(uuid.uuid4().hex),
"name": separator.join(pfile.filename.split(".")[:-1]),
"size": str(fsize['number']) + ' ' + fsize['measure'],
"fname": new_file_name,
"extension": extension
}
page.files.append(ud)
pfile.save(
os.path.join(
current_app.config['CMS_PAGE_FILES'],
new_file_name))
flag_modified(page, 'files')
db.session.commit()
rtext = 'Файлы добавлены!'
rtype = 'success'
if na_files:
rtext = 'Файлы добавлены, но файлы: '
separator = ', '
rtext = rtext + separator.join(na_files)
rtext = rtext + ' были проигнорированы, т.к. ' \
'либо превышен размер, либо не подходящий формат файла.'
rtype = 'warning'
response = Response(
response=json.dumps({'type': rtype,
'text': rtext}),
status=200,
mimetype='application/json'
)
else:
response = Response(
response=json.dumps({'type': 'danger',
'text': 'Вы не отправили'
' ни одного файла!'}),
status=422,
mimetype='application/json'
)
return response
except Exception:
response = server_error(request.args.get("dbg"))
return response
|
8e9ebb1f01a6c66aa2e42f72e81513c12ff3987e
| 3,640,368
|
import pandas as pd
def get_data(filename: str) -> pd.DataFrame:
""" Create a dataframe out of south_sudan_data.csv """
df = pd.read_csv(filename)
return df
|
bae9149ff8094abe916c0744c5f42735c5ee84ba
| 3,640,369
|
import re
def digital_PCR( primer_mappings ):
"""
Makes a "digital" PCR by looking at the mappings of primers and
predict which will produce products, and more important multiple
products
"""
primer_names = sorted(primer_mappings.keys())
nr_primer_names = len( primer_names )
mappings = {}
products = {}
# for i in range(0, nr_primer_names):
for primer1 in primer_names:
# primer1 = primer_names[ i ]
if ( primer1 == 'FULLSEQ'):
continue
if ( not re.search(r'LEFT', primer1 )):
continue
mappings[ primer1 ] = {}
products[ primer1 ] = {}
for primer2 in primer_names:
# for j in range(0, nr_primer_names):
# primer2 = primer_names[ j ]
if ( primer2 == 'FULLSEQ'):
continue
if ( not re.search(r'RIGHT', primer2 )):
continue
mappings[ primer1 ][ primer2 ] = []
products[ primer1 ][ primer2 ] = []
multiple_products = 0
# print " -- %s vs %s" % (primer1, primer2)
for chr_index1 in range(0, len(primer_mappings[ primer1 ][ 'CHR' ])):
for chr_index2 in range(0, len(primer_mappings[ primer2 ][ 'CHR' ])):
chr1 = primer_mappings[ primer1 ][ 'CHR' ][ chr_index1 ]
chr2 = primer_mappings[ primer2 ][ 'CHR' ][ chr_index2 ]
pos1 = int( primer_mappings[ primer1 ][ 'POS' ][ chr_index1 ] )
pos2 = int( primer_mappings[ primer2 ][ 'POS' ][ chr_index2 ] )
strand1 = primer_mappings[ primer1 ][ 'STRAND' ][ chr_index1 ]
strand2 = primer_mappings[ primer2 ][ 'STRAND' ][ chr_index2 ]
# The primers map to different chromosomes
if ( chr1 != chr2 ):
continue
# the primer are on the same strand.
if ( strand1 == strand2 ):
continue
# Calculate the product size, and check if it is in a doable range
product_size = ( pos2 - pos1 )
if ( product_size < 0 or product_size > config.MAX_PRODUCT_SIZE):
continue
# Make sure that the strand is in the right orientation.
if ( pos1 < pos2 and strand1 != 'plus' and strand2 != 'minus'):
continue
elif( pos1 > pos2 and strand1 != 'minus' and strand2 != 'plus'):
continue
print "%s -- %s %s:%d:%s -> %s:%d:%s ==>> %d bp" %( primer1, primer2, chr1, pos1, strand1, chr2, pos2, strand2, product_size)
mappings[ primer1 ][ primer2 ].append( product_size )
products[ primer1 ][ primer2 ].append( {'chr' : chr1, 'start_pos': pos1, 'end_pos': pos2, 'size': product_size} )
if ( len(products[ primer1 ][ primer2 ]) > 5):
print "Got more than 5 hits... %s -- %s " % ( primer1, primer2 )
multiple_products = 1
break
if ( multiple_products ):
break
# pp.pprint( products )
# pp.pprint( mappings )
return products
|
296b69fd5eaf1fb95afc2fb07dd99e97d715376f
| 3,640,370
|
def database_find_user_salt(username:str)->str:
"""
Finds a users salt from there username
Parameter:
username (str): username selected by the user
Returns:
salt (str): The users salt from the database
Example:
>>> username = 'andrew'
>>> database_find_user_salt(username)
'0fYst66bDGTBi97El1rOzdbP0su8NOoAqNyYuekUb4Rav9WyYw6zOtjTqzhTHcn'
"""
database_cursor.execute(
"SELECT salt FROM passwords WHERE username=:username",
{"username": username})
return database_cursor.fetchone()[0]
|
2c74e943a650a74eb6a7b71a7ac2e677891dbd63
| 3,640,371
|
from .. import sim
def createSimulate(netParams=None, simConfig=None, output=False):
"""
Function for/to <short description of `netpyne.sim.wrappers.createSimulate`>
Parameters
----------
netParams : <``None``?>
<Short description of netParams>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
simConfig : <``None``?>
<Short description of simConfig>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
output : bool
<Short description of output>
**Default:** ``False``
**Options:** ``<option>`` <description of option>
"""
(pops, cells, conns, stims, rxd, simData) = sim.create(netParams, simConfig, output=True)
sim.simulate()
if output: return (pops, cells, conns, stims, simData)
|
399866b8f0a2fd39235526c471327a9cf042603e
| 3,640,372
|
def lang_add(cursor, lang, trust):
"""Adds language for db"""
if trust:
query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
else:
query = 'CREATE LANGUAGE "%s"' % lang
cursor.execute(query)
return True
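# Illustrative usage (a minimal sketch, assuming a DB-API cursor such as psycopg2's):
# lang_add(cursor, 'plpgsql', trust=True)      # executes: CREATE TRUSTED LANGUAGE "plpgsql"
# lang_add(cursor, 'plpython3u', trust=False)  # executes: CREATE LANGUAGE "plpython3u"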
|
f5a1ac9264efca070b4528505ee6bee6892b3e80
| 3,640,373
|
def interpolate(
a_x, a_q2, padded_x, s_x, padded_q2, s_q2, actual_padded,
):
"""
Basic Bicubic Interpolation inside the subgrid
Four Neighbour Knots selects grid knots around each query point to
make the interpolation: 4 knots on the x axis and 4 knots on the q2
axis are needed for each point, plus the pdf fvalues there.
Default bicubic interpolation performs the interpolation itself
Parameters
----------
a_x: tf.tensor of shape [None]
query of values of log(x)
a_q2: tf.tensor of shape [None]
query of values of log(q2)
padded_x: tf.tensor of shape [None]
value for all the knots on the x axis
padded with one zero at the beginning and one at the end to
        avoid out of range errors when querying points near boundaries
s_x: tf.tensor of shape []
size of x knots tensor without padding
padded_q2: tf.tensor of shape [None]
value for all the knots on the q2 axis
padded with one zero at the beginning and one at the end to
avoid out of range errors when querying points near boundaries
s_q2: tf.tensor of shape []
size of q2 knots tensor without padding
actual_padded: tf.tensor of shape [None,None]
pdf values: first axis is the flattened padded (q2,x) grid,
second axis is needed pid column (dimension depends on the query)
"""
x_bins, q2_bins, corn_x, corn_q2, pdf_vals = four_neighbour_knots(
a_x, a_q2, padded_x, padded_q2, actual_padded
)
return default_bicubic_interpolation(
a_x, a_q2, x_bins, q2_bins, corn_x, corn_q2, pdf_vals, s_x, s_q2
)
|
5e5ebda28acdc56a80eca102b39d15aca29ac648
| 3,640,374
|
# `T` is the web2py translator, available globally in controllers (not `re.T`).
def setting():
""" SMS settings for the messaging framework """
tablename = "%s_%s" % (module, resourcename)
table = s3db[tablename]
table.outgoing_sms_handler.label = T("Outgoing SMS handler")
table.outgoing_sms_handler.comment = DIV(DIV(_class="tooltip",
_title="%s|%s" % (T("Outgoing SMS Handler"),
T("Selects what type of gateway to use for outbound SMS"))))
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_update = T("Edit SMS Settings"),
msg_record_modified = T("SMS settings updated")
)
def prep(r):
if r.http == "POST":
# Go to the details page for the chosen SMS Gateway
outgoing_sms_handler = request.post_vars.get("outgoing_sms_handler",
None)
if outgoing_sms_handler == "WEB_API":
s3mgr.configure(tablename,
update_next = URL(f="api_settings",
args=[1, "update"]))
elif outgoing_sms_handler == "SMTP":
s3mgr.configure(tablename,
update_next = URL(f="smtp_to_sms_settings",
args=[1, "update"]))
elif outgoing_sms_handler == "MODEM":
s3mgr.configure(tablename,
update_next = URL(f="modem_settings",
args=[1, "update"]))
elif outgoing_sms_handler == "TROPO":
s3mgr.configure(tablename,
update_next = URL(f="tropo_settings",
args=[1, "update"]))
else:
s3mgr.configure(tablename,
update_next = URL(args=[1, "update"]))
return True
response.s3.prep = prep
s3mgr.configure(tablename,
deletable=False,
listadd=False)
#response.menu_options = admin_menu_options
return s3_rest_controller()
|
0ecdb50499f22eb88e8a22d8295928c6208cff45
| 3,640,375
|
from typing import Callable
from weakref import WeakKeyDictionary
def _cachegetter(
    attr: str,
    cachefactory: Callable[[], _CacheT] = WeakKeyDictionary,  # WeakKeyDictionary is best for properties
) -> Callable[[_CIT], _CacheT]:
"""Returns a safer attrgetter which constructs the missing object with cachefactory
May be used for normal methods, classmethods and properties, as default
factory is a WeakKeyDictionary (good for storing weak-refs for self or cls).
It may also safely be used with staticmethods, if first parameter is an object
on which the cache will be stored.
Better when used with key getter. If it's a tuple, you should use e.g. cachefactory=dict
Example usage with cachetools.cachedmethod:
class Foo:
@property
@cachedmethod(_cachegetter("__bar_cache"))
def bar(self) -> _RT:
return 2+3
"""
def cachegetter(cls_or_obj: _CIT) -> _CacheT:
cache = getattr(cls_or_obj, attr, None)
if cache is None:
cache = cachefactory()
setattr(cls_or_obj, attr, cache)
return cache
return cachegetter
|
b9d0d8d6ed1a2d3d9a2500326c996af94726ddc4
| 3,640,376
|
from pathlib import Path
import shutil
import json
import os
def change_db_path(new_path: Path, cfg: TodoConfig) -> ErrMsg:
"""new_path 是一个不存在的文件或一个已存在的文件夹,不能是一个已存在的文件"""
new_path = new_path.resolve()
if new_path.is_dir():
new_path = new_path.joinpath(todo_db_name)
if new_path.exists():
return f"{new_path} already exists."
old_path = cfg["db_path"]
shutil.copyfile(old_path, new_path)
cfg["db_path"] = new_path.__str__()
with open(todo_cfg_path, "w", encoding="utf-8") as f:
json.dump(cfg, f, indent=4, ensure_ascii=False)
os.remove(old_path)
return ""
|
38eb300b946a6afe72fca79380f9e5ccb6a68d0c
| 3,640,377
|
def format_time(time):
""" It formats a datetime to print it
Args:
time: datetime
Returns:
a formatted string representing time
"""
m, s = divmod(time, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
return ('{:02d}d {:02d}h {:02d}m {:02d}s').format(int(d), int(h), int(m), int(s))
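# Worked example: 90061 seconds is 1 day, 1 hour, 1 minute and 1 second,
# so format_time(90061) returns '01d 01h 01m 01s'.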
|
67c6404cbc5076358f9e85dc169e1d7b976b7d60
| 3,640,378
|
import numpy as np

def egarch_recursion_python(
parameters: Float64Array,
resids: Float64Array,
sigma2: Float64Array,
p: int,
o: int,
q: int,
nobs: int,
backcast: float,
var_bounds: Float64Array,
lnsigma2: Float64Array,
std_resids: Float64Array,
abs_std_resids: Float64Array,
) -> Float64Array:
"""
Compute variance recursion for EGARCH models
Parameters
----------
parameters : ndarray
Model parameters
resids : ndarray
Residuals to use in the recursion
sigma2 : ndarray
Conditional variances with same shape as resids
p : int
Number of symmetric innovations in model
o : int
Number of asymmetric innovations in model
q : int
Number of lags of the (transformed) variance in the model
nobs : int
Length of resids
backcast : float
Value to use when initializing the recursion
var_bounds : 2-d array
nobs by 2-element array of upper and lower bounds for conditional
variances for each time period
lnsigma2 : ndarray
Temporary array (overwritten) with same shape as resids
std_resids : ndarray
Temporary array (overwritten) with same shape as resids
abs_std_resids : ndarray
Temporary array (overwritten) with same shape as resids
"""
for t in range(nobs):
loc = 0
lnsigma2[t] = parameters[loc]
loc += 1
for j in range(p):
if (t - 1 - j) >= 0:
lnsigma2[t] += parameters[loc] * (
abs_std_resids[t - 1 - j] - SQRT2_OV_PI
)
loc += 1
for j in range(o):
if (t - 1 - j) >= 0:
lnsigma2[t] += parameters[loc] * std_resids[t - 1 - j]
loc += 1
for j in range(q):
if (t - 1 - j) < 0:
lnsigma2[t] += parameters[loc] * backcast
else:
lnsigma2[t] += parameters[loc] * lnsigma2[t - 1 - j]
loc += 1
if lnsigma2[t] > LNSIGMA_MAX:
lnsigma2[t] = LNSIGMA_MAX
sigma2[t] = np.exp(lnsigma2[t])
if sigma2[t] < var_bounds[t, 0]:
sigma2[t] = var_bounds[t, 0]
lnsigma2[t] = np.log(sigma2[t])
elif sigma2[t] > var_bounds[t, 1]:
sigma2[t] = var_bounds[t, 1] + np.log(sigma2[t]) - np.log(var_bounds[t, 1])
lnsigma2[t] = np.log(sigma2[t])
std_resids[t] = resids[t] / np.sqrt(sigma2[t])
abs_std_resids[t] = np.abs(std_resids[t])
return sigma2
|
74478c42d28a50a873834d6eb8207cc756d5fc03
| 3,640,379
|
import numpy as np

def polpair_tuple2int(polpair, x_orientation=None):
"""
Convert a tuple pair of polarization strings/integers into
an pol-pair integer.
The polpair integer is formed by adding 20 to each standardized
polarization integer (see polstr2num and AIPS memo 117) and
then concatenating them. For example, polarization pair
('pI', 'pQ') == (1, 2) == 2122.
Parameters
----------
polpair : tuple, length 2
A length-2 tuple containing a pair of polarization strings
or integers, e.g. ('XX', 'YY') or (-5, -5).
x_orientation: str, optional
Orientation in cardinal direction east or north of X dipole.
Default keeps polarization in X and Y basis.
Returns
-------
polpair : int
Integer representation of polarization pair.
"""
# Recursive evaluation
if isinstance(polpair, (list, np.ndarray)):
return [polpair_tuple2int(p) for p in polpair]
# Check types
assert type(polpair) in (tuple,), "pol must be a tuple"
assert len(polpair) == 2, "polpair tuple must have 2 elements"
# Convert strings to ints if necessary
pol1, pol2 = polpair
    if isinstance(pol1, str): pol1 = polstr2num(pol1, x_orientation=x_orientation)
    if isinstance(pol2, str): pol2 = polstr2num(pol2, x_orientation=x_orientation)
# Convert to polpair integer
ppint = (20 + pol1)*100 + (20 + pol2)
return ppint
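# Worked example, matching the docstring: ('pI', 'pQ') maps to polarization
# integers (1, 2), so the pair integer is (20 + 1) * 100 + (20 + 2) = 2122.
# polpair_tuple2int(('pI', 'pQ'))  # -> 2122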
|
ac31db32b26a4abe8151f72409467d2a9db2d0b6
| 3,640,380
|
import warnings
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
def compute_features(df):
"""Compute ReScore features."""
preds_dict = df_to_dict(df)
rescore_features = []
spec_ids = []
charges = []
feature_names = [
"spec_pearson_norm",
"ionb_pearson_norm",
"iony_pearson_norm",
"spec_mse_norm",
"ionb_mse_norm",
"iony_mse_norm",
"min_abs_diff_norm",
"max_abs_diff_norm",
"abs_diff_Q1_norm",
"abs_diff_Q2_norm",
"abs_diff_Q3_norm",
"mean_abs_diff_norm",
"std_abs_diff_norm",
"ionb_min_abs_diff_norm",
"ionb_max_abs_diff_norm",
"ionb_abs_diff_Q1_norm",
"ionb_abs_diff_Q2_norm",
"ionb_abs_diff_Q3_norm",
"ionb_mean_abs_diff_norm",
"ionb_std_abs_diff_norm",
"iony_min_abs_diff_norm",
"iony_max_abs_diff_norm",
"iony_abs_diff_Q1_norm",
"iony_abs_diff_Q2_norm",
"iony_abs_diff_Q3_norm",
"iony_mean_abs_diff_norm",
"iony_std_abs_diff_norm",
"dotprod_norm",
"dotprod_ionb_norm",
"dotprod_iony_norm",
"cos_norm",
"cos_ionb_norm",
"cos_iony_norm",
"spec_pearson",
"ionb_pearson",
"iony_pearson",
"spec_spearman",
"ionb_spearman",
"iony_spearman",
"spec_mse",
"ionb_mse",
"iony_mse",
"min_abs_diff_iontype",
"max_abs_diff_iontype",
"min_abs_diff",
"max_abs_diff",
"abs_diff_Q1",
"abs_diff_Q2",
"abs_diff_Q3",
"mean_abs_diff",
"std_abs_diff",
"ionb_min_abs_diff",
"ionb_max_abs_diff",
"ionb_abs_diff_Q1",
"ionb_abs_diff_Q2",
"ionb_abs_diff_Q3",
"ionb_mean_abs_diff",
"ionb_std_abs_diff",
"iony_min_abs_diff",
"iony_max_abs_diff",
"iony_abs_diff_Q1",
"iony_abs_diff_Q2",
"iony_abs_diff_Q3",
"iony_mean_abs_diff",
"iony_std_abs_diff",
"dotprod",
"dotprod_ionb",
"dotprod_iony",
"cos",
"cos_ionb",
"cos_iony",
]
# Suppress RuntimeWarnings about invalid values
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for spec_id, preds in preds_dict.items():
spec_ids.append(spec_id)
charges.append(preds["charge"])
# Create numpy arrays
target_b = np.array(preds["target"]["B"])
target_y = np.array(preds["target"]["Y"])
target_all = np.concatenate([target_b, target_y])
prediction_b = np.array(preds["prediction"]["B"])
prediction_y = np.array(preds["prediction"]["Y"])
prediction_all = np.concatenate([prediction_b, prediction_y])
target_b_unlog = 2 ** target_b - 0.001
target_y_unlog = 2 ** target_y - 0.001
target_all_unlog = 2 ** target_all - 0.001
prediction_b_unlog = 2 ** prediction_b - 0.001
prediction_y_unlog = 2 ** prediction_y - 0.001
prediction_all_unlog = 2 ** prediction_all - 0.001
# Calculate absolute differences
abs_diff_b = np.abs(target_b - prediction_b)
abs_diff_y = np.abs(target_y - prediction_y)
abs_diff_all = np.abs(target_all - prediction_all)
abs_diff_b_unlog = np.abs(target_b_unlog - prediction_b_unlog)
abs_diff_y_unlog = np.abs(target_y_unlog - prediction_y_unlog)
abs_diff_all_unlog = np.abs(target_all_unlog - prediction_all_unlog)
# Add features
feats = np.array(
[
# spec_id,
# preds['charge'],
# Features between spectra in log space
pearsonr(target_all, prediction_all)[0], # Pearson all ions
pearsonr(target_b, prediction_b)[0], # Pearson b ions
pearsonr(target_y, prediction_y)[0], # Pearson y ions
mse(target_all, prediction_all), # MSE all ions
mse(target_b, prediction_b), # MSE b ions
mse(target_y, prediction_y), # MSE y ions
np.min(abs_diff_all), # min_abs_diff_norm
np.max(abs_diff_all), # max_abs_diff_norm
np.quantile(abs_diff_all, 0.25), # abs_diff_Q1_norm
np.quantile(abs_diff_all, 0.5), # abs_diff_Q2_norm
np.quantile(abs_diff_all, 0.75), # abs_diff_Q3_norm
np.mean(abs_diff_all), # mean_abs_diff_norm
np.std(abs_diff_all), # std_abs_diff_norm
np.min(abs_diff_b), # ionb_min_abs_diff_norm
np.max(abs_diff_b), # ionb_max_abs_diff_norm
np.quantile(abs_diff_b, 0.25), # ionb_abs_diff_Q1_norm
np.quantile(abs_diff_b, 0.5), # ionb_abs_diff_Q2_norm
np.quantile(abs_diff_b, 0.75), # ionb_abs_diff_Q3_norm
np.mean(abs_diff_b), # ionb_mean_abs_diff_norm
np.std(abs_diff_b), # ionb_std_abs_diff_norm
np.min(abs_diff_y), # iony_min_abs_diff_norm
np.max(abs_diff_y), # iony_max_abs_diff_norm
np.quantile(abs_diff_y, 0.25), # iony_abs_diff_Q1_norm
np.quantile(abs_diff_y, 0.5), # iony_abs_diff_Q2_norm
np.quantile(abs_diff_y, 0.75), # iony_abs_diff_Q3_norm
np.mean(abs_diff_y), # iony_mean_abs_diff_norm
np.std(abs_diff_y), # iony_std_abs_diff_norm
np.dot(target_all, prediction_all), # Dot product all ions
np.dot(target_b, prediction_b), # Dot product b ions
np.dot(target_y, prediction_y), # Dot product y ions
np.dot(target_all, prediction_all)
/ (
np.linalg.norm(target_all, 2)
* np.linalg.norm(prediction_all, 2)
), # Cos similarity all ions
np.dot(target_b, prediction_b)
/ (
np.linalg.norm(target_b, 2) * np.linalg.norm(prediction_b, 2)
), # Cos similarity b ions
np.dot(target_y, prediction_y)
/ (
np.linalg.norm(target_y, 2) * np.linalg.norm(prediction_y, 2)
), # Cos similarity y ions
# Same features in normal space
pearsonr(target_all_unlog, prediction_all_unlog)[
0
], # Pearson all ions
pearsonr(target_b_unlog, prediction_b_unlog)[0], # Pearson b ions
pearsonr(target_y_unlog, prediction_y_unlog)[0], # Pearson y ions
spearmanr(target_all_unlog, prediction_all_unlog)[
0
], # Spearman all ions
spearmanr(target_b_unlog, prediction_b_unlog)[0], # Spearman b ions
spearmanr(target_y_unlog, prediction_y_unlog)[0], # Spearman y ions
mse(target_all_unlog, prediction_all_unlog), # MSE all ions
mse(target_b_unlog, prediction_b_unlog), # MSE b ions
mse(target_y_unlog, prediction_y_unlog), # MSE y ions,
0
if np.min(abs_diff_b_unlog) <= np.min(abs_diff_y_unlog)
else 1, # Ion type with min absolute difference
0
if np.max(abs_diff_b_unlog) >= np.max(abs_diff_y_unlog)
else 1, # Ion type with max absolute difference
np.min(abs_diff_all_unlog), # min_abs_diff
np.max(abs_diff_all_unlog), # max_abs_diff
np.quantile(abs_diff_all_unlog, 0.25), # abs_diff_Q1
np.quantile(abs_diff_all_unlog, 0.5), # abs_diff_Q2
np.quantile(abs_diff_all_unlog, 0.75), # abs_diff_Q3
np.mean(abs_diff_all_unlog), # mean_abs_diff
np.std(abs_diff_all_unlog), # std_abs_diff
np.min(abs_diff_b_unlog), # ionb_min_abs_diff
np.max(abs_diff_b_unlog), # ionb_max_abs_diff_norm
np.quantile(abs_diff_b_unlog, 0.25), # ionb_abs_diff_Q1
np.quantile(abs_diff_b_unlog, 0.5), # ionb_abs_diff_Q2
np.quantile(abs_diff_b_unlog, 0.75), # ionb_abs_diff_Q3
np.mean(abs_diff_b_unlog), # ionb_mean_abs_diff
np.std(abs_diff_b_unlog), # ionb_std_abs_diff
np.min(abs_diff_y_unlog), # iony_min_abs_diff
np.max(abs_diff_y_unlog), # iony_max_abs_diff
np.quantile(abs_diff_y_unlog, 0.25), # iony_abs_diff_Q1
np.quantile(abs_diff_y_unlog, 0.5), # iony_abs_diff_Q2
np.quantile(abs_diff_y_unlog, 0.75), # iony_abs_diff_Q3
np.mean(abs_diff_y_unlog), # iony_mean_abs_diff
np.std(abs_diff_y_unlog), # iony_std_abs_diff
np.dot(
target_all_unlog, prediction_all_unlog
), # Dot product all ions
np.dot(target_b_unlog, prediction_b_unlog), # Dot product b ions
np.dot(target_y_unlog, prediction_y_unlog), # Dot product y ions
np.dot(target_all_unlog, prediction_all_unlog)
/ (
np.linalg.norm(target_all_unlog, 2)
* np.linalg.norm(prediction_all_unlog, 2)
), # Cos similarity all ions
np.dot(target_b_unlog, prediction_b_unlog)
/ (
np.linalg.norm(target_b_unlog, 2)
* np.linalg.norm(prediction_b_unlog, 2)
), # Cos similarity b ions
np.dot(target_y_unlog, prediction_y_unlog)
/ (
np.linalg.norm(target_y_unlog, 2)
* np.linalg.norm(prediction_y_unlog, 2)
), # Cos similarity y ions
],
dtype=np.float64,
)
rescore_features.append(feats)
rescore_features = np.vstack(rescore_features)
rescore_features = pd.DataFrame(rescore_features, columns=feature_names)
rescore_features["spec_id"] = spec_ids
rescore_features["charge"] = charges
return rescore_features
|
ff68306022fdf75fe6ea19b055c33b2a333bc2d7
| 3,640,381
|
def collection_basic(commodities) -> CommodityCollection:
"""Returns a simple collection of commodities side effects testing."""
keys = ["9999_80_1", "9999.10_80_2", "9999.20_80_2"]
return create_collection(commodities, keys)
|
6ef751225efd338ecd39282e75abdf7bd64e8e47
| 3,640,382
|
import functools
def do_js_minimization(test_function, get_temp_file, data, deadline, threads,
cleanup_interval, delete_temp_files):
"""Javascript minimization strategy."""
# Start by using a generic line minimizer on the test.
  # Do two line minimizations to make up for the fact that minimizations on
  # bots don't always minimize as much as they can.
for _ in range(2):
data = do_line_minimization(test_function, get_temp_file, data, deadline,
threads, cleanup_interval, delete_temp_files)
tokenizer = AntlrTokenizer(JavaScriptLexer)
current_minimizer = js_minimizer.JSMinimizer(
test_function,
max_threads=threads,
deadline=deadline,
cleanup_function=process_handler.cleanup_stale_processes,
single_thread_cleanup_interval=cleanup_interval,
get_temp_file=get_temp_file,
delete_temp_files=delete_temp_files,
tokenizer=tokenizer.tokenize,
token_combiner=tokenizer.combine,
progress_report_function=functools.partial(logs.log))
  # Some tokens can't be removed until others have, so do 2 passes.
try:
for _ in range(2):
data = current_minimizer.minimize(data)
except minimizer.AntlrDecodeError:
data = do_line_minimization(test_function, get_temp_file, data, deadline,
threads, cleanup_interval, delete_temp_files)
# FIXME(mbarbella): Improve the JS minimizer so that this is not necessary.
# Sometimes, lines that could not have been removed on their own can now be
# removed since they have already been partially cleaned up.
return do_line_minimization(test_function, get_temp_file, data, deadline,
threads, cleanup_interval, delete_temp_files)
|
9b6e40308694f70dfd404734fd2723210ffc26cd
| 3,640,383
|
def percent_list(part_list, whole_list):
"""return percent of the part"""
w = len(whole_list)
if not w:
return (w,0)
p = 100 * float(len(part_list))/float(w)
return (w,round(100-p, 2))
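# Worked example: percent_list([1, 2], [1, 2, 3, 4]) returns (4, 50.0),
# i.e. whole_list has 4 items and 50% of it is not covered by part_list.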
|
f9b3697c96c04c402972351e73395b7f7ed18350
| 3,640,384
|
import numpy as np
import pandas as pd
def disp_calc_helper_NB(adata, min_cells_detected):
"""
Parameters
----------
adata
min_cells_detected
Returns
-------
"""
rounded = adata.raw.astype('int') if adata.raw is not None else adata.X
lowerDetectedLimit = adata.uns['lowerDetectedLimit'] if 'lowerDetectedLimit' in adata.uns.keys() else 1
nzGenes = (rounded > lowerDetectedLimit).sum(axis=0)
nzGenes = nzGenes > min_cells_detected
# maybe we should normalized by Size_Factor anymore if we always normalize the data after calculating size factor?
# x = rounded[:, nzGenes] / adata.obs['Size_Factor'][:, None] if 'Size_Factor' in adata.obs.columns else adata.X[:, nzGenes]
x = rounded[:, nzGenes] / adata.obs['Size_Factor'][:, None] if adata.raw is not None else adata.X[:, nzGenes]
xim = np.mean(1 / adata.obs['Size_Factor']) if 'Size_Factor' in adata.obs.columns else 1
f_expression_mean = x.mean(axis=0)
# For NB: Var(Y) = mu * (1 + mu / k)
# variance formula
f_expression_var = np.mean((x - f_expression_mean) ** 2, axis=0)
disp_guess_meth_moments = f_expression_var - xim * f_expression_mean
disp_guess_meth_moments = disp_guess_meth_moments / np.power(f_expression_mean, 2)
res = pd.DataFrame({"mu": f_expression_mean.squeeze(), "disp": disp_guess_meth_moments.squeeze()})
res.loc[res['mu'] == 0, 'mu'] = None
res.loc[res['mu'] == 0, 'disp'] = None
res.loc[res['disp'] < 0, 'disp'] = 0
res['gene_id'] = adata.var_names[nzGenes]
return res
|
2402446dca38d3b730fb0c11720151c38838341f
| 3,640,385
|
def print_results(request):
"""Renders the results url, which is a placeholder copy of the root url of
query interface, where any results are rendered alongside the table headers.
"""
if request.method == "POST":
form = MetadataForm(request.POST)
if form.is_valid():
query_results = Metadata.objects.filter(instrument__icontains=form.data["instrument"])
else:
query_results = []
form = MetadataForm()
return render(request, "query.html", {"data": query_results, "queryform": form, "render_table": True})
|
77e0db699b3458ce69d56771a83586fab6a86b66
| 3,640,386
|
def capacity_rule(mod, g, p):
"""
The capacity of projects of the *gen_ret_bin* capacity type is a
pre-specified number for each of the project's operational periods
multiplied with 1 minus the binary retirement variable.
"""
return mod.gen_ret_bin_capacity_mw[g, p] \
* (1 - mod.GenRetBin_Retire[g, p])
|
ba4ccad8d620da084912a65a80793f54fb84b374
| 3,640,387
|
from indra.sources.reach.processor import determine_reach_subtype
from typing import Tuple
from typing import Optional
def tag_evidence_subtype(
evidence: Evidence,
) -> Tuple[str, Optional[str]]:
"""Returns the type and subtype of an evidence object as a string,
typically the extraction rule or database from which the statement
was generated.
For biopax, this is just the database name.
Parameters
----------
    evidence:
        The evidence object which we wish to subtype
Returns
-------
:
A tuple with (type, subtype), both strings. Returns (type, None) if the
type of statement is not yet handled in this function.
"""
source_api = evidence.source_api
annotations = evidence.annotations
if source_api == 'biopax':
subtype = annotations.get('source_sub_id')
elif source_api in ('reach', 'eidos'):
if 'found_by' in annotations:
if source_api == 'reach':
subtype = determine_reach_subtype(annotations['found_by'])
elif source_api == 'eidos':
subtype = annotations['found_by']
else:
subtype = None
else:
logger.debug('Could not find found_by attribute in reach '
'statement annotations')
subtype = None
elif source_api == 'geneways':
subtype = annotations['actiontype']
else:
subtype = None
return (source_api, subtype)
|
59e0e9b436016e24ace7e18619b476f94dece2d6
| 3,640,388
|
import torch.nn as nn
def block_deconv_k4s2p1_BN_RELU(in_channel_size, out_channel_size, leaky = 0):
"""
>>> block_deconv_k4s2p1_BN_RELU(13, 17, 0.02)
Sequential(
(0): ConvTranspose2d(13, 17, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
(1): BatchNorm2d(17, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.02, inplace)
)
"""
model_list = []
model_list.append( nn.ConvTranspose2d( in_channel_size, out_channel_size, \
kernel_size=4, stride=2, padding=1, bias=False ) )
model_list.append( nn.BatchNorm2d(out_channel_size) )
model_list.append( nn.ReLU( inplace=True ) if leaky==0 else nn.LeakyReLU(leaky, inplace=True) )
return nn.Sequential(*model_list)
|
6d8f3b9f550a1b18599bf7b3439ad7dda2d316b8
| 3,640,389
|
import napari
import numpy
from numpy.random import normal
def demo_super_fast_representative_crop(image, crop_size=64000, display: bool = True):
"""
Demo for self-supervised denoising using camera image with synthetic noise
"""
Log.enable_output = True
Log.set_log_max_depth(5)
image = normalise(image.astype(numpy.float32))
image += 0.1 * normal(size=image.shape, scale=0.1)
def _crop_fun():
return super_fast_representative_crop(
image, crop_size=crop_size, display_crop=False
)
# Warmup (numba compilation)
# _crop_fun()
with lsection(f"Computing crop for image of shape: {image.shape}"):
# for _ in range(10):
crop = _crop_fun()
if display:
viewer = napari.Viewer()
viewer.add_image(image, name='image')
viewer.add_image(crop, name='crop')
napari.run()
lprint(f"Crop size requested: {crop_size} obtained: {crop.size}")
assert crop.size >= int(crop_size * 0.5) and crop.size <= int(crop_size * 2)
|
b5166027719fb3bee757af25cc532b9e9e2e2be7
| 3,640,390
|
def encrypt_uid(user):
"""Encrypts the User id for plain
"""
uid_xor = htk_setting('HTK_USER_ID_XOR')
crypt_uid = int_to_base36(user.id ^ uid_xor)
return crypt_uid
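# Worked example (assuming HTK_USER_ID_XOR == 0 purely for illustration):
# a user with id 1000 would encode as int_to_base36(1000 ^ 0) == 'rs'.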
|
a425785f724cbc3e7459e38150b7a455ce1c1c6d
| 3,640,391
|
def createNewVarName(varType):
"""An helper function that returns a new name for creating fresh variables.
"""
createNewVarName.counter += 1
# return "v_{}_{}".format(varType.lower(), createNewVarName.counter)
return "v_{}".format(createNewVarName.counter)
|
19efee0d0b9f3d100807034037b4aecfc6a11940
| 3,640,392
|
import numpy as np

def initialize_parameters(n_a, n_x, n_y):
"""
Initialize parameters with small random values
Returns:
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
b -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
"""
Wax = np.random.randn(n_a, n_x) * 0.01 # input to hidden
Waa = np.random.randn(n_a, n_a) * 0.01 # hidden to hidden
Wya = np.random.randn(n_y, n_a) * 0.01 # hidden to output
ba = np.zeros((n_a, 1)) # hidden bias
by = np.zeros((n_y, 1)) # output bias
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
return parameters
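# Illustrative usage: for a 27-character vocabulary and 50 hidden units,
# params = initialize_parameters(n_a=50, n_x=27, n_y=27)
# params['Wax'].shape -> (50, 27); params['Wya'].shape -> (27, 50)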
|
bd420d9484143a1322c43aef6fd4441526bf5d2a
| 3,640,393
|
def secure_request(request, ssl: bool):
"""
:param ssl:
:param request:
:return:
"""
# request.headers['Content-Security-Policy'] = "script-src 'self' cdnjs.cloudflare.com ; "
request.headers['Feature-Policy'] = "geolocation 'none'; microphone 'none'; camera 'self'"
request.headers['Referrer-Policy'] = 'no-referrer'
request.headers['x-frame-options'] = 'SAMEORIGIN'
request.headers['X-Content-Type-Options'] = 'nosniff'
request.headers['X-Permitted-Cross-Domain-Policies'] = 'none'
request.headers['X-XSS-Protection'] = '1; mode=block'
if ssl:
request.headers['expect-ct'] = 'max-age=60, enforce'
request.headers["Content-Security-Policy"] = "upgrade-insecure-requests"
request.headers['Strict-Transport-Security'] = "max-age=60; includeSubDomains; preload"
return request
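# Illustrative wiring (a minimal sketch; the decorator and names are assumptions,
# not from the original project), e.g. as a Flask after_request hook:
# @app.after_request
# def _apply_security_headers(response):
#     return secure_request(response, ssl=True)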
|
e1c19aa89930e6aeb1c548c24da374859987e090
| 3,640,394
|
import pandas as pd

def f_mean(data: pd.DataFrame, tags=None, batch_col=None, phase_col=None):
"""
Feature: mean
The arithmetic mean for the given tags in ``tags``,
for each unique batch in the ``batch_col`` indicator column, and
within each unique phase, per batch, of the ``phase_col`` column.
"""
base_name = "mean"
prepared, tags, output, _ = _prepare_data(data, tags, batch_col, phase_col)
f_names = [(tag + "_" + base_name) for tag in tags]
output = prepared.mean()
return output.rename(columns=dict(zip(tags, f_names)))[f_names]
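# Illustrative usage (a minimal sketch; column and tag names are assumptions):
# feats = f_mean(df, tags=["temp", "pressure"], batch_col="batch_id")
# yields one row per batch with columns "temp_mean" and "pressure_mean".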
|
16f86d42a22aa2c5849ffeb1aa95a3a1dd0f342f
| 3,640,395
|
import jax.numpy as jnp
from jax import random
def AtariConvInit(kernel_shape, rng, dtype=jnp.float32):
  """The standard init for Conv layers used for Atari."""
filter_height, filter_width, fan_in, _ = kernel_shape
std = 1 / jnp.sqrt(fan_in * filter_height * filter_width)
return random.uniform(rng, kernel_shape, dtype, minval=-std, maxval=std)
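# Illustrative usage (a minimal sketch, assuming an HWIO kernel layout and a JAX PRNG key):
# key = random.PRNGKey(0)
# w = AtariConvInit((8, 8, 4, 32), key)  # 8x8 kernel, 4 input -> 32 output channels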
|
c7f12495c067fc34d9123659dfe91e0295358207
| 3,640,396
|
from urllib.parse import quote
from urllib.request import Request, urlopen
def scrape(url):
    """
    Scrapes a url and returns the html using the proper User Agent
    """
    UA = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
    # Percent-encode the URL (the original called urllib.quote but discarded the result)
    url = quote(url, safe=':/?&=#%')
    req = Request(url=url, headers={'User-Agent': UA})
    hdl = urlopen(req)
    html = hdl.read()
    return html
|
ce1aa7127532fef3408c45ebaa62a925672b0189
| 3,640,397
|
def _get_prefixed_values(data, prefix):
"""Collect lines which start with prefix; with trimming"""
matches = []
for line in data.splitlines():
line = line.strip()
if line.startswith(prefix):
match = line[len(prefix):]
match = match.strip()
matches.append(match)
return matches
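# Worked example:
# _get_prefixed_values("Name: foo\n  Name: bar\nOther: x", "Name:")  # -> ['foo', 'bar']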
|
d0fe7ff11321ccbf06397963a303f0e79181ebba
| 3,640,398
|
def build_k5_graph():
"""Makes a new K5 graph.
Ref: http://mathworld.wolfram.com/Pentatope.html"""
graph = UndirectedGraph()
# K5 has 5 nodes
for _ in range(5):
graph.new_node()
# K5 has 10 edges
# --Edge: a
graph.new_edge(1, 2)
# --Edge: b
graph.new_edge(2, 3)
# --Edge: c
graph.new_edge(3, 4)
# --Edge: d
graph.new_edge(4, 5)
# --Edge: e
graph.new_edge(5, 1)
# --Edge: f
graph.new_edge(1, 3)
# --Edge: g
graph.new_edge(1, 4)
# --Edge: h
graph.new_edge(2, 4)
# --Edge: i
graph.new_edge(2, 5)
# --Edge: j
graph.new_edge(3, 5)
return graph
|
ba19a5014f729bb0c3af3e528c8d37d02df84932
| 3,640,399
|