content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from torchvision.models.vgg import vgg11_bn
from torchvision.models.vgg import vgg11
def vgg_11(batch_norm=True, pretrained=False, fixed_feature=True):
    """VGG 11-layer model from torchvision's vgg model.

    :param batch_norm: train model with batch normalization
    :param pretrained: if true, return a model pretrained on ImageNet
    :param fixed_feature: if true and pretrained is true, model features are
        fixed (frozen) while training.
    """
    model = vgg11_bn(pretrained) if batch_norm else vgg11(pretrained)
    # Features are only frozen when a pretrained backbone is actually used;
    # bool(...) replaces the redundant `True if ... else False` form.
    ff = bool(pretrained and fixed_feature)
    return _VGG(model, model.features, ff)
import logging
import sys
def main():
    """Entry point: parse CLI arguments, configure logging, and dispatch to
    the requested CTG subcommand (count / score / aggregate / add_tags)."""
    parser = ctg_parseargs()
    args = parser.parse_args()
    # Set up logger; later flags win (debug > verbose > quiet).
    level = logging.WARNING
    if args.quiet:
        level = logging.ERROR
    if args.verbose:
        level = logging.INFO
    if args.debug:
        level = logging.DEBUG
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=level)
    logger = logging.getLogger()
    if args.command is None:  # fixed: identity comparison with None
        # No command specified: show usage and exit cleanly.
        parser.print_help()
        sys.exit(0)
    if args.command == "count":
        logger.info('Starting CTG counting pipeline.')  # fixed typo "Startng"
        counting_main(args)
        logger.info("Exiting")
        sys.exit(0)
    if args.command == "score":
        logger.info('Starting CTG scoring pipeline.')
        scoring_main(args)
        logger.info("Exiting")
        sys.exit(0)
    if args.command == "aggregate":
        logger.info("Starting CTG aggregation.")
        aggregate_main(args)
        logger.info("Exiting")
        sys.exit(0)
    if args.command == "add_tags":
        logger.info("Adding bam tags.")
        add_tags_main(args)
        sys.exit(0)
    return 0
def Wavelet_hardSoft(s, jN, wname, alpha=0.5):
    """
    Wavelet compromise (hard/soft trade-off) threshold denoising.

    Each detail level (and the final approximation level) is thresholded with
    ``sign(d) * (|d| - alpha * thr)`` for coefficients above the level
    threshold, zero otherwise — a compromise between hard (alpha=0) and soft
    (alpha=1) thresholding.

    :param s: input signal (1-D sequence) — assumed; TODO confirm
    :param jN: number of wavelet decomposition levels
    :param wname: wavelet name, passed through to wavedec/waverec
    :param alpha: compromise factor between hard and soft thresholding
    :return: reconstructed (denoised) signal
    """
    # NOTE(review): assumes ``wavedec`` returns (approximations, details) with
    # ``cd`` ordered so that reversing it (after appending the thresholded
    # approximation) matches pywt.waverec's expected order — confirm against
    # the wavedec helper.
    ca, cd = wavedec(s, jN, wname)
    for i in range(len(ca)):
        # Per-level threshold: median-based robust noise estimate (dividing by
        # 0.6745 converts a median absolute value to a sigma under Gaussianity).
        thr = np.median(cd[i] * np.sqrt(2 * np.log((i + 2) / (i + 1)))) / 0.6745
        di = np.array(cd[i])
        cd[i] = np.where(np.abs(di) > thr, np.sign(di) * (np.abs(di) - alpha * thr), 0)
    # Threshold the coarsest approximation level the same way.
    calast = np.array(ca[-1])
    thr = np.median(calast * np.sqrt(2 * np.log((jN + 1) / jN))) / 0.6745
    calast = np.where(np.abs(calast) > thr, np.sign(calast) * (np.abs(calast) - alpha * thr), 0)
    cd.append(calast)
    coef = cd[::-1]
    res = pywt.waverec(coef, wname)
    return res
def compute_vad(log_energy, energy_mean_scale=0.5, energy_threshold=0.5, frames_context=0, proportion_threshold=0.6):
    """ Apply energy-based voice activity detection.
    :param log_energy: 1-D array of per-frame log mel energies.
    :param energy_mean_scale: If this is set to s, to get the actual threshold we let m be the mean log-energy of the file, and use s*m + energy_threshold (float, default = 0.5)
    :param energy_threshold: Constant term in energy threshold for VAD (also see energy_mean_scale) (float, default = 0.5)
    :param frames_context: Number of frames of context on each side of central frame, in window for which energy is monitored (int, default = 0)
    :param proportion_threshold: Parameter controlling the proportion of frames within the window that need to have more energy than the threshold (float, default = 0.6)
    :return: A vector of boolean that are True if we judge the frame voiced and False otherwise.
    """
    # NOTE(review): the original docstring said default = 5 for
    # energy_threshold while the signature uses 0.5 (Kaldi's corresponding
    # vad-energy-threshold default is 5.0) — confirm which is intended.
    assert len(log_energy.shape) == 1
    assert energy_mean_scale >= 0
    assert frames_context >= 0
    assert 0 < proportion_threshold < 1
    dtype = log_energy.dtype
    # Effective threshold = constant term + scaled mean energy of the file.
    energy_threshold += energy_mean_scale * log_energy.mean()
    if frames_context > 0:
        num_frames = len(log_energy)
        window_size = frames_context * 2 + 1
        # Zero-pad both ends so every frame has a full-sized window.
        log_energy_pad = np.concatenate([
            np.zeros(frames_context, dtype=dtype),
            log_energy,
            np.zeros(frames_context, dtype=dtype)
        ])
        # One row of window_size energies per frame — assumes the
        # sliding_window helper yields [num_frames, window_size]; TODO confirm.
        log_energy_window = sliding_window(log_energy_pad, window_size, 1)
        # Number of above-threshold frames per window.
        num_count = np.count_nonzero(log_energy_window > energy_threshold, axis=1)
        # Denominator: count only *real* (non-padding) frames per window;
        # the two slice assignments shrink it near both sequence boundaries.
        den_count = np.ones(num_frames, dtype=dtype) * window_size
        max_den_count = np.arange(frames_context + 1, min(window_size, num_frames) + 1, dtype=dtype)
        den_count[:-(frames_context + 2):-1] = max_den_count
        den_count[:frames_context + 1] = np.min([den_count[:frames_context + 1], max_den_count], axis=0)
        vad = num_count / den_count >= proportion_threshold
    else:
        # No context: simple per-frame thresholding.
        vad = log_energy > energy_threshold
    return vad
def current_velocity(x_new, x_prev, h):
    """Return the central-difference velocity of a particle.

    Parameters
    ----------
    x_new : array
        New x-position of the particle (one timestep ahead).
    x_prev : array
        Previous x-position of the particle (one timestep behind).
    h : float
        Simulation timestep.

    Returns
    -------
    array
        Velocity estimate ``(x_new - x_prev) / (2 * h)``.
    """
    # Bug fix: the original wrote ``(x_new - x_prev) / 2*h`` which, by
    # operator precedence, multiplies by h instead of dividing by 2h.
    return (x_new - x_prev) / (2 * h)
import csv
def csvReadCallback(inputFile, **kw):
    """Read callback for CSV data: discard the header line, parse the rest.

    Extra keyword arguments are forwarded to :func:`csv.reader`.
    """
    inputFile.readline()  # skip header row
    rows = csv.reader(inputFile, lineterminator='\n', **kw)
    return list(rows)
def get_interface_config_commands(interface, intf, existing):
    """Generate the ordered list of commands to configure on the device.

    Args:
        interface (dict): k/v pairs describing the desired interface state.
        intf (str): full name of interface, i.e. Ethernet1/1.
        existing (dict): existing interface state (unused here; kept for
            interface compatibility with callers).

    Returns:
        list: ordered list of commands to be sent to device.
    """
    commands = []
    desc = interface.get('description')
    if desc:
        commands.append('description {0}'.format(desc))
    mode = interface.get('mode')
    # Bug fix: the original raised NameError (unbound `command`) for any
    # truthy mode other than layer2/layer3; now only known modes emit a command.
    if mode == 'layer2':
        commands.append('switchport')
    elif mode == 'layer3':
        commands.append('no switchport')
    admin_state = interface.get('admin_state')
    if admin_state:
        commands.append(get_admin_state(interface, intf, admin_state))
    ip_forward = interface.get('ip_forward')
    if ip_forward:
        commands.append('ip forward' if ip_forward == 'enable' else 'no ip forward')
    fabric_forwarding_anycast_gateway = interface.get(
        'fabric_forwarding_anycast_gateway')
    # Explicit True/False checks: None means "leave unchanged".
    if fabric_forwarding_anycast_gateway is True:
        commands.append('fabric forwarding mode anycast-gateway')
    elif fabric_forwarding_anycast_gateway is False:
        commands.append('no fabric forwarding mode anycast-gateway')
    if commands:
        commands.insert(0, 'interface {0}'.format(intf))
    return commands
def load_word():
    """Return the SQL insert statement and row values for the Word table."""
    sql = """
    INSERT INTO Spanglish_Test.Word
    (
        `word`, `language_id`, `category_id`
    )
    VALUES (%s, %s, %s)
    """
    # (word, category_id) pairs; all seed words share language_id 2 (Spanish).
    seed_words = [
        ('Ir', 1),
        ('Lunes', 2),
        ('Hola', 3),
        ('Ver', 1),
        ('Comer', 1),
        ('Saber', 1),
    ]
    values = [(word, 2, category) for word, category in seed_words]
    return {'sql': sql, 'values': values}
def AttenLogitsRPE(query, key, abs_pos_emb, is_causal):
    """Attention logits with trainable relative position embeddings, per
    https://arxiv.org/pdf/1803.02155.pdf.

    Padding is expected to be masked by the caller.

    Shapes (B=batch, T=sequence length, N=heads, H=per-head dim):
        query:       [B, T, N, H]
        key:         [B, T, N, H]
        abs_pos_emb: [2T - 1, N, H]; abs_pos_emb[i] is the embedding of
                     relative distance i - (T - 1).

    Args:
        query: query tensor.
        key: key tensor.
        abs_pos_emb: trainable relative position embedding table.
        is_causal: Python bool or scalar bool Tensor; True for causal
            self-attention.

    Returns:
        The attention logits tensor, [B, N, T, T].
    """
    logits = _AttenLogits(query, key, abs_pos_emb, is_causal=is_causal)
    return logits
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    text = s.lower()
    text = re_punc.sub(' ', text)   # punctuation -> spaces
    text = re_art.sub(' ', text)    # drop articles
    return ' '.join(text.split())   # collapse runs of whitespace
def qso_template_uv(wa, z):
    """Return a composite UV QSO spectrum redshifted to z.

    Wavelengths must be in Angstroms. This is a smoothed version of the
    HST/COS EUV+FUV AGN composite spectrum shown in Figure 5 of Shull,
    Stevans, and Danforth 2012. Only good between 550 and 1730 Angstroms
    (rest frame).
    """
    template = readtabfits(DATAPATH + 'templates/qso/Shull_composite.fits')
    observed_wa = template.wa * (1 + z)
    return np.interp(wa, observed_wa, template.fl)
def encode_multipart_formdata(fields, files):
    """Encode form fields and file payloads as a multipart/form-data body.

    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be
    uploaded as files.
    Returns (headers, body) ready for an httplib.HTTP instance.
    Based on http://code.activestate.com/recipes/146306/
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    parts = []
    for key, value in fields:
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            convert_to_utf8_str(value),
        ])
    for key, filename, value in files:
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename),
            'Content-Type: %s' % get_content_type(filename),
            '',
            value,
        ])
    parts.extend(['--' + BOUNDARY + '--', ''])
    body = CRLF.join(parts)
    headers = {
        'Content-Type': 'multipart/form-data; boundary=%s' % BOUNDARY,
        'Content-Length': len(body)
    }
    return headers, body
def lanefinder_pipeline(img, video=False, debug=False):
    """
    The pipeline for laneline finding based on all the techniques used so far.

    :param img: The image to find lanelines on (BGR, per the cvtColor calls)
    :param video: if True, reuse/update the module-level leftLine/rightLine
        state so lines are tracked across consecutive frames
    :param debug: if True, return a tiled grid of intermediate images instead
        of just the annotated frame
    :return: An image with laneline boundaries drawn on
    """
    # Undistort — mtx/dist are module-level camera calibration; assumed set
    # by an earlier calibration step, TODO confirm.
    undst = cv2.undistort(img, mtx, dist, None, mtx)
    # Apply blur (remove noise)
    undst = cv2.GaussianBlur(undst, (3, 3), 0)
    # Sobel on Saturation and Lightness channels
    hls = cv2.cvtColor(undst, cv2.COLOR_BGR2HLS)
    l_ch = hls[:, :, 1]
    s_ch = hls[:, :, 2]
    sobelx_l = sobel_binary(l_ch, 35, 125, sobel_kernel=3)
    sobelx_s = sobel_binary(s_ch, 35, 70, sobel_kernel=9)
    gray = cv2.cvtColor(undst, cv2.COLOR_BGR2GRAY)
    sobelx_gray = sobel_binary(gray, 255, 255)
    # Yellow and white color masks (typical lane paint colors)
    yellow_mask = hsv_mask(undst, (20, 120), (100, 255), (80, 255))
    white_mask = hsl_mask(undst, (0, 255), (0, 200), (200, 255))
    # Combine binary thresholds
    comb_bin = np.zeros_like(sobelx_l)
    # Union of all color and sobel masks
    comb_bin[(yellow_mask == 1) | (white_mask == 1) | (sobelx_l == 1) | (sobelx_s == 1) | (sobelx_gray == 1)] = 1
    # Ground plane perspective warp (bird's-eye view)
    warped_bin = cv2.warpPerspective(comb_bin, ground_plane_mtx, (img.shape[1], img.shape[0]))
    # Fit lane lines
    if video:
        # Frame-to-frame tracking via module-level state.
        global leftLine, rightLine
        # if previous laneLine exists and the undetected frame counter is less than 5
        if leftLine is not None and leftLine.n_undetected < 5:
            leftLine = leftLine.search_from_prior(warped_bin, margin=50)
        else:
            # Get lane pixels using sliding windows
            leftx, lefty, rightx, righty, out_img = sliding_window(warped_bin)
            leftLine = Line(leftx, lefty)
        if rightLine is not None and rightLine.n_undetected < 5:
            rightLine = rightLine.search_from_prior(warped_bin, margin=50)
        else:
            # Get lane pixels using sliding windows
            leftx, lefty, rightx, righty, out_img = sliding_window(warped_bin)
            rightLine = Line(rightx, righty)
    else:
        # Single image: always start from scratch with sliding windows
        leftx, lefty, rightx, righty, out_img = sliding_window(warped_bin)
        leftLine = Line(leftx, lefty)
        rightLine = Line(rightx, righty)
    ## Visualization ##
    # Create lane overlay image
    line_img = np.zeros_like(undst)
    # draw green shaded lane overlay; the right polyline is reversed so the
    # concatenated points form a closed polygon
    lane_fill_outline = np.concatenate((leftLine.get_polyline(warped_bin.shape[0]),
                                        rightLine.get_polyline(warped_bin.shape[0])[:, ::-1, :]),
                                       axis=1)
    cv2.fillPoly(line_img, lane_fill_outline, color=(0, 255, 0))
    # draw left lane line
    cv2.polylines(line_img, leftLine.get_polyline(warped_bin.shape[0]), color=(255, 0, 0), isClosed=False,
                  thickness=8)
    # draw right lane line
    cv2.polylines(line_img, rightLine.get_polyline(warped_bin.shape[0]), color=(255, 0, 0), isClosed=False,
                  thickness=8)
    line_img_pre = cv2.addWeighted(line_img, 1, np.dstack((warped_bin, warped_bin, warped_bin)) * 255, 0.7, 0)
    # Get inverse transform matrix for drawing on original image
    _, inverse_ground_plane_mtx = cv2.invert(ground_plane_mtx)
    line_img = cv2.warpPerspective(line_img, inverse_ground_plane_mtx, (line_img.shape[1], line_img.shape[0]))
    # Combine images
    comb_bin = np.dstack((comb_bin, comb_bin, comb_bin)) * 255
    out_img = cv2.addWeighted(undst, 1, line_img, 0.5, 0)
    # Get centerline offset (image center vs lane center, in meters)
    offsetx_m = (img.shape[1] / 2 - (leftLine.get_base(img.shape[0]) + (
            rightLine.get_base(img.shape[0]) - leftLine.get_base(img.shape[0])) / 2)) * xm_per_pix
    # Get curvature (average of both lane lines)
    R_curve = np.average((leftLine.get_curvature_meters(img.shape[0], xm_per_pix, ym_per_pix),
                          rightLine.get_curvature_meters(img.shape[0], xm_per_pix, ym_per_pix)))
    # Font and text properties
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    FONT_SCALE = 0.7
    LINE_WIDTH = 2
    COLOR = (0, 0, 255)  # BGR
    # Print curvature and centerline offset on img
    cv2.putText(out_img, "Curvature: {:.1f}m".format(R_curve), (10, int(30 * FONT_SCALE)), FONT, FONT_SCALE, COLOR,
                LINE_WIDTH,
                cv2.LINE_AA)
    cv2.putText(out_img, "Centerline offset: {:.2f}m".format(offsetx_m), (10, int(60 * FONT_SCALE)), FONT, FONT_SCALE,
                COLOR,
                LINE_WIDTH, cv2.LINE_AA)
    if debug:
        # Tile all intermediate images into a square-ish grid for inspection.
        debug_imgs = [
            (out_img, ""),
            (cv2.cvtColor(sobelx_gray * 255, cv2.COLOR_GRAY2BGR), "sobelx_gray"),
            (cv2.cvtColor(sobelx_l * 255, cv2.COLOR_GRAY2BGR), "sobelx_l"),
            (cv2.cvtColor(sobelx_s * 255, cv2.COLOR_GRAY2BGR), "sobelx_s"),
            (cv2.cvtColor(yellow_mask * 255, cv2.COLOR_GRAY2BGR), "yellow mask"),
            (cv2.cvtColor(white_mask * 255, cv2.COLOR_GRAY2BGR), "white mask"),
            (comb_bin, "combined"),
            (line_img_pre, "warp")
        ]
        # Smallest grid_square with grid_square**2 >= len(debug_imgs)
        grid_square = 0
        while True:
            grid_square += 1
            if grid_square ** 2 > len(debug_imgs):
                break
        debug_img_shape = (1440, 2560, 3)
        debug_img_grid_shape = (int(debug_img_shape[0] / grid_square),
                                int(debug_img_shape[1] / grid_square),
                                3)
        debug_img = np.zeros(debug_img_shape, np.int32)
        for i, tuple in enumerate(debug_imgs):
            image = tuple[0]
            title = tuple[1]
            cv2.putText(image, title, (10, 30), FONT, 1, COLOR, LINE_WIDTH, cv2.LINE_AA)
            # Row-major placement of the resized tile into the grid.
            row = int(i / grid_square)
            col = i % grid_square
            debug_img[row * debug_img_grid_shape[0]:(row + 1) * debug_img_grid_shape[0],
                      col * debug_img_grid_shape[1]:(col + 1) * debug_img_grid_shape[1]] = cv2.resize(tuple[0], (
                debug_img_grid_shape[1], debug_img_grid_shape[0]), interpolation=cv2.INTER_AREA)
        out_img = debug_img
    else:
        # Side-by-side: annotated frame next to the warped overlay view.
        out_img = np.hstack((out_img, line_img_pre))
    return out_img
from datetime import datetime
from datetime import timedelta


def get_ppl_experience(log_entries: QuerySet) -> dict:
    """Compute PPL(A) experience requirements vs. accrued hours.

    https://www.easa.europa.eu/sites/default/files/dfu/Part-FCL.pdf
    (a) Applicants for a PPL(A) shall have completed at least 45 hours of flight instruction in aeroplanes or TMGs,
    5 of which may have been completed in an FSTD, including at least:
    (1) 25 hours of dual flight instruction; and
    (2) 10 hours of supervised solo flight time, including at least 5 hours of solo cross-country flight time
    with at least 1 cross-country flight of at least 270 km (150 NM), during which full stop landings
    at 2 aerodromes different from the aerodrome of departure shall be made.
    """
    # Bug fix: the module imported the ``datetime`` *class* and then used
    # ``datetime.timedelta``, which raises AttributeError at call time;
    # ``timedelta`` is now imported directly.
    return {
        "Dual instruction": ExperienceRecord(
            required=TotalsRecord(time=timedelta(hours=25), landings=1),
            accrued=compute_totals(log_entries.filter(time_function=FunctionType.DUAL.name)),
        ),
        "Supervised solo": ExperienceRecord(
            required=TotalsRecord(time=timedelta(hours=10), landings=1),
            accrued=compute_totals(log_entries.filter(time_function=FunctionType.PIC.name)),
        ),
        "Cross-country solo": ExperienceRecord(
            required=TotalsRecord(time=timedelta(hours=5), landings=1),
            accrued=compute_totals(log_entries.filter(time_function=FunctionType.PIC.name, cross_country=True)),
        ),
        "Total hours": ExperienceRecord(
            required=TotalsRecord(time=timedelta(hours=45), landings=1),
            accrued=compute_totals(log_entries),
        ),
    }
import torch
def reconstruct_from_patches_2d(patches, img_shape, step=[1.0,1.0], batch_first=False):
    """Given patches generated from extract_patches_2d function, creates the original unpatched image. We keep track of the
    overlapped regions and average them in the end.

    Parameters:
        patches - Patches from a larger image; layout [P, B, C, pH, pW]
                  (or [B, P, C, pH, pW] when batch_first) — assumed from the
                  permute/size calls below; TODO confirm
        img_shape - Shape of the original image (2d)
        step - step size along the x and y directions; floats are interpreted
               as fractions of the patch size, ints as absolute pixel strides
        batch_first - a flag to indicate if the batch dimension comes first

    Returns:
        A Tensor (Torch.Tensor) which contains a reconstructed image from the patches.
    """
    # NOTE(review): mutable default argument ``step=[1.0, 1.0]`` is shared
    # across calls; safe only while callers never mutate it.
    if(batch_first):
        patches = patches.permute(1,0,2,3,4)
    patch_H, patch_W = patches.size(3), patches.size(4)
    # Output is at least one patch big in each spatial dimension.
    img_size = (patches.size(1), patches.size(2),max(img_shape[0], patch_H), max(img_shape[1], patch_W))
    # Resolve fractional steps to pixel strides.
    step_int = [0,0]
    step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
    step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
    # Regular grid dimensions, plus one extra row/col when the stride does not
    # evenly tile the image (those patches are flush with the far edge).
    nrow, ncol = 1 + (img_size[-2] - patch_H)//step_int[0], 1 + (img_size[-1] - patch_W)//step_int[1]
    r_nrow = nrow + 1 if((img_size[2] - patch_H) % step_int[0] != 0) else nrow
    r_ncol = ncol + 1 if((img_size[3] - patch_W) % step_int[1] != 0) else ncol
    patches = patches.reshape(r_nrow,r_ncol,img_size[0],img_size[1],patch_H,patch_W)
    img = torch.zeros(img_size, device = patches.device)
    # Tracks how many patches covered each pixel so overlaps can be averaged.
    overlap_counter = torch.zeros(img_size, device = patches.device)
    # Accumulate the regular grid of patches.
    for i in range(nrow):
        for j in range(ncol):
            img[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += patches[i,j,]
            overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += 1
    # Accumulate the extra bottom row / right column / bottom-right corner
    # patches that are aligned to the far edges.
    if((img_size[2] - patch_H) % step_int[0] != 0):
        for j in range(ncol):
            img[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += patches[-1,j,]
            overlap_counter[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += 1
    if((img_size[3] - patch_W) % step_int[1] != 0):
        for i in range(nrow):
            img[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += patches[i,-1,]
            overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += 1
    if((img_size[2] - patch_H) % step_int[0] != 0 and (img_size[3] - patch_W) % step_int[1] != 0):
        img[:,:,-patch_H:,-patch_W:] += patches[-1,-1,]
        overlap_counter[:,:,-patch_H:,-patch_W:] += 1
    # Average overlapping contributions.
    img /= overlap_counter
    # If the target image is smaller than one patch, crop the centered padding.
    if(img_shape[0]<patch_H):
        num_padded_H_Top = (patch_H - img_shape[0])//2
        num_padded_H_Bottom = patch_H - img_shape[0] - num_padded_H_Top
        img = img[:,:,num_padded_H_Top:-num_padded_H_Bottom,]
    if(img_shape[1]<patch_W):
        num_padded_W_Left = (patch_W - img_shape[1])//2
        num_padded_W_Right = patch_W - img_shape[1] - num_padded_W_Left
        img = img[:,:,:,num_padded_W_Left:-num_padded_W_Right]
    return img
def get_debug_queries():
    """
    Return an array of queries executed within the context of a
    :class:`DebugTracer` under the current application context and thread.
    """
    top = _app_ctx_stack.top
    # Empty list when no DebugTracer has recorded anything on this context.
    return getattr(top, "storm_debug_queries", [])
def _LJ_rminepsilon_to_ab(coeffs):
"""
Convert rmin/epsilon representation to AB representation of the LJ
potential
"""
A = coeffs['epsilon'] * coeffs['Rmin']**12.0
B = 2 * coeffs['epsilon'] * coeffs['Rmin']**6.0
return {"A": A, "B": B} | 0963c0e8b949d35842660a499ce80a388485773f | 3,629,716 |
def get_offset_stars_ps1(df, target_name_column, target_ra_column,
                         target_dec_column, radius, data_release='dr2',
                         catalog='mean', quality_query=None, n=3, verbosity=0):
    """Get offset stars for all targets in the input DataFrame for PanSTARRS
    using the MAST website.

    Currently this runs slowly as it queries the PanSTARRS 1 archive for each
    object. But it runs!

    It will always retrieve the z-band magnitude for the offset star. This is
    hardcoded in get_ps1_offset_star(). Depending on the catalog it will be
    the mean or stack magnitude.

    :param df: pandas.core.frame.DataFrame
        Dataframe with targets to retrieve offset stars for
    :param target_name_column: string
        Name of the target identifier column
    :param target_ra_column: string
        Right ascension column name
    :param target_dec_column: string
        Declination column name
    :param radius: float
        Maximum search radius in arcseconds
    :param catalog: string
        Catalog to retrieve the offset star data from. (e.g. 'mean', 'stack')
    :param data_release: string
        The specific PanSTARRS data release
    :param n: int
        Number of offset stars to retrieve. (Maximum: n=5)
    :param quality_query: string
        A string written in pandas query syntax to apply quality criteria on
        potential offset stars around the target.
    :param verbosity:
        Verbosity > 0 will print verbose statements during the execution.
    :return: pandas.core.frame.DataFrame
        Returns the dataframe with the retrieved offset stars for all targets
        in the input dataframe.
    """
    offset_df = pd.DataFrame()
    for idx in df.index:
        target_name = df.loc[idx, target_name_column]
        target_ra = df.loc[idx, target_ra_column]
        target_dec = df.loc[idx, target_dec_column]
        temp_df = get_ps1_offset_star(target_name, target_ra, target_dec,
                                      radius=radius, catalog=catalog,
                                      data_release=data_release,
                                      quality_query=quality_query, n=n,
                                      verbosity=verbosity)
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement and behaves identically here.
        offset_df = pd.concat([offset_df, temp_df], ignore_index=True)
        # Checkpoint partial results after each target (queries are slow).
        offset_df.to_csv('temp_offset_df.csv', index=False)
    return offset_df
def _from_yaml(stream):
    """Load data from a YAML file object or string.

    Prefers the C-accelerated CLoader when PyYAML was built with libyaml,
    falling back to the pure-Python Loader otherwise. (The original
    module-level try/except import block was mangled by extraction; it is
    reconstructed here inside the function so the block is self-contained.)
    """
    from yaml import load
    try:
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Loader
    data = load(stream, Loader=Loader)
    return data
def is_unit_by_ten_thousand(text: str) -> bool:
    """
    Return True when the salary unit text means "ten thousand" (万).

    @param: text salary description string
    @rtype: bool
    """
    log.info(f'invoke method -> is_unit_by_ten_thousand(), salary unit text: {text}')
    try:
        unit = NumericUnit(text.strip())
    except ValueError as e:
        # Unknown unit text: not a ten-thousand unit.
        log.error(str(e))
        return False
    return unit in (NumericUnit.Ten_Thousand_CN,
                    NumericUnit.Ten_Thousand_TW,
                    NumericUnit.Ten_Thousand_EN)
def had_cells_north_edge(strmfunc, frac_thresh=0.1, lat_str=LAT_STR,
                         lev_str=LEV_STR):
    """Latitude of the northern edge of the northern Hadley cell."""
    edge_kwargs = dict(
        cell="north",
        edge="north",
        frac_thresh=frac_thresh,
        lat_str=lat_str,
        lev_str=lev_str,
    )
    return had_cell_edge(strmfunc, **edge_kwargs)
def get_referrers(*objs):  # real signature unknown; restored from __doc__
    """
    get_referrers(*objs) -> list

    Return the list of objects that directly refer to any of objs.
    """
    # Stub: the real implementation lives in the C runtime.
    return list()
def margAccRepay(asset, amount, isIsolated="", symbol="", recvWindow=""):
    """# Margin Account Repay (MARGIN)

    #### `POST /sapi/v1/margin/repay (HMAC SHA256)`

    Repay loan for margin account.

    ### Weight:
    1

    ### Parameters:
    Name |Type |Mandatory |Description
    --------|--------|--------|--------
    asset |STRING |YES |
    isIsolated |STRING |NO |for isolated margin or not, "TRUE", "FALSE",default "FALSE"
    symbol |STRING |NO |isolated symbol
    amount |DECIMAL |YES |
    recvWindow |LONG |NO |The value cannot be greater than <code>60000</code>
    timestamp |LONG |YES |
    """
    endpoint = '/sapi/v1/margin/repay'
    params = {"asset": asset, "amount": amount}
    # Optional parameters are only sent when non-empty.
    optional = {"isIsolated": isIsolated, "symbol": symbol, "recvWindow": recvWindow}
    params.update({key: value for key, value in optional.items() if value})
    return postbinancedata_sig(endpoint, params)
def clean_formula(formula: str) -> str:
    """
    Translate mongo's syntax to handle column names containing spaces to pandas syntax.

    Example:
    >>> clean_formula('colA * `col B` * [col C] * `[col D]`')
    'colA * `col B` * `col C` * `[col D]`'
    """
    pieces = COLUMN_PATTERN.split(formula)
    return ''.join(map(clean_formula_element, pieces))
def calculate_SI(aggregated_df):
    """ calculates suspicion of infection as per Sepsis-3 on aggregated hourly dataframe and saves it under the column `suspicion_of_infection`.

    An hour with a microbiology sample is flagged when antibiotics were given
    within the preceding 24 hours or within the following 72 hours.

    Note:
        aggregated_df must contain `antibiotics` and `microbio-sample` columns
        (plus `hadm_id` and `hour`, used for grouping and alignment).
    """
    df = aggregated_df[['hadm_id', 'hour', 'antibiotics', 'microbio-sample']] # reduce data, speeds up computation
    # NOTE(review): inplace fillna on a column of a sliced frame can trigger
    # SettingWithCopyWarning and may not behave under copy-on-write — confirm.
    df['antibiotics'].fillna(0, inplace=True)
    def _fix_columns(antibiotics_window_df):
        """Fixes resulting columns/index from GroupBy.rolling so that there are just hadm_id, hour, and antibiotics cols"""
        # GroupBy.rolling can duplicate the group key / rolling column in both
        # the index and the columns; drop the column copies before resetting.
        if 'hadm_id' in antibiotics_window_df.index.names and 'hadm_id' in df.columns:
            antibiotics_window_df.drop(columns='hadm_id', inplace=True)
        if 'hour' in antibiotics_window_df.index.names and 'hour' in df.columns:
            antibiotics_window_df.drop(columns='hour', inplace=True)
        antibiotics_window_df = antibiotics_window_df.reset_index()[['hadm_id', 'hour', 'antibiotics']]
        return antibiotics_window_df
    # Antibiotic totals over the *previous* 24h, per admission (causal window).
    antibiotics_last_24h = df.groupby('hadm_id').rolling(on='hour', window=24, min_periods=1).antibiotics.sum()
    antibiotics_last_24h = _fix_columns(antibiotics_last_24h)
    antibiotics_last_24h = antibiotics_last_24h.rename(columns={'antibiotics': 'antibiotics_last_24h'})
    # Antibiotic totals over the *next* 72h: reverse the frame, roll, reverse back.
    antibiotics_next_72h = df[::-1].groupby('hadm_id').rolling(on='hour', window=72, min_periods=1).antibiotics.sum()[::-1]
    antibiotics_next_72h = _fix_columns(antibiotics_next_72h)
    antibiotics_next_72h = antibiotics_next_72h.rename(columns={'antibiotics': 'antibiotics_next_72h'})
    df = df.merge(antibiotics_last_24h, on=['hadm_id', 'hour'])
    df = df.merge(antibiotics_next_72h, on=['hadm_id', 'hour'])
    microbio_sample = df['microbio-sample'] == 1
    # SI = culture taken AND antibiotics in either surrounding time window.
    suspicion_of_infection = microbio_sample & (df['antibiotics_last_24h'] > 0)
    suspicion_of_infection |= microbio_sample & (df['antibiotics_next_72h'] > 0)
    aggregated_df['suspicion_of_infection'] = suspicion_of_infection
    return aggregated_df
def _get_reduce_batch_axis(axis, x_dim, x_ndim):
"""get batch_axis for reduce* operation."""
if not isinstance(axis, tuple):
axis = (axis,)
batch_axis = ()
if axis:
for index in axis:
if index < x_dim:
batch_axis = batch_axis + (index,)
else:
batch_axis = batch_axis + (index + 1,)
else:
batch_axis_list = [index for index in range(x_ndim)]
del batch_axis_list[x_dim]
batch_axis = tuple(batch_axis_list)
return batch_axis | b2a41f5e03c0388c70d2690793329d922f2d3248 | 3,629,725 |
def no_game_no_life(seed, rules, iterations, print_list):
    """
    >>> rules = {3 : 1, 4 : 1, 8 : 1, 10 : 1, 11 : 1, 12 : 1, 15 : 1, 21 : 1,
    ... 23 : 1, 26 : 1, 27 : 1, 28 : 1, 29 : 1, 30 : 1}
    >>> current_plants = no_game_no_life('1001010011000000111000111', rules, 20, True)
    1000100001000001001001001
    11001100011000010010010011
    101000100101000010010010001
    101001000101000100100110011
    10001100010100100100010001
    110101000010001001100110011
    1001110100011001000100010001
    10000110101010011001100110011
    11001001111100001000100010001
    1010010001011000011001100110011
    100011000101000101000100010001
    1101010000101000101001100110011
    10011101000010100010000100010001
    100001101000010100110001100110011
    110010010100001000010010100010001
    10100100010100011000100010100110011
    1000110001010101000110001000010001
    11010100001111101010100011000110011
    100111010010101111111010101001010001
    1000011000011111000111111100001010011
    >>> current_plants
    325

    :param seed: The starting binary string.
    :param rules: A dictionary of decimal numbers to 1's and 0's that represent the rules of the game.
    :param iterations: The number of iterations to run.
    :param print_list: Whether or not to print the resulting list
    :return: The number of plants in the last iteration
    """
    pot_queue = deque(int(ch) for ch in seed)
    starting_index = 0
    for _ in range(iterations):
        pot_queue, starting_index = do_the_windy_thing(rules, pot_queue, starting_index)
        if print_list:
            print(''.join(map(str, pot_queue)))
    # Sum the (offset) positions of all live pots in the final generation.
    return sum(idx for idx, pot in enumerate(pot_queue, start=starting_index)
               if pot == 1)
def evalrawexp(context, mapping, arg):
    """Evaluate given argument as a bare template object which may require
    further processing (such as folding generator of strings)"""
    evalfunc, data = arg
    return evalfunc(context, mapping, data)
def _execute_single_config_query(query_name, np1_list, peer_container, output_config):
    """
    Run a query on a single set of policies.

    :param str query_name: the name of the arg.query
    :param str np1_list: set of policies
    :param PeerContainer peer_container: set of peers
    :param OutputConfiguration output_config: dict object
    :rtype: bool (True when the query produced a positive result count)
    """
    config = NetworkConfig(np1_list, peer_container, [np1_list])
    runner = NetworkConfigQueryRunner(query_name, [config], output_config)
    return runner.run_query() > 0
import os
import re
def _ordernii_butterfly(niis):
"""Order a the provided list of nifti1 (.nii) files as appropriate
for the Ploran 2007 dataset (a.k.a butterfly).
"""
scanmap = {}
## Keyed on scode, values are a list of scans
for fipath in niis:
fi = os.path.basename(fipath)
fi_name_parts = fi.split('_')
scode = fi_name_parts[0]
scode = int(scode[1:])
## strip 's' from subcode, e.g 's4' -> 4
# Parse the scancode, looking for the scan number
scancode = fi_name_parts[1]
mat = re.match("^b\d+", scancode)
scannum = int(mat.group()[1:]) - 1
## mat.group() should contain, for example, 'b2'
## so we drop the first letter and
## cast to an int then offset by 1 so it can
## be used as an index into a list (e.g. 'b2' -> 1)
# Debug:
# print("{0} match: {2}, scan: {1}".format(fi, scannum, mat.group()))
# If scode is in scanmap add fipath (not fi)
# otherwise init scode first
max_num_scans = 10 ## ....for Ploran 2007
if scode in scanmap.keys():
scanmap[scode][scannum] = fipath
else:
scanmap[scode] = [None, ] * max_num_scans
## key:scode, val:[None, ...]
scanmap[scode][scannum] = fipath
## and index into [None, ...]
# Use scanmap to create an ordered list of niis
orderedniis = []
[orderedniis.extend(scanmap[sub]) for sub in sorted(scanmap.keys())]
## Want a 1d list thus .extend()
orderedniis = [nii for nii in orderedniis if nii != None]
## Drop Nones
return orderedniis, scanmap | 549aebd5708cabd0d1386fb43ee4464bb04cf72c | 3,629,729 |
from typing import Tuple
def shape(A: "Matrix") -> Tuple[int, int]:
    """Return the (num_rows, num_cols) shape of the given matrix."""
    rows = len(A)
    # An empty matrix has zero columns by convention.
    cols = len(A[0]) if A else 0
    return rows, cols
import functools
def integrity(integrity_func, retry_errors=(ResponseNotValid,)):
    """
    Decorator for spider task handlers that validates the response before the
    wrapped handler body runs.

    Args:
        :param integrity_func: could be callable or string contains name of
            method to call (on the spider instance), or a list/tuple of
            either; each is called with the grab response and should raise on
            invalid content.
        :param retry_errors: exception types that trigger a cache-refreshing
            retry of the task instead of propagating.
    """
    def build_decorator(func):
        @functools.wraps(func)
        def func_wrapper(self, grab, task):
            # Normalize to a list of validation checks.
            if isinstance(integrity_func, (list, tuple)):
                int_funcs = integrity_func
            else:
                int_funcs = [integrity_func]
            try:
                for int_func in int_funcs:
                    if isinstance(int_func, str):
                        # A string names a method on the spider instance.
                        getattr(self, int_func)(grab)
                    else:
                        int_func(grab)
            except retry_errors as ex:
                # Validation failed: re-queue the task bypassing the cache and
                # record the failure kind in the spider stats.
                yield task.clone(refresh_cache=True)
                error_code = ex.__class__.__name__.replace('_', '-')
                self.stat.inc('integrity:%s' % error_code)
            except Exception as ex:
                # Unexpected error: re-raise unchanged (explicit for clarity).
                raise
            else:
                # Validation passed: run the wrapped handler and re-yield the
                # events (tasks/data) it generates.
                result = func(self, grab, task)
                if result is not None:
                    for event in result:
                        yield event
        func_wrapper._original_func = func  # pylint: disable=protected-access
        return func_wrapper
    return build_decorator
import os
def get_file_list(path):
    """Recursively list a directory tree.

    :param path: directory to walk.
    :return: list whose entries are file names (str) for plain files and
        single-key dicts ``{dirname: sublist}`` for subdirectories.
    """
    entries = []
    # Renamed loop variable from `dir` (shadowed the builtin).
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if os.path.isdir(full_path):
            entries.append({name: get_file_list(full_path)})
        else:
            entries.append(name)
    return entries
def _convert_evaluation_data_to_frame(steps, evals):
    """Convert evaluation data to (tidy) data frame.
    Args:
        steps (namedtuple): Namedtuple with field names pos and neg. Is generated by
            :func:`~estimagic.differentiation.generate_steps.generate_steps`.
        evals (namedtuple): Namedtuple with field names pos and neg. Contains function
            evaluation corresponding to steps.
    Returns:
        df (pandas.DataFrame): Tidy data frame with index (sign, step_number, dim_x
            dim_f), where sign corresponds to pos or neg in steps and evals, step_number
            indexes the step, dim_x is the dimension of the input vector and dim_f is
            the dimension of the function output. The data is given by the two columns
            step and eval. The data frame has 2 * n_steps * dim_x * dim_f rows.
    """
    n_steps, dim_f, dim_x = evals.pos.shape
    dfs = []
    # steps/evals are (pos, neg) namedtuples; pair each side with its sign.
    for direction, step_arr, eval_arr in zip((1, -1), steps, evals):
        df_steps = pd.DataFrame(step_arr, columns=range(dim_x))
        df_steps = df_steps.reset_index()
        df_steps = df_steps.rename(columns={"index": "step_number"})
        # Long format: one row per (step_number, dim_x) pair.
        df_steps = df_steps.melt(
            id_vars="step_number", var_name="dim_x", value_name="step"
        )
        df_steps = df_steps.sort_values("step_number")
        df_steps = df_steps.reset_index(drop=True)
        # Keep only step magnitudes; the sign is carried by the "sign" level.
        df_steps = df_steps.apply(lambda col: col.abs() if col.name == "step" else col)
        # Reorder (n_steps, dim_f, dim_x) -> rows of dim_f columns so the row
        # order matches the (step_number, dim_x) order of df_steps above.
        eval_arr = np.transpose(eval_arr, (0, 2, 1)).reshape(-1, dim_f)
        df_evals = pd.concat((df_steps, pd.DataFrame(eval_arr)), axis=1)
        df_evals = df_evals.melt(
            id_vars=["step_number", "dim_x", "step"],
            var_name="dim_f",
            value_name="eval",
        )
        df_evals = df_evals.assign(**{"sign": direction})
        df_evals = df_evals.set_index(["sign", "step_number", "dim_x", "dim_f"])
        df_evals = df_evals.sort_index()
        dfs.append(df_evals)
    df = pd.concat(dfs).astype({"step": float, "eval": float})
    return df
def unsharp_mask(rgb: np.ndarray, sigma: float, alpha=2.0) -> np.ndarray:
    """Sharpen an image by adding back its high-frequency detail.

    Good starting values are sigma in roughly 0-1 and alpha in roughly 1-2.
    """
    img = ensure_channel_dim(rgb)
    blurred = blur(img, sigma)
    # Unsharp masking: original + alpha * (original - blurred).
    sharpened = img.astype(np.float32)
    sharpened = sharpened + (sharpened - blurred) * alpha
    return to_uint8(sharpened)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> list[dict]:
    """List device actions for RFXCOM RFXtrx devices."""
    try:
        device = async_get_device_object(hass, device_id)
    except ValueError:
        # Unknown device: no actions to offer.
        return []
    return [
        {
            CONF_DEVICE_ID: device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_TYPE: action_type,
            CONF_SUBTYPE: value,
        }
        for action_type in ACTION_TYPES
        if hasattr(device, action_type)
        for value in getattr(device, ACTION_SELECTION[action_type], {}).values()
    ]
async def initialize_settings(user=Depends(login_required)):
    """
    ### Reset settings to their defaults.
    - NOTE(review): the original (Korean) doc described a ``force`` flag
      ("reset existing values when True"), but no such parameter is
      accepted by this endpoint — confirm intended behavior.
    """
    # first=False: presumably marks this as a re-initialization rather than
    # first-time setup — confirm against SettingModel.initialize.
    SettingModel.initialize(first=False)
    return Response('Success', status_code=status.HTTP_200_OK)
def build_generator(z_input: Input, label_input: Input):
    """
    Build generator CNN
    :param z_input: latent input
    :param label_input: conditional label input
    :return: Keras ``Model`` mapping ``[z_input, label_input]`` to a
        generated 28x28x1 image in [-1, 1] (tanh output)

    NOTE(review): despite "CNN" in the summary, the stack below is fully
    connected (Dense layers only).
    """
    model = Sequential([
        Dense(128, input_dim=latent_dim),
        LeakyReLU(alpha=0.2), BatchNormalization(momentum=0.8),
        Dense(256),
        LeakyReLU(alpha=0.2), BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(alpha=0.2), BatchNormalization(momentum=0.8),
        Dense(np.prod((28, 28, 1)), activation='tanh'),
        # reshape to MNIST image size
        Reshape((28, 28, 1))
    ])
    model.summary()
    # Embed the class label into the latent space.
    label_embedding = Embedding(input_dim=10, output_dim=latent_dim)(label_input)
    flat_embedding = Flatten()(label_embedding)
    # combine the noise and label by element-wise multiplication
    model_input = multiply([z_input, flat_embedding])
    image = model(model_input)
    return Model([z_input, label_input], image)
import json
def annotate(project_id, document_id):
    """Annotate document with provided ID.

    :param project_id: project id
    :param document_id: document id
    :return: rendered annotation template, or a redirect when the project
        or document cannot be found
    """
    user = auth.current_user
    doc_id = str(document_id)
    project = services.get_project(user.id, project_id)
    if project is None:
        return redirect(url_for('project.list_projects'))
    document = services.get_document(user.id, project_id, doc_id)
    if document is None:
        # Fall through to the next un-annotated document.
        return redirect(url_for('annotate.annotate_next', project_id=project_id))
    label_options = json.dumps(
        [{'value': label.value, 'label': label.label} for label in project.labels]
    )
    annotation_set = services.get_annotation_set(user.id, project_id, doc_id)
    return render_template('annotate.html', project=project, document=document,
                           annotation_set=annotation_set, options=label_options)
def send_query_search_request(query, page):
    """Send a request to get one page of results for a query."""
    logger.info(f'Sending a request for query {query}, page {page}')
    params = {'query': query, 'inclusive': True, 'page': page, 'api_key': api_key}
    return send_request(query_url, params)
def find_stab(state, xs, zs):
    """Decide whether the Pauli operator with X-support ``xs`` and
    Z-support ``zs`` belongs to the state's stabilizer group.

    Args:
        state: stabilizer state exposing ``stabs``/``destabs`` tableaux.
        xs (set): qubit indices where the operator acts with X.
        zs (set): qubit indices where the operator acts with Z.

    Returns:
        tuple: ``(found, antidestabs)`` where ``found`` is True iff the
        operator is in the stabilizer group, and ``antidestabs`` is the set
        of destabilizer generators anticommuting with it.
    """
    stabs, destabs = state.stabs, state.destabs
    # Destabilizer generators that could *possibly* anticommute.
    candidates = set()
    for q in xs:
        candidates |= destabs.col_z[q]
    for q in zs:
        candidates |= destabs.col_x[q]
    # Keep only those anticommuting an odd number of times.
    antidestabs = {
        d for d in candidates
        if (len(xs & destabs.row_z[d]) + len(zs & destabs.row_x[d])) % 2 == 1
    }
    # Multiply the corresponding stabilizer generators together and compare
    # their support with the requested operator.
    acc_xs, acc_zs = set(), set()
    for d in antidestabs:
        acc_xs ^= stabs.row_x[d]
        acc_zs ^= stabs.row_z[d]
    found = acc_xs == xs and acc_zs == zs
    return found, antidestabs
def get_darwin_memory():
    """ Use system-call to extract total memory on macOS """
    fields = sabnzbd.newsunpack.run_simple(['sysctl', 'hw.memsize']).split()
    # Output looks like "hw.memsize: <bytes>"; the value is the 2nd field.
    return float(fields[1])
def normalise_release(release, area):
    """Try to normalise the release name.

    None is a valid argument.

    Note that the current policy is to accept any release name if an
    existing tag is to be used; normalisation is only applied to tags that
    this script would create.

    Arguments:
        release(str): the release name
        area(str): the area for release

    Returns:
        str: a valid release name

    Raises:
        ValueError if the release name cannot be made valid
    """
    # Already valid (or absent): hand it back untouched.
    if release is None or check_tag_is_valid(release, area):
        return release
    usermsg.warning("Warning: release {} does not conform to "
                    "convention.".format(release))
    new_release = format_argument_version(release)
    if '.' in release:
        usermsg.warning("Release {} contains \'.\' which will"
                        " be replaced by \'-\' to: \'{}\'"
                        .format(release, new_release))
    if not check_tag_is_valid(new_release, area):
        raise ValueError(
            "Release {} could not be made valid.".format(release)
        )
    return new_release
from typing import Optional
def _pytd_return_type(
    name: str,
    return_type: Optional[pytd_node.Node],
    is_async: bool
) -> pytd_node.Node:
    """Convert function return type to pytd."""
    if name == "__init__":
        # Constructors implicitly return None unless explicitly annotated
        # with something more specific than Any.
        if return_type is None or isinstance(return_type, pytd.AnythingType):
            return pytd.NamedType("NoneType")
        return return_type
    if is_async:
        # Async functions return Coroutine[Any, Any, <declared type>].
        return pytd.GenericType(
            pytd.NamedType("typing.Coroutine"),
            (pytd.AnythingType(), pytd.AnythingType(), return_type),
        )
    return pytd.AnythingType() if return_type is None else return_type
def getvpidx(rate, bdepth):
    """Get the token numbers for indices of value and policy vectors."""
    qpm = getqpm(rate, bdepth)
    lo, step, hi = qpm[0], qpm[1], qpm[2]
    value_idx = np.arange(lo, hi + 1, dtype=np.float64) / step
    policy_idx = np.arange(step, hi + 1, dtype=np.float64) / step
    return value_idx, policy_idx
import functools
def apply_on_axis(op, inputs, axis, *args, **kwargs):
    """Applies a differentiable operator on a given axis of the input.
    Args:
     op: a differentiable operator (can be ranks, quantile, etc.)
     inputs: jnp.ndarray<float> of any shape.
     axis: the axis (int) or tuple of ints on which to apply the operator. If
       several axes are passed the operator, those are merged as a single
       dimension.
     *args: other positional arguments to the operator.
     **kwargs: other positional arguments to the operator.
    Returns:
     A jnp.ndarray holding the output of the differentiable operator on the given
     axis.
    """
    op_inner = functools.partial(op, **kwargs)
    axis = (axis,) if isinstance(axis, int) else axis
    # Total number of elements across all reduced axes.
    num_points = np.prod(np.array(inputs.shape)[tuple([axis])])
    permutation = np.arange(len(inputs.shape))
    # Indexing into arange normalizes negative axis values.
    axis = tuple(permutation[a] for a in axis)
    # Move the reduced axes to the end, then flatten them into one.
    permutation = tuple(sorted(set(permutation) - set(axis)) + sorted(axis))
    inputs = jnp.transpose(inputs, permutation)
    # vmap over the merged batch dimension; extra positional args are shared.
    batch_fn = jax.vmap(op_inner, in_axes=(0,) + (None,) * len(args))
    result = batch_fn(jnp.reshape(inputs, (-1, num_points)), *args)
    # Restore the leading (non-reduced) dims; the op's output replaces the
    # merged axis as the trailing dimension.
    shrink = len(axis)
    result = jnp.reshape(result, inputs.shape[:-shrink] + result.shape[-1:])
    # Move the op's output dimension back to where the first reduced axis was.
    permutation = tuple(range(len(result.shape)))
    rank = len(result.shape) - 1
    axis = min(axis)
    permutation = permutation[:axis] + (rank,) + permutation[axis:-1]
    result = jnp.transpose(result, permutation)
    return result
from django.utils.text import normalize_newlines
import re
def clean_html(text):
    """
    Clean the given HTML. Specifically:
        * Convert <b> and <i> to <strong> and <em>.
        * Encode all ampersands correctly.
        * Remove all "target" attributes from <a> tags.
        * Remove extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only when it appears
          at the bottom of the text.
    """
    def _bulleted_list(match):
        """Turn a run of hard-coded-bullet paragraphs into a <ul>."""
        chunk = match.group().replace('</p>', '</li>')
        for bullet in DOTS:
            chunk = chunk.replace('<p>%s' % bullet, '<li>')
        return '<ul>\n%s\n</ul>' % chunk

    text = normalize_newlines(force_text(text))
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Strip target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Drop presentational gunk such as <br clear="all">.
    text = html_gunk_re.sub('', text)
    # Hard-coded bullets -> unordered lists.
    text = hard_coded_bullets_re.sub(_bulleted_list, text)
    # Trailing empty paragraphs at the bottom of the text.
    return trailing_empty_content_re.sub('', text)
def get_report_importer_cls(library_name):
    """ Return a ReportImporter class to handle importing a specific library's report information. """
    lib_module = find_library_module(library_name)
    if not lib_module:
        # No library module at all: fall back to the generic importer.
        return ReportImporter
    # Use the library's own importer when it declares one.
    return getattr(lib_module, 'REPORT_IMPORTER_CLASS', ReportImporter)
def read_reddened_stars(filename):
    """
    Read reddened stars from a text file.

    Parameters
    ----------
    filename : str
        The name (with a path if necessary) of a file with five columns:
        the first integer, the rest floats. The columns represent
        star_id, x_color, y_color, x_color_error, y_color_error.

    Returns
    -------
    stars : ndarray
        Data read from the text file.
    """
    column_names = ('id', 'x', 'y', 'xerr', 'yerr')
    column_formats = ('i8', 'f8', 'f8', 'f8', 'f8')
    return _read_file(filename, {'names': column_names,
                                 'formats': column_formats})
def rotate_pil(image, angle, center=None, scale=1.0):
    """Rotate a PIL image by *angle* via the module's ``rotate`` helper.

    Per the original (Chinese) comment: gives better results than
    ``Image.rotate``; implemented by delegating to ``rotate``.

    NOTE(review): ``center`` and ``scale`` are accepted but never used —
    confirm whether they should be forwarded to ``rotate``.
    """
    image = np.asarray(image)
    rotated = rotate(image, angle)
    return Image.fromarray(rotated)
def pobj_len(this):
    """
    Return the length of *this* (String, List or Dict) wrapped as a Number.
    """
    value = this.getvalue()
    return _new_pobj(Number, len(value))
def _compressed_sparse_stack(blocks, axis):
    """Fast path for stacking CSR/CSC matrices
    (i) vstack for CSR, (ii) hstack for CSC.
    """
    other_axis = 1 if axis == 0 else 0
    data = cupy.concatenate([b.data for b in blocks])
    constant_dim = blocks[0].shape[other_axis]
    # Pick an index dtype wide enough for the merged matrix.
    idx_dtype = sputils.get_index_dtype(arrays=[b.indptr for b in blocks],
                                        maxval=max(data.size, constant_dim))
    indices = cupy.empty(data.size, dtype=idx_dtype)
    indptr = cupy.empty(sum(b.shape[axis]
                            for b in blocks) + 1, dtype=idx_dtype)
    last_indptr = idx_dtype(0)
    sum_dim = 0
    sum_indices = 0
    for b in blocks:
        if b.shape[other_axis] != constant_dim:
            raise ValueError(
                'incompatible dimensions for axis %d' % other_axis)
        # The per-block indices can be copied through unchanged.
        indices[sum_indices:sum_indices+b.indices.size] = b.indices
        sum_indices += b.indices.size
        # Pointers must be shifted by the nnz accumulated so far; the final
        # entry of each block's indptr is folded into the next offset.
        idxs = slice(sum_dim, sum_dim + b.shape[axis])
        indptr[idxs] = b.indptr[:-1]
        indptr[idxs] += last_indptr
        sum_dim += b.shape[axis]
        last_indptr += b.indptr[-1]
    indptr[-1] = last_indptr
    if axis == 0:
        return csr.csr_matrix((data, indices, indptr),
                              shape=(sum_dim, constant_dim))
    else:
        return csc.csc_matrix((data, indices, indptr),
                              shape=(constant_dim, sum_dim))
def _parse_wall_max_height(response: HtmlResponse):
    """Parse the wall's maximum height.

    Returns 0 if not available.
    """
    height_cell = response.css('th:contains("Höhe") + td ::text')
    return _parse_length(height_cell)
def mandelbrot_square(z_min: complex, z_max: complex, n: int = 500,
                      n_max: int = 100, show_plot: bool = False,
                      axis: Axes = None, overall_extent: list[float] = None) -> AxesImage:
    """Visualise the mandelbrot set in a square matrix
    Colours correspond to the number of iterations required to have
    (abs(z) > r_max=2), modulo 200.
    Parameters
    ----------
    z_min
        Bottom-left corner of complex domain to visualise
    z_max
        Top-right corner of complex domain to visualise
    n
        Split the domain length and height by `n` points
    n_max
        As per `mandelbrot_escape_numbers`
    show_plot
        If `True`, show the plot in a popup
    axis
        Axis to plot to
    overall_extent
        Parameter to `plt.imshow` to ensure consistency across frames in an
        animation
    """
    # MANDELBROT ITERATION
    real_domain = np.linspace(z_min.real, z_max.real, n)
    imag_domain = np.linspace(z_min.imag, z_max.imag, n)
    # Build the n x n grid of complex sample points.
    domain = [ [complex(x,y) for x in real_domain] for y in imag_domain ]
    domain = np.array(domain)
    escape_ns = mandelbrot_escape_numbers(domain, n_max)
    # COLOURING
    # TODO: try different functions to map result to [0,1], and set
    # vmax = highest value possible with that function.
    # NOTE(review): viewmapper is hard-coded to 1, so the branches for 2 and
    # the fallback below are currently dead code.
    viewmapper = 1
    if viewmapper == 1:
        escape_ns, vmax = escape_ns%200, 200
    elif viewmapper == 2:
        escape_ns, vmax = np.e**(-1/escape_ns), 1
    else:
        escape_ns, vmax = escape_ns, n_max
    # Colormap, some good ones: None, 'hsv' - https://matplotlib.org/stable/tutorials/colors/colormaps.html
    cmap = None
    # CREATING GRAPH
    local_extent = [z_min.real, z_max.real, z_min.imag, z_max.imag]
    extent = overall_extent or local_extent
    # Draw on the provided axis if given, else on the global pyplot figure.
    plot = axis or plt
    image = plot.imshow(escape_ns, extent=extent, aspect='equal', vmin=0, vmax=vmax, cmap=cmap)
    if show_plot:
        plt.ylabel("Im")
        plt.xlabel("Re")
        plt.show()
    return image
def compatible(s1: Shape, s2: Shape):
    """Check whether two shapes are broadcast-compatible.

    Args:
        s1 (:class:`lab.shape.Shape`): First shape.
        s2 (:class:`lab.shape.Shape`): Second shape.

    Returns:
        bool: Whether the two shapes are compatible.
    """
    try:
        expand_and_broadcast(s1, s2)
    except RuntimeError:
        # Broadcasting failed: the shapes are incompatible.
        return False
    return True
def get_job_service(request_type: RequestType) -> JobServiceInterface:
    """
    Factory for a correctly wired job service. Use this function to obtain
    any JobServiceInterface instance.

    :param request_type: The request type; the JobServiceInterface is
        chosen and wired based on this type.
    :return: Correctly wired JobServiceInterface
    """
    injector_cls = injectors.get(request_type)
    if not injector_cls:
        # No wiring registered for this request type.
        raise NotImplementedError()
    return injector_cls().inject(JobServiceInterface)
def get_index(repository_path, pkl_fname=PKL_FNAME):
    """
    Return the index information for the EMTF repository located at
    *repository_path*.
    """
    pkl_fname, _ = initialize(repository_path, pkl_fname=pkl_fname)
    with open(pkl_fname) as fid:
        index = cPickle.load(fid)
    return index
def CycleTarget_to_c(self):
    """Emit the C label-plus-continue statement targeting this cycle."""
    return "cycle_{}: continue;".format(self.targetID)
def _run_ic(dataset):
    """Run iterative compression on a dataset."""
    data_file = HUFFNER_DATA_DIR / (dataset + HUFFNER_DATA_EXT)
    return solve_ic(
        str(data_file),
        timeout=EXACT_TIMEOUT,
        preprocessing=2,
        htime=min(0.3 * EXACT_TIMEOUT, 1),
    )
import os
def _expand_target_patterns(blade, target_ids, excluded_trees):
    """Expand target patterns from command line.

    Returns:
        (direct_targets, starting_dirs): ``direct_targets`` is the set of
        fully qualified '<path>:<name>' ids; ``starting_dirs`` is the set of
        directories whose BUILD files should be loaded (expanded from the
        '...' and '*' patterns).
    """
    # Parse command line target_ids. For those in the form of <path>:<target>,
    # record (<path>,<target>) in direct_targets; for the rest (with <path>
    # but without <target>), record <path> into starting_dirs.
    def under_excluded_trees(source_dir):
        # True when source_dir is, or lies beneath, an excluded tree.
        if source_dir.startswith('./'):
            source_dir = source_dir[2:]
        if source_dir in excluded_trees:
            return True
        for dir in excluded_trees:
            if path_under_dir(source_dir, dir):
                return True
        return False
    direct_targets = set()
    starting_dirs = set()
    for target_id in target_ids:
        source_dir, target_name = target_id.rsplit(':', 1)
        if not os.path.exists(source_dir):
            _report_not_exist('Directory', source_dir, source_dir, blade)
        skip_file = _check_under_skipped_dir(source_dir)
        if skip_file:
            console.warning('"%s" is under skipped directory due to "%s", ignored' % (target_id, skip_file))
            continue
        if target_name == '...':
            # Recursive pattern: collect every subdirectory with a BUILD file.
            for root, dirs, files in os.walk(source_dir):
                # Note the dirs[:] = slice assignment; we are replacing the
                # elements in dirs (and not the list referred to by dirs) so
                # that os.walk() will not process deleted directories.
                if under_excluded_trees(root) or _has_load_excluded_file(root, files):
                    dirs[:] = []
                    continue
                dirs[:] = [d for d in dirs if not _is_load_excluded_dir(root, d)]
                if 'BUILD' in files:
                    starting_dirs.add(root)
        elif target_name == '*':
            # Wildcard: all targets in this single directory.
            starting_dirs.add(source_dir)
        else:
            direct_targets.add(target_id)
    return direct_targets, starting_dirs
from typing import Callable
def expect_jwt(
    api: Api,
    message: str = "JWT token is required and has the format: 'Bearer <token>'",
) -> Callable:
    """
    Decorator factory that:
    - adds the expected Authorization header to the swagger docs,
    - validates a JWT token is present and well-formed,
    - attaches the token and its decoded payload to flask's ``g``.
    """
    def decorator(func: Callable) -> Callable:
        """Adds expected header to swagger"""
        @api.response(403, message)
        @api.header(
            "Authorization",
            "JWT token to authorize request. Example: 'Bearer <my token>'",
        )
        @wraps(func)
        def wrapper(*args, **kwargs) -> Callable:
            """
            validates JWT token is present,
            adds token to g
            """
            auth = request.headers.get("Authorization")
            if not auth:
                # Header missing entirely -> 401.
                raise BadRequest(
                    "Unauthorized",
                    401,
                    {"Request.Header.Authorization": "JWT token is required"},
                )
            auth_list = auth.split(" ")
            if len(auth_list) != 2:
                # Header present but not of the form '<scheme> <token>' -> 400.
                raise BadRequest(
                    "Invalid Auth Header",
                    400,
                    {"Authorization": "JWT token has the format: 'Bearer <token>'"},
                )
            token = auth_list[1]
            # decode_auth_token validates the token (presumably raising on
            # failure — confirm against User.decode_auth_token).
            payload = User.decode_auth_token(token)
            # attaching token and payload to app context
            g.token = token
            g.payload = payload
            return func(*args, **kwargs)
        return wrapper
    return decorator
def get_pythia_definitions(parsed_file):
    """
    Return a dictionary of all Pythia definitions in the input parsed file,
    of the form
    "PythiaBothParam <NAME>=<LABEL>"
    or
    "PythiaBothParam <NAME>=<NUMBER>",
    as {'NAME1': 'LABEL1', 'NAME2': VALUE2, ...}.

    Parameters
    ----------
    parsed_file: Lark Tree instance
        Input parsed file.

    Raises
    ------
    RuntimeError
        If the input is not a Tree or lacks the expected structure.
    """
    if not isinstance(parsed_file, Tree):
        raise RuntimeError("Input not an instance of a Tree!")

    def str_or_float(arg):
        """Return *arg* as a float when possible, otherwise unchanged."""
        try:
            return float(arg)
        except (TypeError, ValueError):
            return arg

    try:
        return {'{0}:{1}'.format(tree.children[0].value, tree.children[1].value): str_or_float(tree.children[2].value)
                for tree in parsed_file.find_data('pythia_def')}
    except Exception as exc:
        # Bug fix: the original built the RuntimeError but never raised it,
        # silently returning None on malformed input.
        raise RuntimeError("Input parsed file does not seem to have the expected structure.") from exc
def disable_layer_logging():
    """
    Disable the shape logging for all layers from this moment on. Can be
    useful when creating multiple towers.
    """
    class _AlwaysContains:
        """Claims to contain every element, so all layers look 'logged'."""
        def __contains__(self, _):
            return True
    # Rebind the module-level marker (``nonlocal`` cannot reach module scope).
    globals()['_LAYER_LOGGED'] = _AlwaysContains()
def remove_punct(word: Text) -> Text:
    """Return *word* lower-cased with every non-alphabetic character removed.

    Preserves the original contract of returning None (implicitly) when
    nothing alphabetic remains.
    """
    result = ''.join(char.lower() for char in word if char.isalpha())
    # ''.join() can never yield None, so the original "result != None"
    # check was dead code; emptiness is the only case to handle.
    if result:
        return result
    return None
def onCapability(name, value):
    """
    Run test only if capability with `name` equals `value`.
    """
    capability = getattr(process_capabilities, name)
    # Skip when the capability differs from the requested value.
    return skipOnCondition(
        lambda: capability != value,
        'Capability "%s" not present.' % name,
    )
from re import U
def WGS84ReferenceSystem():
    """
    Return the `GeodeticReferenceSystem` for the WGS84 ellipsoid.
    """
    return GeodeticReferenceSystem(
        a=6378137.0 * U.m,
        f=1 / 298.257223563,
        angular_unit=1 * U.DEG,
        height_unit=100. * U.km,
        name="WGS84",
    )
def _remap_keypoints(keypoints, padded_w, padded_h, expand, data_shape, ratio):
"""
Remap bboxes in (x0, y0, x1, y1) format into the input image space
Parameters
----------
bboxes
padded_w
padded_h
expand
Returns
-------
"""
keypoints[:, 0::2] *= padded_w / (data_shape * ratio)
keypoints[:, 1::2] *= padded_h / data_shape
keypoints[:, 0::2] -= expand[0]
keypoints[:, 1::2] -= expand[1]
return keypoints | 1b8d2520f0df1847967e8db9c565598d6b5ee2b6 | 3,629,766 |
def viz_mask(mask):
    """Given a (batch, w, h, 10) array, returns an RGB visualization."""
    palette = np.array([(248, 183, 205), (246, 210, 224), (200, 231, 245),
                        (103, 163, 217), (6, 113, 183), (249, 200, 14),
                        (248, 102, 36), (234, 53, 70), (102, 46, 155),
                        (67, 188, 205)])
    # Weight each class channel by its palette colour, sum over classes,
    # and clamp into the displayable 0-255 range.
    weighted = np.tile(np.expand_dims(mask, axis=-1), (1, 1, 1, 1, 3)) * palette
    return np.clip(np.sum(weighted, axis=-2), 0, 255)
import click
def varg_command(cli, name, *others):
    """Create positional argument that are of variable length.

    The token ``^`` can be used to mark the end of the argument list.
    """
    def decorator(func):
        # Attach the variadic positional argument first...
        wrapped = click.argument(CommandAfterArgs.split_arg, nargs=-1)(func)
        # ...then the extra decorators, innermost last (hence reversed).
        for extra in reversed(others):
            wrapped = extra(wrapped)
        return cli.command(
            name,
            cls=CommandAfterArgs,
            context_settings={"ignore_unknown_options": True}
        )(wrapped)
    return decorator
def get_pipeline_lines(input_pipeline):
    """Returns a list with the lines in the .cppipe file"""
    with open(input_pipeline) as handle:
        return handle.readlines()
def _f3_int_x_(self, *args):
    """Integrate a 3D-function over x, producing a 2D function of (y, z).

    >>> f = ...
    >>> g = f.integrate_x ( 'x-range' )

    - see ROOT.RooAbsReal.createIntegral
    """
    x_vars = ROOT.RooArgSet(self.xvar)
    integral = self.fun.createIntegral(x_vars, *args)
    return Fun2D(integral, self.yvar, self.zvar)
def eotvos_number(L, lambda_c):
    """Return the Eötvös/Bond number for the given liquid and gravity.

    Parameters
    ----------
    L : scalar (m) or Quantity
        Characteristic length, i.e. the radius of curvature at the top of
        the droplet.
    lambda_c : scalar (m) or Quantity
        The capillary length of the liquid in units of meters.

    Returns
    -------
    `eotvos_number` : Quantity
        The Eötvös number for the given parameters.
    """
    # Normalise any Quantity inputs to meters before taking the ratio.
    if isinstance(L, Quantity):
        L = L.to('m')
    if isinstance(lambda_c, Quantity):
        lambda_c = lambda_c.to('m')
    ratio = L / lambda_c
    return np.power(ratio, 2)
def simple_update_property(attr_name, can_change=False,
                           type_cast_method=_string_type_cast):
    """Create a @property backed by a hidden attribute.

    When ``can_change`` is False, the property may only be set once; any
    later attempt to change the value raises.

    Args:
        attr_name: String; name of a hidden attribute (must begin with an
            underscore) backing the @property.
        can_change: Boolean; whether the value may change once set.
            Defaults to False.
        type_cast_method: Callable casting the value to a specific type
            (e.g. integer or string). Defaults to _string_type_cast.

    Returns:
        A builtin property mapped to ``attr_name`` enforcing the
        ``can_change`` rule.
    """
    if not attr_name.startswith('_'):
        raise GitRvException('Simple property attributes must begin with an '
                             'underscore. Received %r.' % (attr_name,))

    def _get(self):
        """Read the backing attribute; None when unset."""
        return getattr(self, attr_name, None)

    def _set(self, value):
        """Write the backing attribute, enforcing the can_change rule."""
        value = type_cast_method(value)
        existing = getattr(self, attr_name, None)
        if existing is None or can_change:
            setattr(self, attr_name, value)
        elif existing != value:
            raise GitRvException('Attribute %r can\'t be changed. Already set '
                                 'to %r.' % (attr_name, existing))

    return property(_get, _set)
def _hyphenate_word(word):
    """ Memoized hyphenate_word() function. """
    if word not in _hyphenate_word_cache:
        _hyphenate_word_cache[word] = hyphenate_word(word)
    # Return a copy so callers cannot mutate the cached list.
    return list(_hyphenate_word_cache[word])
def _rmse(y, y_bin, probs):
    """Return 1 - RMSE (inverted so hill-climbing can maximize the score)."""
    rmse = sqrt(mean_squared_error(y_bin, probs))
    return 1.0 - rmse
def skip_movie():
    """
    The judge didn't like the movie they were presented with, so give them a new one.

    Returns:
        str: response string
    """
    room = _get_room(request.args.get('code'))
    replacement = Movie.get_random()
    room.current_round.movie = replacement
    # Push the new title & plot to the judge's client only.
    payload = {
        'event': 'movie',
        'room': room.serialize(),
        'title': replacement.title,
        'plot': replacement.plot,
    }
    socketio.send(payload, json=True, to=room.current_round.judge.socket_client)
    return 'Started round!'
import re
import os
import mimetypes
def static(req, resp):
    """Serve files from the static directory, or render a directory index.

    Raises FileNotFoundError when the resolved path is neither a file nor
    a directory.
    """
    resp.content_type = const.TEXT_HTML
    static = g.app.config.get('application', 'static').strip('/')
    # Raw string avoids the invalid "\/" escape warning; pattern unchanged.
    static_path_re = re.compile(r"^\/%s\/%s" %
                                (req.app.strip('/'), static,))
    # Strip the "/<app>/<static>" prefix to get the path under static/.
    sfile_path = '/' + req.app.strip('/') + '/' + req.route.strip('/')
    sfile_path = static_path_re.sub('', sfile_path)
    sfile_path = g.app.path.rstrip('/') + '/static' + sfile_path
    if os.path.isfile(sfile_path):
        # Bug fix: close the file handle instead of leaking it.
        with open(sfile_path, 'rb') as handle:
            sfile = handle.read()
        resp.content_type = const.APPLICATION_OCTET_STREAM
        mime_type = mimetypes.guess_type(sfile_path)
        if mime_type[0] is not None:
            resp.content_type = mime_type[0]
        if mime_type[1] is not None:
            resp.content_type += ';charset=%s' % mime_type[1]
        return sfile
    elif os.path.isdir(sfile_path):
        # Render a simple HTML index of the directory contents.
        page = HTMLDoc()
        resp.content_type = const.TEXT_HTML
        folder = os.listdir(sfile_path)
        html = page.create_element('HTML')
        head = html.create_element('HEAD')
        title = head.create_element('TITLE')
        title.append(req.route)
        body = html.create_element('BODY')
        h1 = body.create_element('H1')
        h1.append(req.route)
        for item in folder:
            item = req.route.rstrip('/') + '/' + item
            a = body.create_element('A')
            a.set_attribute('href', item)
            a.append(item)
            body.create_element('BR')
        h3 = body.create_element('H3')
        h3.append(metadata.identity)
        return str(page)
    else:
        raise FileNotFoundError(
            "No such file or directory: '%s'" % sfile_path
        )
def argmin_2d(X):
    """Take the arg minimum of a 2D array."""
    assert X.size > 0, "argmin of empty array not defined"
    flat_index = X.argmin()
    return tuple(np.unravel_index(flat_index, X.shape))
def get_cf():
    """
    Get an authenticated cloudflare API instance. Authentication is
    resolved in order from:

    1. `.cloudflare.cfg`
    2. Environment variables

    @see
    https://github.com/cloudflare/python-cloudflare#providing-cloudflare-username-and-api-key
    """
    client = CloudFlare.CloudFlare()
    return client
def remove_user_past_events(user_id):
    """
    :Route: DELETE /<user_id>/past?past_event=event_id&past_event=event_id
    :Description: Remove past events for a single user with id `user_id`. If no past events are specified, all of the user's past events are removed.
    :param user_id: The unique ID of a specific user
    :type user_id: int
    :param past_event: An optional query component/parameter that takes a list of values. Passed in values are int event IDs which uniquely identify an event.
    :type past_event: int or None
    :return: Success/error message
    :Requires: Admin permissions
    """
    events_to_remove = request.args.getlist('past_event')
    # Verify the user exists before touching the collection.
    user = user_utils.get_user(user_id)
    if not user:
        return f"No such user with id {user_id} found!"
    if not events_to_remove:
        # No events specified: clear the user's whole past-event list.
        users_collection.update({'account.id': str(user_id)},
                                {'$set': {'app.past_events': []}})
        return f"Removed all past events for user with id {user_id}"
    # Otherwise pull just the requested events.
    users_collection.update({'account.id': str(user_id)},
                            {'$pull': {'app.past_events': {'$in': events_to_remove}}})
    return f"Removed specified past events for user with id {user_id}"
import requests
from bs4 import BeautifulSoup
def futures_index_dict():
    """
    Map futures index names to their codes on index.cfmmc.com.

    :return: name to code
    :rtype: dict

    Known index codes include e.g. CCFI (commodity futures index), CAFI
    (agricultural products), OOFI, CRFI, OIFI, GRFI, SOFI, FEFI, CIFI,
    STFI, BMFI, ECFI, CCCI, FWCI, ECCI, MECI, ALCI.
    """
    url = "http://index.cfmmc.com/index/views/index.html"
    soup = BeautifulSoup(requests.get(url).text, "lxml")
    # The first <b> in the box is a header; each remaining one carries the
    # display name as text and the code in its "indexcode" attribute.
    cells = soup.find(attrs={"class": "down_box"}).find_all("b")[1:]
    return {cell.text.strip(): cell["indexcode"] for cell in cells}
def int_like(value, name, optional=False, strict=False):
    """
    Convert to int or raise if not int_like

    Parameters
    ----------
    value : object
        Value to verify
    name : str
        Variable name for exceptions
    optional : bool
        Flag indicating whether None is allowed
    strict : bool
        If True, then only allow int or np.integer that are not bool. If False,
        allow types that support integer division by 1 and conversion to int.

    Returns
    -------
    converted : int
        value converted to a int
    """
    if optional and value is None:
        return None
    bool_or_timedelta = isinstance(value, (bool, np.timedelta64))
    # Collapse singleton arrays/scalars before the type checks.
    squeeze = getattr(value, 'squeeze', None)
    if callable(squeeze):
        value = squeeze()
    if not bool_or_timedelta:
        if isinstance(value, (int, np.integer)):
            return int(value)
        if not strict:
            # Accept anything that floor-divides by 1 to itself, e.g. 7.0.
            try:
                if value == (value // 1):
                    return int(value)
            except Exception:
                pass
    extra_text = ' or None' if optional else ''
    raise TypeError('{0} must be integer_like (int or np.integer, but not bool'
                    ' or timedelta64){1}'.format(name, extra_text))
def maxmin(*args):
    """
    Return timed ((t, max), (t, min)) values from a (t, v) dataset,
    ordered by time/value tuple order.

    When used to filter an array the window will have to be doubled to
    allocate both values (or just keep the one with max absdiff from
    previous).
    """
    samples = args[0]
    # Compare (value, time) pairs so value ties are broken by time,
    # matching a full sort of the swapped tuples.
    lo_v, lo_t = min((v, t) for t, v in samples)
    hi_v, hi_t = max((v, t) for t, v in samples)
    return sorted(((hi_t, hi_v), (lo_t, lo_v)))
def from_none(exc):
    """Emulate ``raise exc from None``: detach any chained cause so the
    original context is suppressed when the exception is displayed."""
    exc.__suppress_context__ = True
    exc.__cause__ = None
    return exc
def create_1D_velocity_magnitude_array_RHS(number_of_flowrates_analyzed, lower_flowrate_design_value, upper_flowrate_design_value, D_fd):
    """Create arrays of secondary-inlet velocity magnitudes from a linearly
    spaced range of volumetric flow rates.  The magnitudes are later combined
    with the angle arrays to build velocity components.

    Args:
        number_of_flowrates_analyzed (int): user-defined number of flow rates to simulate.
        lower_flowrate_design_value (float): lower-limit flow rate [m^3/s].
        upper_flowrate_design_value (float): upper-limit flow rate [m^3/s].
        D_fd (float): secondary inlet diameter [m].

    Returns:
        RHS_velocity_magnitude_array (ndarray): velocity magnitudes for the RHS secondary inlet [m/s].
        LHS_velocity_magnitude_array (ndarray): velocity magnitudes for the LHS secondary inlet
            (the same array object as the RHS one, by design).
        mass_flow_rate_array (ndarray): mass flow rates for the chosen flow-rate field [kg/s].
        flow_rate_RHS_empty (ndarray): the underlying volumetric flow rates [m^3/s].
    """
    rho_air = 1.225 # density air kg/m^3
    flow_rate_RHS_empty = np.zeros((number_of_flowrates_analyzed,1), dtype=float) # define empty array for flow rates (RHS)
    flow_rate_RHS_empty[0] = lower_flowrate_design_value # filling initial value with the defined lower limit value
    flow_rate_RHS_empty[number_of_flowrates_analyzed-1] = upper_flowrate_design_value # filling last entry with the defined upper limit
    # determining steps between flow rates (linear spacing between the limits):
    step_flowrate_rhs = (upper_flowrate_design_value - lower_flowrate_design_value)/(number_of_flowrates_analyzed-1)
    print("flowrate step")
    print(step_flowrate_rhs)
    # Looping through array and assigning intermediate values
    i = 1
    while i <= number_of_flowrates_analyzed - 2:
        flow_rate_RHS_empty[i] = flow_rate_RHS_empty[i-1] + step_flowrate_rhs
        i = i + 1
    print("flow rate array after looping:")
    print(flow_rate_RHS_empty)
    # Convert to velocity using user-defined secondary air flow rate
    area_secondary_inlet = (3.14159/4)*(D_fd**2) # secondary inlet cross sectional area m^2
    # Dividing each flow rate by the area: V = Q/A
    RHS_velocity_magnitude_array = flow_rate_RHS_empty/area_secondary_inlet
    print("RHS velocity magnitude following multiplication")
    print(RHS_velocity_magnitude_array)
    mass_flow_rate_array = rho_air*flow_rate_RHS_empty
    print("mass flow rate array:")
    print(mass_flow_rate_array)
    # setting the LHS equal to the RHS velocities (same array object):
    LHS_velocity_magnitude_array = RHS_velocity_magnitude_array
    return RHS_velocity_magnitude_array, LHS_velocity_magnitude_array, mass_flow_rate_array, flow_rate_RHS_empty
def get_interface_type(interface):
    """Return the interface type ('ethernet', 'svi', ...) for an interface
    name, keyed on its first two characters (case-insensitive); 'unknown'
    when no prefix matches."""
    prefix_to_type = (
        ("ET", "ethernet"),
        ("VL", "svi"),
        ("LO", "loopback"),
        ("MG", "management"),
        ("MA", "management"),
        ("PO", "portchannel"),
        ("NV", "nve"),
    )
    upper_name = interface.upper()
    for prefix, if_type in prefix_to_type:
        if upper_name.startswith(prefix):
            return if_type
    return "unknown"
from swingers.sauth.models import ApplicationLink
def retrieve_access_token(request, service):
    """
    Return (url, access_token) for the named service and the requesting user.

    *service* is the ``server_name`` of an ApplicationLink; the link's
    server URL and an access token for ``request.user`` are returned.
    Note: if more than one link matches there may be issues; lookup
    behavior follows ``ApplicationLink.objects.get``.
    """
    link = ApplicationLink.objects.get(server_name=service)
    server_url = link.server_url
    token = link.get_access_token(request.user.username)
    return server_url, token
from typing import Union
from pathlib import Path
from typing import Any
import pickle
def load_pickle(path: Union[Path, str]) -> Any:
    """
    Load and return the object pickled at *path*.

    :param path: location of the pickle file; must exist
        (checked via ``check_file_exists``; note the ``assert`` is
        stripped when Python runs with ``-O``).
    :return: the unpickled object.

    .. warning:: ``pickle.load`` executes arbitrary code — only use on
        trusted files.
    """
    assert check_file_exists(path), f'"{path}" does not exist'
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def convolutional_encode2(layers, batch_norm, stim_in,
                          is_training, reuse_variables=False):
  """Embed stimulus using multiple layers of convolutions.

  Each convolutional layer has convolution, batch normalization and soft-plus,
  except the final layer, which is left linear (no activation).

  Args :
    layers : string description of multiple layers.
             Format - (a1, b1, c1, a2, b2, c2 .. ),
             where (a, b, c) = (filter width, num of filters, stride)
    batch_norm : (boolean) if batch norm applied after each convolution.
    stim_in : stimulus input.
    is_training : (boolean) if training or testing for (batch norm).
    reuse_variables : if using previously-defined variables.
                      Useful for embedding mutliple responses using same graph.

  Returns :
    net : stimulus embedding (output of the final conv layer).
    layer_collection : list of intermediate tensors — [stim_in] followed by
      every conv layer's output (previously undocumented).
  """
  # Each layer consumes 3 consecutive entries of `layers`:
  # (filter width, number of filters, stride).
  n_layers = int(len(layers)/3)
  tf.logging.info('Number of layers: %d' % n_layers)
  # Set normalization applied inside each conv layer; is_training is passed
  # through to batch_norm via normalizer_params below.
  if batch_norm:
    normalizer_fn = slim.batch_norm
  else:
    normalizer_fn = None
  tf.logging.info('Logistic activation')
  # Use slim to define multiple layers of convolution.
  net = stim_in
  layer_collection = [net]
  for ilayer in range(n_layers):
    # The last layer is linear; presumably the caller applies its own
    # nonlinearity/loss to the embedding — confirm with call sites.
    if ilayer == n_layers - 1:
      activation_fn = None
    else:
      activation_fn = tf.nn.softplus
    tf.logging.info('Building stimulus embedding layer: %d, %d, %d'
                    % (int(layers[ilayer*3 + 1]), int(layers[ilayer*3]),
                       int(layers[ilayer*3 + 2])))
    # Variable scope is per-layer; `reuse` lets a second call share weights.
    net = slim.conv2d(net, int(layers[ilayer*3 + 1]),
                      int(layers[ilayer*3]),
                      stride=int(layers[ilayer*3 + 2]),
                      scope='stim_layer_wt_%d' % ilayer,
                      reuse=reuse_variables,
                      normalizer_fn=normalizer_fn,
                      activation_fn=activation_fn,
                      normalizer_params={'is_training': is_training})
    layer_collection += [net]
  return net, layer_collection
import logging
import sys
def get_headers_and_fields(fileobject) -> tuple[list[str], list[str]]:
    """
    Parse the mongoexport header line and build the full output field list.

    Returns a pair ``(headers, fields)`` where ``headers`` are the raw
    column names from the first line of *fileobject*, and ``fields`` is
    the case-insensitively sorted union of the headers, the processed
    event columns added here, and the module-level __TRAVEL/__GENOME/
    __VARIANT columns, minus the __OMIT columns.

    Exits the process with status 1 if the header line cannot be read
    (deliberate CLI behavior).
    """
    try:
        headers = fileobject.readline().strip().split(",")
    except Exception:
        # Log the full traceback, then abort — this is a command-line tool.
        logging.exception("Error in reading mongoexport header")
        sys.exit(1)
    cols_to_add = [
        "events.confirmed.value",
        "events.confirmed.date",
        "events.firstClinicalConsultation.date",
        "events.hospitalAdmission.date",
        "events.hospitalAdmission.value",
        "events.icuAdmission.date",
        "events.icuAdmission.value",
        "events.onsetSymptoms.date",
        "events.outcome.date",
        "events.outcome.value",
        "events.selfIsolation.date",
    ]
    # set.union accepts any iterable, so no intermediate set() copies needed.
    fields = set(headers).union(cols_to_add)
    fields = fields.union(__TRAVEL + __GENOME + __VARIANT)
    fields = sorted(fields - set(__OMIT), key=str.casefold)
    return headers, fields
def getUniqueExposures(conn, candidateList, limit = 0, mostRecent = True, nonDets = False, discoveryLimit = 10, lastDetectionLimit=20, requestType = REQUESTTYPES['incremental'], ddc = False):
    """Collect the distinct exposure names referenced by the lightcurves of
    every candidate in *candidateList*, shuffled into random order.

    Args:
        conn: database connection passed through to getLightcurveData.
        candidateList: candidates whose recurrences should be scanned.
        limit / mostRecent / nonDets / discoveryLimit / lastDetectionLimit /
        requestType / ddc: forwarded unchanged to getLightcurveData.

    Returns:
        A shuffled list of unique exposure names.
    """
    print("Finding Unique Exposures...")
    # Accumulate detection exposures for all candidates into a set.
    unique_exposures = set()
    for candidate in candidateList:
        recurrences, avgRa, avgDec = getLightcurveData(conn, candidate, limit = limit, mostRecent = mostRecent, nonDets = nonDets, discoveryLimit = discoveryLimit, lastDetectionLimit=lastDetectionLimit, requestType = requestType, ddc = ddc)
        unique_exposures.update(row.expname for row in recurrences)
    exposureSet = list(unique_exposures)
    # 2016-10-07 KWS The most recent exposures are probably the ones still
    # to be collected; with a sorted list a single thread would end up doing
    # all the downloading, so randomise the order instead.
    shuffle(exposureSet)
    return exposureSet
def rgb2gray(image):
    """Convert an RGB image to grayscale using ITU-R BT.601 luma weights.

    Accepts a single image of shape (H, W, 3) or a batch of images of
    shape (N, H, W, 3); channels are assumed RGB-ordered on the last axis.

    :param image: ndarray with ``ndim`` 3 or 4, channels last.
    :return: ndarray with the channel axis removed.
    :raises ValueError: for any other ``ndim`` (the previous version
        silently returned ``None`` in that case).
    """
    if image.ndim not in (3, 4):
        raise ValueError(
            "rgb2gray expects a 3-D (H, W, 3) or 4-D (N, H, W, 3) array, "
            "got ndim={}".format(image.ndim))
    # Ellipsis indexing covers both the single-image and batched layouts.
    return (0.299 * image[..., 0] + 0.587 * image[..., 1] +
            0.114 * image[..., 2])
def create_variable(workflow_stat):
	"""
	Generates the javascript variables used to generate the chart.
	Returns a complete <script> block of protovis layout constants sized
	from the workflow's runtime, job count and transformation map.
	@param workflow_stat the WorkflowInfo object reference
	"""
	number_of_jobs = workflow_stat.total_job_instances
	# Adding variables: x axis spans the workflow runtime, y axis one bar
	# row (bar_spacing px) per job instance.
	var_str = "<script type='text/javascript'>\nvar initMaxX = " + str(workflow_stat.workflow_run_time) + ";\n"
	var_str +="var bar_spacing = 20;\n\
var inner_bar_margin = 4;\n\
var line_width =2;\n\
var inner_bar_width = bar_spacing-2*inner_bar_margin;\n\
var nameMargin  = 400;\n\
var scaleMargin = 15;\n"
	var_str += "var initMaxY = "+str(number_of_jobs) + "*bar_spacing;\n"
	# Fixed legend entries come first; per-transformation colors/labels are
	# appended below, but only for transformations that actually have stats.
	color_name_str = "var color =['darkblue','yellow','orange' ,'steelblue', 'purple'"
	desc_name_str = "var desc=['pre script','condor job','resource delay', 'job runtime as seen by dagman','post script '"
	for k,v in workflow_stat.transformation_color_map.items():
		if k in workflow_stat.transformation_statistics_dict:
			color_name_str += ",'"+v +"'"
			desc_name_str +=",'"+k +"'"
	color_name_str += "];\n"
	desc_name_str +="];\n"
	var_str += color_name_str
	var_str += desc_name_str
	# Small workflows get a chart just tall enough for their bars; larger
	# ones use a fixed 840px height (and rely on scrolling/zoom).
	if number_of_jobs < 5:
		var_str +="var h = " +str(number_of_jobs) +"*bar_spacing*2 + scaleMargin + bar_spacing;\n"
	else:
		var_str +="var h = 840;\n"
	# Remaining layout/state variables; footer height grows with the number
	# of transformations so the legend (3 per row, 15px each) fits.
	var_str +="var w = 1460;\n\
var toolbar_width = 550;\n\
var containerPanelPadding = 20;\n\
var chartPanelWidth = w+ containerPanelPadding*2;\n\
var chartPanelHeight  = h + containerPanelPadding*2;\n\
var curX  = 0;\n\
var curY = 0;\n\
var curEndX  = initMaxX;\n\
var curEndY =  initMaxY;\n\
var xScale = pv.Scale.linear(curX, curEndX).range(0, w-nameMargin);\n\
var yScale = pv.Scale.linear(curY, curEndY).range(0, h -scaleMargin);\n\
var xLabelPos = containerPanelPadding + nameMargin;\n\
var yLabelPos = 40;\n\
var labelWidth = 200;\n\
var panXFactor = 10;\n\
var panYFactor  = 10;\n\
var isNewWindow = false;\n\
var condorTime = false;\n\
var kickstart = false;\n\
var condorRuntime = false;\n\
var resourceDelay = false;\n\
var preScript = false;\n\
var postScript = false;\n\
var showName = false;\n\
var headerPanelWidth = w+ containerPanelPadding*2;\n\
var headerPanelHeight  = 100;\n\
var footerPanelWidth = w+ containerPanelPadding*2;\n\
var footerPanelHeight  = "+ str(45 + len(workflow_stat.transformation_statistics_dict)/3*15) + ";\n\
</script>\n"
	return var_str
def group_fnames_by_pair_param2_param3_and_param1(directory: str,
                                                  param1: str,
                                                  param2: str,
                                                  param3: str):
    """
    Group the avg-result filenames in *directory* into a 2D array indexed by
    the (param2, param3) value pair (rows) and the param1 value (columns).

    :param directory: folder containing the avg result files
    :type directory: str
    :param param1: model parameter name, one of ['beta', 'mortality', 'visibility']
    :type param1: str
    :param param2: like param1, but different from param1
    :type param2: str
    :param param3: like param1, but different from param1 and param2
    :type param3: str
    :return: 2D object array where result[i][j] --> fname( (param2, param3)_i, param1_j )
    :rtype: np.ndarray

    Example:
        param1, param2, param3 = beta, mortality, visibility
        result[i][j] --> fname( (mortality, visibility)_i, beta_j )
    """
    fnames = all_fnames_from_dir(directory=directory)

    # First pass: assign an index to every distinct (param2, param3) pair
    # and every distinct param1 value, in order of first appearance.
    pair_index = {}
    param1_index = {}
    for fname in fnames:
        variable_params = variable_params_from_fname(fname=fname)
        pair = (variable_params[param2], variable_params[param3])
        pair_index.setdefault(pair, len(pair_index))
        param1_index.setdefault(variable_params[param1], len(param1_index))

    # Re-index param1 values in sorted order; later plotting code relies on
    # this ordering (line colors depend on it).
    param1_index = {val: idx for idx, val in enumerate(sorted(param1_index))}

    # Second pass: place each fname in its (pair, param1) cell.
    grouped = np.zeros((len(pair_index), len(param1_index)), dtype=object)
    for fname in fnames:
        variable_params = variable_params_from_fname(fname=fname)
        row = pair_index[(variable_params[param2], variable_params[param3])]
        col = param1_index[variable_params[param1]]
        grouped[row][col] = fname
    return grouped
def drought_add_facecolor(drought: pd.DataFrame) -> pd.DataFrame:
    """Attach 'facecolor' and 'barwidth' columns (in place) ahead of a
    drought bar plot, binning drought_index into five color bands."""
    palette = np.array(['blue', 'cyan', 'green', 'orange', 'red'])
    # Left-closed bins; values outside [-1.0, 1.001) get code -1, which
    # indexes the last palette entry ('red') just as in the original.
    bands = pd.cut(drought.drought_index,
                   [-1.0, -0.99, -0.5, +0.5, +0.99, +1.001],
                   right=False)
    drought['facecolor'] = palette[bands.cat.codes.values]
    drought['barwidth'] = (drought['bucket_size'] * 0.8
                           if "bucket_size" in drought.columns else 0.8)
    return drought
def aten_le(mapper, graph, node):
    """ Construct a PaddleLayer that compares two values (less-or-equal).
    TorchScript example:
        %80 : bool = aten::le(%78, %79)
    Operand meanings:
        %80 (bool): output — whether the first element is <= the second.
        %78 (-): first input to compare.
        %79 (-): second input to compare.
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # List of outputs produced by the current node
    current_outputs = [output_name]
    # Process input 0, i.e. %78
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
    layer_inputs["x"] = inputs_name[0]
    # Process input 1, i.e. %79
    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
    layer_inputs["y"] = inputs_name[1]
    # List of inputs consumed by the current node
    current_inputs = list(layer_inputs.values())
    graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
    return current_inputs, current_outputs
def fstr(*args, **kwargs):
    """f-string-like formatting for Python 2: ``fstr(format, ...)`` formats
    *format* using the caller's locals and globals (delegates to vfstr)."""
    if not args:
        raise TypeError('missing format in fstr(format, *args, **kwargs)')
    fmt = args[0]
    return vfstr(fmt, args[1:], kwargs)
import logging
def race_store(cfg):
    """
    Create the race store matching the current configuration.

    :param cfg: Config object. Mandatory.
    :return: a race store implementation — a composite ES+file store when
        the datastore type is "elasticsearch", otherwise a file-only store.
    """
    logger = logging.getLogger(__name__)
    if cfg.opts("reporting", "datastore.type") == "elasticsearch":
        logger.info("Creating ES race store")
        return CompositeRaceStore(EsRaceStore(cfg), EsResultsStore(cfg), FileRaceStore(cfg))
    logger.info("Creating file race store")
    return FileRaceStore(cfg)
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    # Traceback objects carry their own line number; plain frames use f_lineno.
    if istraceback(frame):
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    # Prefer the real source file; fall back to whatever file the code
    # object reports.
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current (0-based) line.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except OSError:
            # Source unavailable (built-in, deleted file, ...): no context.
            lines = index = None
        else:
            # Clamp the window so it lies entirely within the file.
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def Adjoint(T):
    """Compute the 6x6 adjoint representation [AdT] of a homogeneous
    transformation matrix.

    :param T: A homogeneous transformation matrix
    :return: The 6x6 adjoint representation [AdT] of T

    Example Input:
        T = np.array([[1, 0, 0, 0],
                      [0, 0, -1, 0],
                      [0, 1, 0, 3],
                      [0, 0, 0, 1]])
    Output:
        np.array([[1, 0, 0, 0, 0, 0],
                  [0, 0, -1, 0, 0, 0],
                  [0, 1, 0, 0, 0, 0],
                  [0, 0, 3, 1, 0, 0],
                  [3, 0, 0, 0, 0, -1],
                  [0, 0, 0, 0, 1, 0]])
    """
    R, p = TransToRp(T)
    # [AdT] = [[R, 0], [[p]R, R]] with [p] the skew-symmetric form of p.
    return np.block([[R, np.zeros((3, 3))],
                     [np.dot(VecToso3(p), R), R]])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.