content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import logging
def GitPush(git_repo, refspec, push_to, force=False, dry_run=False,
            capture_output=True, skip=False, **kwargs):
    """Wrapper for pushing to a branch.

    Args:
        git_repo: Git repository to act on.
        refspec: The local ref to push to the remote.
        push_to: A RemoteRef object representing the remote ref to push to.
        force: Whether to bypass non-fastforward checks.
        dry_run: If True, do everything except actually push the remote ref.
        capture_output: Whether to capture output for this command.
        skip: Log the git command that would have been run, but don't run it;
            this avoids e.g. remote access checks that still apply to |dry_run|.
    """
    # Build the git argument vector before deciding whether to execute it.
    cmd = ['push', push_to.remote, '%s:%s' % (refspec, push_to.ref)]
    for flag, enabled in (('--force', force), ('--dry-run', dry_run)):
        if enabled:
            cmd.append(flag)
    if skip:
        logging.info('Would have run "%s"', cmd)
        return
    return RunGit(git_repo, cmd, capture_output=capture_output, **kwargs)
def boxes_to_array(bound_boxes):
    """
    # Args
        boxes : list of BoundBox instances

    # Returns
        centroid_boxes : (N, 4)
        probs : (N, nb_classes)
    """
    # One row per box: [best class index, (x, y, w, h) array, best class score].
    rows = [[np.argmax(box.classes),
             np.asarray([box.x, box.y, box.w, box.h]),
             np.max(box.classes)]
            for box in bound_boxes]
    return np.array(rows)
import os
def bam_to_junction_reads_table(bam_filename, ignore_multimapping=False):
    """Create a table of reads for this bam file"""
    # Split counts into uniquely-mapped vs multi-mapped junction reads.
    uniquely, multi = _get_junction_reads(bam_filename)
    # ignore_multimapping presumably controls whether the multi-mapped counts
    # are folded into the totals -- TODO confirm against _combine_uniquely_multi.
    reads = _combine_uniquely_multi(uniquely, multi, ignore_multimapping)
    # Remove "junctions" with same start and stop
    reads = reads.loc[reads[JUNCTION_START] != reads[JUNCTION_STOP]]
    # Re-index sequentially after the filter above left gaps.
    reads.index = np.arange(reads.shape[0])
    # Tag every row with the originating file's basename as the sample id.
    reads['sample_id'] = os.path.basename(bam_filename)
    reads = add_exons_and_junction_ids(reads)
    return reads
from typing import Tuple
def _scope_prepare(scope: str) -> Tuple[object, str]:
"""
Parse a scope string a return a tuple consisting of context manager for the assignation of the tf's scope
and a string representing the summary name. The scope is of the form "<ident1>.<ident2>. ... .<ident3>", the
righmost identifier is used as summary name whereas the prefix is used as scope name.
:param scope: A string containing a qualified name.
:return:
"""
splits = scope.rsplit('.', 1)
if any(map(lambda v: len(v) == 0, splits)):
raise ValueError(f'Invalid scope name: {scope}')
if len(splits) == 1:
return nullcontext(), splits[0]
return tf.name_scope(splits[0]), splits[1] | 01bcd08d87e23621f3476055379d9c7403fd4b75 | 27,903 |
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Only tear down device state when every platform unloaded cleanly.
    if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        aftv = hass.data[DOMAIN][entry.entry_id][ANDROID_DEV]
        # Close the ADB connection before dropping our reference to the device.
        await aftv.adb_close()
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
from typing import Tuple
def render_wrapped_text(text: str, font: pygame.freetype.Font,
                        color: Color, centered: bool, offset_y: int,
                        max_width: int) -> Tuple[pygame.Surface, pygame.Rect]:
    """Return a surface & rectangle with text rendered over several lines.
    Parameter offset_y defines the distance between lines."""
    words = text.split()
    lines = []          # list of (line text, (width, height)) tuples
    lines_h = 0         # accumulated height of all line boxes (without offsets)
    line_w, line_h = 0, 0
    # Separate text into lines, storing each line size
    while words:
        line_words = []
        while words:
            # Measure the line as it would look with the next word appended.
            _, _, l_w, l_h = font.get_rect(
                ' '.join(line_words + words[:1]))
            if l_w > max_width:
                # Next word would overflow: close the current line.
                break
            line_w, line_h = l_w, l_h
            line_words.append(words.pop(0))
        if line_words:
            lines_h += line_h
            lines.append((' '.join(line_words), (line_w, line_h)))
        else:
            # Split word in half if it is too long to fit a line on its own.
            long_word = words.pop(0)
            words.insert(0, long_word[:len(long_word)//2])
            words.insert(1, long_word[len(long_word)//2:])
    # Create transparent surface and rectangle to be returned
    final_height = lines_h + (len(lines) - 1) * offset_y if lines else lines_h
    final_surf = pygame.Surface((max_width, final_height), pygame.SRCALPHA, 32)
    final_surf.convert()
    final_rect = final_surf.get_rect()
    # Render lines on the surface
    pos_y = 0
    for line in lines:
        if centered:
            # Center each line horizontally within max_width.
            pos_x = int(max_width/2 - line[1][0]/2)
        else:
            pos_x = 0
        font.render_to(final_surf, (pos_x, pos_y), line[0], color)
        pos_y += line[1][1] + offset_y
    return final_surf, final_rect
def comp4(a1, a2, b1, b2):
    """Intersection of two intervals, assuming a1 < a2 and b1 < b2.

    Returns [lo, hi] for the overlapping segment, or [] when the
    intervals are disjoint.
    """
    if b1 > a2 or a1 > b2:
        # Disjoint: empty intersection.
        return []
    # Overlapping: the middle two of the four sorted endpoints bound it.
    endpoints = sorted((a1, a2, b1, b2))
    return endpoints[1:3]
from typing import Callable
from typing import Optional
from typing import Union
from typing import Tuple
from typing import List
def fixed_step_solver_template(
    take_step: Callable,
    rhs_func: Callable,
    t_span: Array,
    y0: Array,
    max_dt: float,
    t_eval: Optional[Union[Tuple, List, Array]] = None,
):
    """Helper function for implementing fixed-step solvers supporting both
    ``t_span`` and ``max_dt`` arguments. ``take_step`` is assumed to be a
    function implementing a single step of size h of a fixed-step method.
    The signature of ``take_step`` is assumed to be:
        - rhs_func: Either a generator :math:`G(t)` or RHS function :math:`f(t,y)`.
        - t0: The current time.
        - y0: The current state.
        - h: The size of the step to take.
    It returns:
        - y: The state of the DE at time t0 + h.
    ``take_step`` is used to integrate the DE specified by ``rhs_func``
    through all points in ``t_eval``, taking steps no larger than ``max_dt``.
    Each interval in ``t_eval`` is divided into the least number of sub-intervals
    of equal length so that the sub-intervals are smaller than ``max_dt``.

    Args:
        take_step: Callable for fixed step integration.
        rhs_func: Callable, either a generator or rhs function.
        t_span: Interval to solve over.
        y0: Initial state.
        max_dt: Maximum step size.
        t_eval: Optional list of time points at which to return the solution.

    Returns:
        OdeResult: Results object.
    """
    # ensure the output of rhs_func is a raw array
    def wrapped_rhs_func(*args):
        return Array(rhs_func(*args)).data
    y0 = Array(y0).data
    # Break the integration range into sub-intervals no larger than max_dt.
    t_list, h_list, n_steps_list = get_fixed_step_sizes(t_span, t_eval, max_dt)
    ys = [y0]
    for current_t, h, n_steps in zip(t_list, h_list, n_steps_list):
        # Start each interval from the last accepted state.
        y = ys[-1]
        inner_t = current_t
        for _ in range(n_steps):
            y = take_step(wrapped_rhs_func, inner_t, y, h)
            inner_t = inner_t + h
        ys.append(y)
    ys = Array(ys)
    results = OdeResult(t=t_list, y=ys)
    # Restrict the output to the requested t_eval points, if any.
    return trim_t_results(results, t_span, t_eval)
def set_reference_ene(rxn_lst, spc_dct,
                      pes_model_dct_i, spc_model_dct_i,
                      run_prefix, save_prefix, ref_idx=0):
    """ Sets the reference species for the PES for which all energies
        are scaled relative to.
    """
    # Set the index for the reference species, right now default to 1st spc
    ref_rxn = rxn_lst[ref_idx]
    _, (ref_rgts, _) = ref_rxn
    ioprinter.info_message(
        'Determining the reference energy for PES...', newline=1)
    ioprinter.info_message(
        ' - Reference species assumed to be the',
        ' first set of reactants on PES: {}'.format('+'.join(ref_rgts)))
    # Get the model for the first reference species
    ref_scheme = pes_model_dct_i['therm_fit']['ref_scheme']
    ref_enes = pes_model_dct_i['therm_fit']['ref_enes']
    ref_ene_level = spc_model_dct_i['ene']['lvl1'][0]
    ioprinter.info_message(
        ' - Energy Level for Reference Species: {}'.format(ref_ene_level))
    # Get the elec+zpe energy for the reference species
    ioprinter.info_message('')
    # Accumulate the 0 K heat of formation over all reference reagents.
    hf0k = 0.0
    for rgt in ref_rgts:
        ioprinter.info_message(' - Calculating energy for {}...'.format(rgt))
        basis_dct, uniref_dct = thermfit.prepare_refs(
            ref_scheme, spc_dct, (rgt,))
        spc_basis, coeff_basis = basis_dct[rgt]
        # Build filesystem
        ene_spc, ene_basis = thmroutines.basis.basis_energy(
            rgt, spc_basis, uniref_dct, spc_dct,
            spc_model_dct_i, run_prefix, save_prefix)
        # Calculate the total energy
        hf0k += thermfit.heatform.calc_hform_0k(
            ene_spc, ene_basis, spc_basis, coeff_basis, ref_set=ref_enes)
    return hf0k
import torch
def get_laf_center(LAF: torch.Tensor) -> torch.Tensor:
    """Returns a center (keypoint) of the LAFs.

    Args:
        LAF: tensor [BxNx2x3].

    Returns:
        tensor BxNx2.

    Shape:
        - Input: :math: `(B, N, 2, 3)`
        - Output: :math: `(B, N, 2)`

    Example:
        >>> input = torch.ones(1, 5, 2, 3)  # BxNx2x3
        >>> output = get_laf_center(input)  # BxNx2
    """
    raise_error_if_laf_is_not_valid(LAF)
    # The translation column (last column) of each 2x3 affine frame is the
    # keypoint location.
    centers: torch.Tensor = LAF[..., 2]
    return centers
def get_all_vlan_bindings_by_logical_switch(context, record_dict):
    """Get Vlan bindings that match the supplied logical switch."""
    # Filter on both the logical switch uuid and the originating OVSDB
    # instance, since switch uuids are only unique per OVSDB server.
    query = context.session.query(models.VlanBindings)
    return query.filter_by(
        logical_switch_uuid=record_dict['logical_switch_id'],
        ovsdb_identifier=record_dict['ovsdb_identifier']).all()
def conv_input_length(output_length, filter_size, padding, stride):
    """Determines input length of a convolution given output length.

    Args:
        output_length: integer.
        filter_size: integer.
        padding: one of "same", "valid", "full".
        stride: integer.

    Returns:
        The input length (integer).
    """
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    # Implied one-sided padding for each scheme.
    pad_for = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}
    pad = pad_for[padding]
    # Invert the standard convolution output-length formula.
    return (output_length - 1) * stride - 2 * pad + filter_size
def convert_data_to_int(x, y):
    """
    Convert the provided data to integers, given a set of data with fully
    populated values.

    Args:
        x: 2-D array of feature values (any hashable dtype).
        y: 1-D array of labels.

    Returns:
        (new_x, new_y): int32 arrays where each distinct value in a column of
        x (and in y) is replaced by a small integer code. Note: the mapping
        follows set iteration order, so codes are not guaranteed stable across
        interpreter runs for non-int values.
    """
    # Build a value -> code mapping per column of x.
    # BUGFIX: `xrange` is Python 2 only and raised NameError on Python 3;
    # use `range`.
    x_classes = []
    for i in range(x.shape[1]):
        x_classes.append({item: j for j, item in enumerate(set(x[:, i]))})
    new_x = np.zeros(x.shape, dtype='i')
    for i, xi in enumerate(x):
        for j, xii in enumerate(xi):
            new_x[i, j] = x_classes[j][xii]
    # Create the new version of y
    y_classes = {item: i for i, item in enumerate(set(y))}
    new_y = np.zeros(y.shape, dtype='i')
    for i, yi in enumerate(y):
        new_y[i] = y_classes[yi]
    return new_x, new_y
def send_report(report_text, svc_info, now_str):
    """
    Publish report to AWS SNS endpoint
    Note: publish takes a max of 256KB.
    """
    # Truncate oversized reports so the publish call stays under the limit.
    overage = len(report_text) - MAX_SNS_MESSAGE
    if overage > 0:
        # NOTE(review): the marker '\n<message truncated/>' is 21 chars but
        # only overage + 20 chars are removed, so the result can still exceed
        # MAX_SNS_MESSAGE by one character -- confirm the limit has slack.
        report_text = report_text[:-overage - 20] + '\n<message truncated/>'
    resp = SNS_C.publish(TopicArn=svc_info['CowReportARN'],
                         Message=report_text,
                         Subject='CowCatcher Report for ' + now_str)
    return resp
def credential():
    """Return credential."""
    # Fixed test-fixture credentials; Credential is a project-declared type.
    return Credential('test@example.com', 'test_password')
def get_ez_from_contacts(xlsx_file, contacts_file, label_volume_file):
    """Return list of indices of EZ regions given by the EZ contacts in the patient spreadsheet"""
    # 0-based column positions in the "EZ hypothesis and EI" sheet.
    CONTACTS_IND = 6
    EZ_IND = 7
    df = pd.read_excel(xlsx_file, sheet_name="EZ hypothesis and EI", header=1)
    ez_contacts = []
    contacts_col = df.iloc[:, CONTACTS_IND]
    # Keep only rows that actually name a contact.
    mask = contacts_col.notnull()
    contacts_names = contacts_col[mask]
    # A contact belongs to the EZ hypothesis when its EZ column reads 'YES'.
    ez_mask = df.iloc[:, EZ_IND][mask] == 'YES'
    ez_contacts.extend(contacts_names[ez_mask])
    contacts = Contacts(contacts_file)
    label_vol = nib.load(label_volume_file)
    ez_inds = []
    for contact in ez_contacts:
        coords = contacts.get_coords(contact)
        region_ind = nifti.point_to_brain_region(
            coords, label_vol, tol=3.0) - 1  # Minus one to account for the shift
        # After the shift, -1 means the point fell outside any labelled region.
        if region_ind != -1:
            ez_inds.append(region_ind)
    return ez_inds
def mapfmt_str(fmt: str, size: int) -> str:
    """Same as mapfmt, but works on strings instead of bytes."""
    if size != 4:
        # Widen 32-bit struct codes to their 64-bit equivalents.
        return fmt.translate(str.maketrans('if', 'qd'))
    return fmt
import os
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Camera."""
    # check for missing required configuration variable
    if config.get("file_path") is None:
        _LOGGER.error("Missing required variable: file_path")
        return False
    setup_config = (
        {
            "name": config.get("name", "Local File"),
            "file_path": config.get("file_path")
        }
    )
    # check filepath given is readable
    if not os.access(setup_config["file_path"], os.R_OK):
        _LOGGER.error("file path is not readable")
        return False
    # NOTE(review): no explicit `return True` on success, so the function
    # returns None after registering the device -- confirm the platform
    # loader treats None as success.
    add_devices([
        LocalFile(setup_config)
    ])
def RandomImageDetection(rows=None, cols=None):
    """Return a uniform random color `vipy.image.ImageDetection` of size (rows, cols) with a random bounding box"""
    # Pick random dimensions when not supplied; minimum 128 leaves room for
    # the 16-pixel minimum box below.
    rows = np.random.randint(128, 1024) if rows is None else rows
    cols = np.random.randint(128, 1024) if cols is None else cols
    # Random RGB noise image plus a random box of at least 16x16 pixels.
    return ImageDetection(array=np.uint8(255 * np.random.rand(rows, cols, 3)), colorspace='rgb', category='RandomImageDetection',
                          xmin=np.random.randint(0, cols - 16), ymin=np.random.randint(0, rows - 16),
                          bbwidth=np.random.randint(16, cols), bbheight=np.random.randint(16, rows))
def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False):
    """
    Upsample tensor `x` to match `target_len` by repeating the tokens `stride` time on the sequence length dimension.
    """
    if stride == 1:
        # Already at the target resolution.
        return x
    if separate_cls:
        # Keep the first ([CLS]) token aside so it is not repeated.
        cls = x[:, :1]
        x = x[:, 1:]
    output = tf.repeat(x, repeats=stride, axis=1)
    if separate_cls:
        if truncate_seq:
            # Pad at the end so truncation stays aligned with the original.
            output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]])
        # Reserve one slot for the [CLS] token prepended below.
        output = output[:, : target_len - 1]
        output = tf.concat([cls, output], axis=1)
    else:
        output = output[:, :target_len]
    return output
import numpy
def torsional_scan_linspaces(zma, tors_names, increment=0.5,
                             frm_bnd_key=None, brk_bnd_key=None):
    """ scan grids for torsional dihedrals

    Returns one (start, interval, npoints) triple per torsion; the units of
    `increment` are presumably radians (intervals are built from 2*pi) --
    TODO confirm against callers.
    """
    sym_nums = torsional_symmetry_numbers(
        zma, tors_names, frm_bnd_key=frm_bnd_key, brk_bnd_key=brk_bnd_key)
    # Each torsion only needs to cover 2*pi divided by its symmetry number.
    intervals = tuple(2*numpy.pi/sym_num - increment for sym_num in sym_nums)
    npoints_lst = tuple(
        (int(interval / increment)+1) for interval in intervals)
    return tuple((0, interval, npoints)
                 for interval, npoints in zip(intervals, npoints_lst))
import ctypes
def get_output_to_console(p_state):
    """Returns a bool indicating whether the Log is output to the console."""
    # _Get_Output_To_Console is the ctypes-bound C library function;
    # p_state is an opaque state handle passed through as void*.
    return bool(_Get_Output_To_Console(ctypes.c_void_p(p_state)))
def check_valid_column(observation):
    """
    Validates that our observation only has valid columns

    Returns:
        - assertion value: True if all provided columns are valid, False otherwise
        - error message: empty if all provided columns are valid, False otherwise
    """
    valid_columns = {
        "observation_id",
        "Type",
        "Date",
        "Part of a policing operation",
        "Latitude",
        "Longitude",
        "Gender",
        "Age range",
        "Officer-defined ethnicity",
        "Legislation",
        "Object of search",
        "station"
    }
    provided = set(observation.keys())
    # Required columns that were not supplied.
    missing = valid_columns - provided
    if missing:
        return False, "Missing columns: {}".format(missing)
    # Supplied columns that are not part of the schema.
    extra = provided - valid_columns
    if extra:
        return False, "Unrecognized columns provided: {}".format(extra)
    return True, ""
import os
def _params_to_filename(job_id, run_id, extra_run_id):
    """
    Prepare file name based on params.

    Args:
        job_id (str): Job Uid
        run_id (str): Job Run Uid
        extra_run_id (str): Extra Job Uid

    Returns:
        str: filenames
    """
    with current_app.app_context():
        # NOTE(review): the default is an f-string with no placeholders; a
        # plain "/tmp/" would do. Its trailing slash also produces '//' in
        # the path below (harmless on POSIX).
        _logs_store_dir = current_app.config.get('LOG_STORE_FOLDER', f"/tmp/")
        try:
            if not os.path.isdir(_logs_store_dir):
                os.makedirs(_logs_store_dir, exist_ok=True)
        except OSError as error:
            # Best-effort: log and fall through; any later open() on the path
            # will surface the failure to the caller.
            logger.info('Directory {} can not be created due {}'.format(
                _logs_store_dir, error))
        jobs_logs_path = f"{_logs_store_dir}/{job_id}_{run_id}_{extra_run_id}.log"
    return jobs_logs_path
def w_getopt(args, options):
    """A getopt for Windows.

    Options may start with either '-' or '/', the option names may
    have more than one letter (/tlb or -RegServer), and option names
    are case insensitive.

    Returns two elements, just as getopt.getopt. The first is a list
    of (option, value) pairs in the same way getopt.getopt does, but
    there is no '-' or '/' prefix to the option name, and the option
    name is always lower case. The second is the list of arguments
    which do not belong to an option.

    Different from getopt.getopt, a single argument not belonging to an option
    does not terminate parsing.
    """
    opts = []
    arguments = []
    remaining = list(args)
    while remaining:
        token = remaining[0]
        if token[:1] not in "/-":
            # Plain argument; unlike getopt.getopt, keep scanning for options.
            arguments.append(token)
            remaining = remaining[1:]
            continue
        name = token[1:].lower()  # strip the '-' or '/' prefix
        if name + ':' in options:
            # Option requiring a value: consume the following token as well.
            try:
                opts.append((name, remaining[1]))
            except IndexError:
                raise GetoptError("option '%s' requires an argument" % token)
            remaining = remaining[1:]
        elif name in options:
            opts.append((name, ''))
        else:
            raise GetoptError("invalid option '%s'" % token)
        remaining = remaining[1:]
    return opts, arguments
def get_alt_for_density(density: float, density_units: str='slug/ft^3',
                        alt_units: str='ft', nmax: int=20, tol: float=5.) -> float:
    """
    Gets the altitude associated with a given air density.

    Parameters
    ----------
    density : float
        the air density in slug/ft^3
    density_units : str; default='slug/ft^3'
        the density units; slug/ft^3, slinch/in^3, kg/m^3
    alt_units : str; default='ft'
        sets the units for the output altitude; ft, m, kft
    nmax : int; default=20
        max number of iterations for convergence
    tol : float; default=5.
        tolerance in alt_units

    Returns
    -------
    alt : float
        the altitude in feet
    """
    # Iterate internally in feet regardless of the requested output units.
    tol = convert_altitude(tol, alt_units, 'ft')
    dalt = 500.  # ft; finite-difference step for the local density slope
    alt_old = 0.
    alt_final = 5000.
    n = 0
    #density_scale = _density_factor(density_units, "slug/ft^3")
    # Secant-style root finding (the slope is estimated by a finite
    # difference rather than an analytic derivative).
    while abs(alt_final - alt_old) > tol and n < nmax:
        alt_old = alt_final
        alt1 = alt_old
        alt2 = alt_old + dalt
        rho1 = atm_density(alt1, density_units=density_units)
        rho2 = atm_density(alt2, density_units=density_units)
        # Local linearization: altitude change per unit density change.
        m = dalt / (rho2 - rho1)
        alt_final = m * (density - rho1) + alt1
        n += 1
    if abs(alt_final - alt_old) > tol:
        raise RuntimeError('Did not converge; Check your units; n=nmax=%s\n'
                           'target alt=%s alt_current=%s' % (nmax, alt_final, alt1))
    alt_out = convert_altitude(alt_final, 'ft', alt_units)
    return alt_out
from datetime import datetime
def parse_date_string(date: str) -> datetime:
    """Converts date as string (e.g. "2004-05-25T02:19:28Z") to a
    timezone-aware UTC datetime.

    Args:
        date: timestamp in '%Y-%m-%dT%H:%M:%SZ' format (literal trailing 'Z').

    Returns:
        A timezone-aware ``datetime`` in UTC.

    Raises:
        ValueError: if ``date`` does not match the expected format.
    """
    # BUGFIX: `timezone` was referenced but never imported (only `datetime`
    # was), so this raised NameError at runtime; import it locally so the
    # function is self-contained.
    from datetime import timezone
    # https://docs.python.org/3.6/library/datetime.html#strftime-strptime-behavior
    # http://strftime.org/
    parsed = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')  # string parse time
    # strptime yields a naive datetime; attach UTC explicitly.
    return parsed.replace(tzinfo=timezone.utc)
def levenshtein_distance(s, t, ratio_calc=False):
    """ levenshtein_distance:
    Calculates levenshtein distance between two strings.
    If ratio_calc = True, the function computes the
    levenshtein distance ratio of similarity between two strings.
    For all i and j, distance[i,j] will contain the Levenshtein
    distance between the first i characters of s and the
    first j characters of t.

    Args:
        s: first string.
        t: second string.
        ratio_calc: when True, return a similarity ratio in [0, 1] using
            substitution cost 2 (matching python-Levenshtein's .ratio());
            otherwise return the integer edit distance.

    Returns:
        int edit distance, or float ratio when ratio_calc is True.
    """
    rows = len(s) + 1
    cols = len(t) + 1
    distance = np.zeros((rows, cols), dtype=int)
    # BUGFIX: the original filled the border inside a doubly-nested loop,
    # which left the border unfilled and `row`/`col` undefined (NameError at
    # the return) whenever either string was empty. Fill the border directly.
    distance[:, 0] = np.arange(rows)
    distance[0, :] = np.arange(cols)
    # Iterate over the matrix to compute the cost of deletions, insertions
    # and/or substitutions.
    for col in range(1, cols):
        for row in range(1, rows):
            if s[row - 1] == t[col - 1]:
                cost = 0  # characters match: no substitution cost
            else:
                # To align with the python-Levenshtein package, a substitution
                # costs 2 when computing the ratio, else 1.
                cost = 2 if ratio_calc else 1
            distance[row][col] = min(
                distance[row - 1][col] + 1,         # cost of deletions
                distance[row][col - 1] + 1,         # cost of insertions
                distance[row - 1][col - 1] + cost)  # cost of substitutions
    if ratio_calc:
        total = len(s) + len(t)
        if total == 0:
            # Two empty strings are identical (avoids division by zero).
            return 1.0
        return (total - distance[-1, -1]) / total
    # Minimum number of edits needed to convert string s to string t.
    return distance[-1, -1]
def no_warnings(func):
    """ Decorator to run R functions without warning.

    Temporarily sets R's `warn` option to -1 (suppress all warnings) around
    the wrapped call, restoring the previous value afterwards -- even when
    the call raises.
    """
    def run_withoutwarnings(*args, **kwargs):
        # Look up the current `warn` option so it can be restored later.
        warn_i = _options().do_slot('names').index('warn')
        oldwarn = _options()[warn_i][0]
        _options(warn=-1)
        try:
            return func(*args, **kwargs)
        finally:
            # BUGFIX: try/finally restores the option on success and on *any*
            # exception, including BaseExceptions like KeyboardInterrupt which
            # the original `except Exception` + `raise e` version missed
            # (and `raise e` also reset the traceback).
            _options(warn=oldwarn)
    return run_withoutwarnings
def ParseKindsAndSizes(kinds):
    """Parses kind|size list and returns template parameters.

    Args:
        kinds: list of kinds to process.

    Returns:
        sizes_known: whether or not all kind objects have known sizes.
        size_total: total size of objects with known sizes.
        len(kinds) - 2: for template rendering of greater than 3 kinds.
    """
    sizes_known = True
    size_total = 0
    kinds_and_sizes = RetrieveCachedStats()
    if kinds_and_sizes:
        for kind in kinds:
            if kind in kinds_and_sizes:
                size_total += kinds_and_sizes[kind]
            else:
                # At least one kind has no cached size entry.
                sizes_known = False
    else:
        # No cached stats available at all.
        sizes_known = False
    if size_total:
        # Pretty-print only when there is a non-zero total to show.
        size_total = GetPrettyBytes(size_total)
    return sizes_known, size_total, len(kinds) - 2
def load_3D(path,
            n_sampling=10000,
            voxelize=True,
            voxel_mode="binary",
            target_size=(30, 30, 30)):
    """Load 3D data into numpy array, optionally voxelizing it.

    Parameters
    ----------
    path : str
        Path to 3D file.
    n_sampling : int
        Number of points to be sampled in case the read 3D data contains a mesh.
    voxelize : bool, optional (Default True)
        Indicates whether the 3D data will be converted into voxelgrid or not.
    voxel_mode : {"binary", "density", "TDF"}, optional (Default "binary")
        The type of feature vector that will be generated from the voxelgrid.
        binary
            0 for empty voxels, 1 for occupied.
        density
            number of points inside voxel / total number of points.
        TDF
            Truncated Distance Function. Value between 0 and 1 indicating the distance
            between the voxel's center and the closest point. 1 on the surface,
            0 on voxels further than 2 * voxel side.
    target_size : [int, int, int], optional (Default [30, 30, 30])
        Dimensions of voxelgrid in case voxelize is True.

    Returns
    -------
    feature_vector : ndarray
        (target_size[0], target_size[1], target_size[2])

    Raises
    ------
    ValueError: if 3D format is not valid.
    """
    point_cloud = PyntCloud.from_file(path)
    if point_cloud.mesh is not None:
        # Meshes carry no point set directly: sample the surface uniformly.
        point_cloud = PyntCloud(point_cloud.get_sample(
            "mesh_random", n=n_sampling))
    if voxelize:
        vgrid_id = point_cloud.add_structure("voxelgrid", x_y_z=target_size)
        voxelgrid = point_cloud.structures[vgrid_id]
        if voxel_mode == "binary":
            feature_vector = voxelgrid.get_feature_vector(mode="binary")
        elif voxel_mode == "density":
            feature_vector = voxelgrid.get_feature_vector(mode="density")
        elif voxel_mode == "TDF":
            feature_vector = voxelgrid.get_feature_vector(mode="TDF")
        else:
            raise ValueError("Invalid mode; available modes are: {}".format(
                {"binary", "density", "TDF"}))
        # add fake channel
        return feature_vector[None, ...]
    else:
        return point_cloud
import array
def create_vector2d(vec):
    """Returns a vector as a numpy array.

    Args:
        vec: any indexable with at least two numeric components.

    Returns:
        numpy.ndarray of shape (2,) containing (vec[0], vec[1]).
    """
    # BUGFIX: the original called the stdlib `array` *module* (brought in via
    # `import array`), which is not callable and raised TypeError. The
    # docstring promises a numpy array, so build one.
    import numpy as np
    return np.array([vec[0], vec[1]])
def get_chrom_start_end_from_string(s):
    """Get chrom name, int(start), int(end) from a string '{chrom}__substr__{start}_{end}'

    ...doctest:
        >>> get_chrom_start_end_from_string('chr01__substr__11838_13838')
        ('chr01', 11838, 13838)
    """
    try:
        # Split off the chromosome name, then the start/end coordinates.
        chrom, coords = s.split('__substr__')
        start_str, end_str = coords.split('_')
        return chrom, int(start_str), int(end_str)
    except Exception:
        # Any parse failure (wrong shape, non-numeric coords) maps to one error.
        raise ValueError("String %s must be of format '{chrom}__substr__{start}_{end}'" % s)
def Remove_Invalid_Tokens(tokenized_sentence, invalidating_symbols):
    """
    Returns a tokenized sentence without tokens that include
    invalidating_symbols.

    Args:
        tokenized_sentence: list of token strings (left unmodified).
        invalidating_symbols: whitespace-separated string of symbols; any
            token containing one of them is dropped.

    Returns:
        A new list with the offending tokens removed.
    """
    # BUGFIX: the original called list.remove() once per invalid symbol found
    # in a token, so a token containing two different invalid symbols caused
    # a second remove() of an already-removed value and raised ValueError
    # (and with duplicate tokens it could drop the wrong occurrence).
    # Filtering into a new list handles both cases.
    symbols = invalidating_symbols.split()
    return [token for token in tokenized_sentence
            if not any(symbol in token for symbol in symbols)]
def _get_protocol(url):
"""
Get the port of a url.
Default port is 80. A specified port
will come after the first ':' and before the next '/'
"""
if url.find('http://') == 0:
return 'http'
elif url.find('https://') == 0:
return 'https'
else:
return 'http' | 42b2750148829154f17e34a2cebccf4387f07f25 | 27,934 |
import warnings
def convert_spectral_axis(mywcs, outunit, out_ctype, rest_value=None):
    """
    Convert a spectral axis from its unit to a specified out unit with a given output
    ctype

    Only VACUUM units are supported (not air)

    Process:
        1. Convert the input unit to its equivalent linear unit
        2. Convert the input linear unit to the output linear unit
        3. Convert the output linear unit to the output unit

    Parameters (NOTE(review): inferred from usage -- confirm against callers):
        mywcs: astropy WCS object with a spectral axis.
        outunit: target unit (string or astropy unit).
        out_ctype: target FITS spectral CTYPE (e.g. 'FREQ', 'VOPT-F2V').
        rest_value: optional rest frequency/wavelength Quantity, required for
            conversions to/from velocity when the WCS carries none.
    """
    # If the WCS includes a rest frequency/wavelength, convert it to frequency
    # or wavelength first. This allows the possibility of changing the rest
    # frequency
    wcs_rv = get_rest_value_from_wcs(mywcs)
    inunit = u.Unit(mywcs.wcs.cunit[mywcs.wcs.spec])
    outunit = u.Unit(outunit)
    # If wcs_rv is set and speed -> speed, then we're changing the reference
    # location and we need to convert to meters or Hz first
    if ((parse_phys_type(inunit) == 'speed' and
         parse_phys_type(outunit) == 'speed' and
         wcs_rv is not None)):
        # Recurse through the rest value's own unit as an intermediate frame.
        mywcs = convert_spectral_axis(mywcs, wcs_rv.unit,
                                      ALL_CTYPES[parse_phys_type(wcs_rv.unit)],
                                      rest_value=wcs_rv)
        inunit = u.Unit(mywcs.wcs.cunit[mywcs.wcs.spec])
    elif (parse_phys_type(inunit) == 'speed' and parse_phys_type(outunit) == 'speed'
          and wcs_rv is None):
        # If there is no reference change, we want an identical WCS, since
        # WCS doesn't know about units *at all*
        newwcs = mywcs.deepcopy()
        return newwcs
        #crval_out = (mywcs.wcs.crval[mywcs.wcs.spec] * inunit).to(outunit)
        #cdelt_out = (mywcs.wcs.cdelt[mywcs.wcs.spec] * inunit).to(outunit)
        #newwcs.wcs.cdelt[newwcs.wcs.spec] = cdelt_out.value
        #newwcs.wcs.cunit[newwcs.wcs.spec] = cdelt_out.unit.to_string(format='fits')
        #newwcs.wcs.crval[newwcs.wcs.spec] = crval_out.value
        #newwcs.wcs.ctype[newwcs.wcs.spec] = out_ctype
        #return newwcs
    in_spec_ctype = mywcs.wcs.ctype[mywcs.wcs.spec]
    # Check whether we need to convert the rest value first
    ref_value = None
    if 'speed' in parse_phys_type(outunit):
        if rest_value is None:
            rest_value = wcs_rv
            if rest_value is None:
                raise ValueError("If converting from wavelength/frequency to speed, "
                                 "a reference wavelength/frequency is required.")
        ref_value = rest_value.to(u.Hz, u.spectral())
    elif 'speed' in parse_phys_type(inunit):
        # The rest frequency and wavelength should be equivalent
        if rest_value is not None:
            ref_value = rest_value
        elif wcs_rv is not None:
            ref_value = wcs_rv
        else:
            raise ValueError("If converting from speed to wavelength/frequency, "
                             "a reference wavelength/frequency is required.")
    # If the input unit is not linearly sampled, its linear equivalent will be
    # the 8th character in the ctype, and the linearly-sampled ctype will be
    # the 6th character
    # e.g.: VOPT-F2V
    lin_ctype = (in_spec_ctype[7] if len(in_spec_ctype) > 4 else in_spec_ctype[:4])
    lin_cunit = (LINEAR_CUNIT_DICT[lin_ctype] if lin_ctype in LINEAR_CUNIT_DICT
                 else mywcs.wcs.cunit[mywcs.wcs.spec])
    in_vcequiv = _parse_velocity_convention(in_spec_ctype[:4])
    out_ctype_conv = out_ctype[7] if len(out_ctype) > 4 else out_ctype[:4]
    if CTYPE_TO_PHYSICALTYPE[out_ctype_conv] == 'air wavelength':
        raise NotImplementedError("Conversion to air wavelength is not supported.")
    out_lin_cunit = (LINEAR_CUNIT_DICT[out_ctype_conv] if out_ctype_conv in
                     LINEAR_CUNIT_DICT else outunit)
    out_vcequiv = _parse_velocity_convention(out_ctype_conv)
    # Load the input values
    crval_in = (mywcs.wcs.crval[mywcs.wcs.spec] * inunit)
    # the cdelt matrix may not be correctly populated: need to account for cd,
    # cdelt, and pc
    cdelt_in = (mywcs.pixel_scale_matrix[mywcs.wcs.spec, mywcs.wcs.spec] *
                inunit)
    if in_spec_ctype == 'AWAV':
        warnings.warn("Support for air wavelengths is experimental and only "
                      "works in the forward direction (air->vac, not vac->air).",
                      ExperimentalImplementationWarning
                      )
        # Correct both the scale (via the derivative) and the reference value.
        cdelt_in = air_to_vac_deriv(crval_in) * cdelt_in
        crval_in = air_to_vac(crval_in)
        in_spec_ctype = 'WAVE'
    # 1. Convert input to input, linear
    if in_vcequiv is not None and ref_value is not None:
        crval_lin1 = crval_in.to(lin_cunit, u.spectral() + in_vcequiv(ref_value))
    else:
        crval_lin1 = crval_in.to(lin_cunit, u.spectral())
    cdelt_lin1 = cdelt_derivative(crval_in,
                                  cdelt_in,
                                  # equivalent: inunit.physical_type
                                  intype=CTYPE_TO_PHYSICALTYPE[in_spec_ctype[:4]],
                                  outtype=parse_phys_type(lin_cunit),
                                  rest=ref_value,
                                  linear=True
                                  )
    # 2. Convert input, linear to output, linear
    if ref_value is None:
        if in_vcequiv is not None:
            pass # consider raising a ValueError here; not clear if this is valid
        crval_lin2 = crval_lin1.to(out_lin_cunit, u.spectral())
    else:
        # at this stage, the transition can ONLY be relativistic, because the V
        # frame (as a linear frame) is only defined as "apparent velocity"
        crval_lin2 = crval_lin1.to(out_lin_cunit, u.spectral() +
                                   u.doppler_relativistic(ref_value))
    # For cases like VRAD <-> FREQ and VOPT <-> WAVE, this will be linear too:
    linear_middle = in_vcequiv == out_vcequiv
    cdelt_lin2 = cdelt_derivative(crval_lin1, cdelt_lin1,
                                  intype=parse_phys_type(lin_cunit),
                                  outtype=CTYPE_TO_PHYSICALTYPE[out_ctype_conv],
                                  rest=ref_value,
                                  linear=linear_middle)
    # 3. Convert output, linear to output
    if out_vcequiv is not None and ref_value is not None:
        crval_out = crval_lin2.to(outunit, out_vcequiv(ref_value) + u.spectral())
        #cdelt_out = cdelt_lin2.to(outunit, out_vcequiv(ref_value) + u.spectral())
        cdelt_out = cdelt_derivative(crval_lin2,
                                     cdelt_lin2,
                                     intype=CTYPE_TO_PHYSICALTYPE[out_ctype_conv],
                                     outtype=parse_phys_type(outunit),
                                     rest=ref_value,
                                     linear=True
                                     ).to(outunit)
    else:
        crval_out = crval_lin2.to(outunit, u.spectral())
        cdelt_out = cdelt_lin2.to(outunit, u.spectral())
    if crval_out.unit != cdelt_out.unit:
        # this should not be possible, but it's a sanity check
        raise ValueError("Conversion failed: the units of cdelt and crval don't match.")
    # A cdelt of 0 would be meaningless
    if cdelt_out.value == 0:
        raise ValueError("Conversion failed: the output CDELT would be 0.")
    newwcs = mywcs.deepcopy()
    if hasattr(newwcs.wcs,'cd'):
        newwcs.wcs.cd[newwcs.wcs.spec, newwcs.wcs.spec] = cdelt_out.value
        # todo: would be nice to have an assertion here that no off-diagonal
        # values for the spectral WCS are nonzero, but this is a nontrivial
        # check
    else:
        newwcs.wcs.cdelt[newwcs.wcs.spec] = cdelt_out.value
    newwcs.wcs.cunit[newwcs.wcs.spec] = cdelt_out.unit.to_string(format='fits')
    newwcs.wcs.crval[newwcs.wcs.spec] = crval_out.value
    newwcs.wcs.ctype[newwcs.wcs.spec] = out_ctype
    if rest_value is not None:
        if parse_phys_type(rest_value.unit) == 'frequency':
            newwcs.wcs.restfrq = rest_value.to(u.Hz).value
        elif parse_phys_type(rest_value.unit) == 'length':
            newwcs.wcs.restwav = rest_value.to(u.m).value
        else:
            raise ValueError("Rest Value was specified, but not in frequency or length units")
    return newwcs
def row_to_str(row):
    """Convert a df row to a string for insert into SQL database."""
    # Render as a Python list literal, then swap every square bracket for a
    # parenthesis (including any appearing in nested reprs, as before).
    return str(list(row)).translate(str.maketrans("[]", "()"))
def interpolate_affines(affines):
    """Fill identity placeholders in a block grid of 4x4 affine transforms.

    Blocks whose affine equals the identity are treated as missing and are
    filled by averaging the translation components of their 6-connected
    (face-adjacent) neighbors, repeating sweeps until no identities remain.
    Only the translation column is interpolated; rotation/scale parts stay
    at identity.

    Parameters
    ----------
    affines : ndarray
        Array of shape ``block_grid + (4, 4)`` with one homogeneous affine
        per grid block (first three axes form the block grid).

    Returns
    -------
    ndarray
        Copy of ``affines`` with identity entries' translations filled in.
        If every entry is the identity, the input is returned unchanged.
    """
    # get block grid
    block_grid = affines.shape[:3]
    # construct an all identities matrix for comparison
    all_identities = np.empty_like(affines)
    for i in range(np.prod(block_grid)):
        idx = np.unravel_index(i, block_grid)
        all_identities[idx] = np.eye(4)
    # if affines are all identity, just return
    if np.all(affines == all_identities):
        return affines
    # process continues until there are no identity matrices left
    new_affines = np.copy(affines)
    identities = True
    while identities:
        identities = False
        # loop over all affine matrices
        for i in range(np.prod(block_grid)):
            idx = np.unravel_index(i, block_grid)
            # if an identity matrix is found
            if np.all(new_affines[idx] == np.eye(4)):
                identities = True
                trans, denom = np.array([0, 0, 0]), 0
                # average translations from 6 connected neighborhood;
                # neighbors with an all-zero translation are skipped so
                # still-unfilled identity blocks don't drag the mean to 0
                for ax in range(3):
                    if idx[ax] > 0:
                        neighbor = tuple(
                            x-1 if j == ax else x for j, x in enumerate(idx)
                        )
                        neighbor_trans = new_affines[neighbor][:3, -1]
                        if not np.all(neighbor_trans == 0):
                            trans = trans + neighbor_trans
                            denom += 1
                    if idx[ax] < block_grid[ax]-1:
                        neighbor = tuple(
                            x+1 if j == ax else x for j, x in enumerate(idx)
                        )
                        neighbor_trans = new_affines[neighbor][:3, -1]
                        if not np.all(neighbor_trans == 0):
                            trans = trans + neighbor_trans
                            denom += 1
                # normalize then update matrix
                if denom > 0: trans /= denom
                new_affines[idx][:3, -1] = trans
    return new_affines
from tqdm import tqdm


def getLineMeasures(file_list, orders, names, err_cut=0):
    """Find line centers (in pixels) matching the given order/name lines.

    Args:
        file_list: paths of line-fit files readable by ``readFile``.
        orders: array of echelle order numbers, one per reference line.
        names: array of line names ("{wavelength:09.3f}" strings),
            parallel to ``orders``.
        err_cut: if > 0, discard measured lines whose fit error is
            >= ``err_cut`` before matching.

    Returns:
        (x_values, x_errors): float arrays of shape
        (len(file_list), len(orders)); entries remain NaN where a file
        has no measurement for that order/name.
    """
    # Default every measurement to NaN so missing lines stay identifiable.
    x_values = np.empty((len(file_list), len(orders)))
    x_values[:] = np.nan
    x_errors = np.empty((len(file_list), len(orders)))
    x_errors[:] = np.nan
    pd_keys = pd.DataFrame({'orders': orders.copy().astype(int),
                            'names': names.copy().astype(str)})
    # BUGFIX: the original did `import tqdm` and then called `tqdm(...)`,
    # which invokes the *module* and raises TypeError; the callable is
    # `tqdm.tqdm`, imported above.
    for file_num in tqdm(range(len(file_list))):
        # Load in line fit information
        file_name = file_list[file_num]
        try:
            x, m, w, e = readFile(file_name)
            m = m.astype(int)
            if err_cut > 0:
                mask = e < err_cut
                x = x[mask]
                m = m[mask]
                w = w[mask]
                e = e[mask]
        except ValueError:
            # Unreadable/malformed file: leave this row as NaN.
            continue
        # Identify which lines this exposure has
        for nord in np.unique(m):
            I = m == nord  # Mask for an order
            # Get identifying names: "(nord, wavelength string)"
            n = ["{0:09.3f}".format(wave) for wave in w[I]]
            xvl_dict = dict(zip(n, x[I]))
            err_dict = dict(zip(n, e[I]))
            ord_xval = np.array(pd_keys[pd_keys.orders == nord].names.map(xvl_dict))
            ord_errs = np.array(pd_keys[pd_keys.orders == nord].names.map(err_dict))
            x_values[file_num, pd_keys.orders == nord] = ord_xval
            x_errors[file_num, pd_keys.orders == nord] = ord_errs
    return x_values, x_errors
import os
def is_slackware():
    """
    Detect a Slackware host by the presence of its version file.

    :returns: **bool** that is True when /etc/slackware-version exists
    """
    marker = '/etc/slackware-version'
    return os.path.exists(marker)
def signature_exempt(view_func):
    """Mark a view function as being exempt from signature and apikey check.

    Args:
        view_func: the view callable to wrap.

    Returns:
        A wrapper carrying ``view_func``'s metadata (via ``functools.wraps``)
        with its ``signature_exempt`` attribute set to True.
    """
    @wraps(view_func)
    def wrapped_view(*args, **kwargs):
        return view_func(*args, **kwargs)
    # Set the flag *after* wraps() has copied view_func.__dict__; the
    # original set it first, so a pre-existing `signature_exempt`
    # attribute on view_func could clobber the exemption.
    wrapped_view.signature_exempt = True
    return wrapped_view
def setup(args):
    """
    Create a frozen detectron2 config from the CLI arguments and
    perform the basic setup (logging, output dir, etc.).
    """
    config = get_cfg()
    add_tridentnet_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
def get_intersections(line, potential_lines, nodes, precision):
    """
    Record where `line` crosses the cost-area boundary (x=0, y=0, y=-x+1)
    and the lines defined by the other predictor combinations, tagging
    each crossing point in `nodes`.
    """
    slope, intercept = line
    # Crossing of the y axis (x = 0)
    y_axis_hit = get_intersection_point_yaxis(intercept)
    if y_axis_hit:
        nodes[y_axis_hit].update({line, 'y_axis'})
    # Crossing of the x axis (y = 0)
    x_axis_hit = get_intersection_point_xaxis(slope, intercept, precision)
    if x_axis_hit:
        nodes[x_axis_hit].update({line, 'x_axis'})
    # Crossing of the hypotenuse y = -x + 1
    hyp_hit = get_intersection_point(slope, intercept, -1, 1, precision)
    if hyp_hit:
        nodes[hyp_hit].update({line, 'hypotenuse'})
    # Crossings with the remaining candidate lines
    return get_intersection_with_lines(slope, intercept, potential_lines,
                                       nodes, precision)
def subtract_loss_from_gain(gain_load, loss_load):
    """Combine paired gain/loss DataCollections into net-load collections."""
    net_loads = []
    for gain_col, loss_col in zip(gain_load, loss_load):
        net = gain_col - loss_col
        # Strip the 'Gain ' prefix so the metadata reflects the net load.
        meta = net.header.metadata
        meta['type'] = meta['type'].replace('Gain ', '')
        net_loads.append(net)
    return net_loads
def zero_out_noisy_epochs(psg, sample_rate, period_length_sec,
                          max_times_global_iqr=20):
    """
    Zero (channel-wise) every epoch of 'period_length_sec' seconds in
    which any absolute sample exceeds 'max_times_global_iqr' times that
    channel's global IQR across time.
    Args:
        psg: ndarray of shape [N, C] with PSG data
        sample_rate: samples per second in the PSG
        period_length_sec: length of one epoch/period/segment in seconds
        max_times_global_iqr: how many times a value must exceed the
                              channel's global IQR to count as an outlier
    Returns:
        The PSG ndarray of shape [N, C] with noisy epochs zeroed.
        A list with one index-array per channel holding the epochs that
        were set to zero.
    """
    zeroed_epochs = []
    for channel in range(psg.shape[-1]):
        signal = psg[..., channel]
        # Channel-wide inter-quartile range fixes the outlier threshold
        q75, q25 = np.percentile(signal, [75, 25])
        cutoff = (q75 - q25) * max_times_global_iqr
        # View the channel as [n_epochs, samples_per_epoch]
        n_epochs = int(signal.shape[0] / (sample_rate * period_length_sec))
        epochs = signal.reshape(n_epochs, -1)
        # Any sample beyond the cutoff marks its whole epoch as noisy
        noisy = np.unique(np.where(np.abs(epochs) > cutoff)[0])
        epochs[noisy] = 0.
        psg[:, channel] = epochs.reshape(-1)
        zeroed_epochs.append(noisy)
    return psg, zeroed_epochs
import subprocess
def reduce_language_model(language_model_path, out_language_model_path):
    """Convert/reduce a language model with CMU Sphinx's ``sphinx_lm_convert``.

    :param language_model_path: path of the input language model
    :param out_language_model_path: path where the converted model is written
    :return: ``out_language_model_path``
    :raises RuntimeError: if ``sphinx_lm_convert`` exits with nonzero status
    """
    command = ['sphinx_lm_convert',
               '-i', language_model_path,
               '-o', out_language_model_path,
               ]
    # An argument list (shell=False) avoids shell interpolation of paths.
    result = subprocess.run(command)
    if result.returncode != 0:
        # RuntimeError is a subclass of Exception, so existing callers
        # catching the old bare Exception still work.
        raise RuntimeError('Error in language model reduction')
    return out_language_model_path
from typing import Tuple
from typing import List
def start_training(channel: Channel) -> Tuple[List[ndarray], int, int]:
    """Start a training initiation exchange with a coordinator.

    The decoded contents of the coordinator's reply are returned.

    Args:
        channel (~grpc.Channel): A gRPC channel to the coordinator.

    Returns:
        ~typing.List[~numpy.ndarray]: The weights of a global model to train on.
        int: The number of epochs to train.
        int: The epoch base of the global model.
    """
    stub: CoordinatorStub = CoordinatorStub(channel=channel)
    # Ask the coordinator to begin a training round.
    response: StartTrainingReply = stub.StartTraining(
        request=StartTrainingRequest()
    )
    logger.info("Participant received reply", reply=type(response))
    global_weights: List[ndarray] = [
        proto_to_ndarray(weight) for weight in response.weights
    ]
    return global_weights, response.epochs, response.epoch_base
import re
def has_forbidden(mylist) -> bool:
    """Return True when the string matches the FORBIDDEN pattern, i.e.
    contains one of the banned substrings "ab", "cd", "pq", "xy"."""
    return re.search(FORBIDDEN, mylist) is not None
from main import PAGLuxembourg
def classFactory(iface): # pylint: disable=invalid-name
    """Load PagLuxembourg class from file PagLuxembourg.

    QGIS plugin entry point: QGIS calls this to instantiate the plugin,
    so the camelCase name is required by the plugin API.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    # Hand the QGIS interface to the plugin instance.
    return PAGLuxembourg(iface)
def is_official_target(target_name, version):
    """ Check whether a target is part of the official release for a version.

    Returns (True, None) if the target is part of the official release for
    the given version; returns (False, 'reason') otherwise.

    target_name: Name of the target (ex. 'K64F')
    version: The release version string. Should be a string contained within RELEASE_VERSIONS
    """
    result = True
    reason = None
    target = TARGET_MAP[target_name]
    if hasattr(target, 'release_versions') and version in target.release_versions:
        if version == '2':
            # For version 2, either ARM or uARM toolchain support is required
            required_toolchains = set(['ARM', 'uARM'])
            if not len(required_toolchains.intersection(set(target.supported_toolchains))) > 0:
                result = False
                reason = ("Target '%s' must support " % target.name) + \
                    ("one of the following toolchains to be included in the mbed 2.0 ") + \
                    (("official release: %s" + linesep) % ", ".join(required_toolchains)) + \
                    ("Currently it is only configured to support the ") + \
                    ("following toolchains: %s" % ", ".join(target.supported_toolchains))
        elif version == '5':
            # For version 5, ARM, GCC_ARM, and IAR toolchain support is required
            required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
            required_toolchains_sorted = sorted(required_toolchains)
            supported_toolchains = set(target.supported_toolchains)
            supported_toolchains_sorted = sorted(supported_toolchains)
            if not required_toolchains.issubset(supported_toolchains):
                result = False
                reason = ("Target '%s' must support " % target.name) + \
                    ("ALL of the following toolchains to be included in the mbed OS 5.0 ") + \
                    (("official release: %s" + linesep) % ", ".join(required_toolchains_sorted)) + \
                    ("Currently it is only configured to support the ") + \
                    ("following toolchains: %s" % ", ".join(supported_toolchains_sorted))
            elif not target.default_build == 'standard':
                result = False
                reason = ("Target '%s' must set the 'default_build' " % target.name) + \
                    ("to 'standard' to be included in the mbed OS 5.0 ") + \
                    ("official release." + linesep) + \
                    ("Currently it is set to '%s'" % target.default_build)
        else:
            result = False
            # BUGFIX: the original interpolated two placeholders with a
            # single non-tuple argument (TypeError at runtime) and
            # concatenated the version list instead of interpolating it.
            reason = ("Target '%s' has set an invalid release version of '%s'"
                      % (target.name, version)) + \
                (" Please choose from the following release versions: %s"
                 % ', '.join(RELEASE_VERSIONS))
    else:
        result = False
        if not hasattr(target, 'release_versions'):
            reason = "Target '%s' does not have the 'release_versions' key set" % target.name
        elif not version in target.release_versions:
            reason = "Target '%s' does not contain the version '%s' in its 'release_versions' key" % (target_name, version)
    return result, reason
from typing import Dict
from typing import Union
from typing import Any
def _convert_to_dict_or_str(elements_map: Dict[str, Element]) -> Dict[str, Union[str, Dict[Any, Any]]]:
    """Collapse a mapping of xml elements into nested dicts or text values."""
    converted = {}
    for name, element in elements_map.items():
        if element or element.items():
            # Children or attributes present: recurse via XmlDictElement.
            converted[name] = XmlDictElement(element)
        else:
            # Leaf element: keep just its text payload.
            converted[name] = element.text
    return converted
def vsi_tecaji():
    """
    Return every PGD Hrušica course as (id, naziv) rows.
    """
    query = """
        SELECT id, naziv
        FROM tecaji
    """
    return conn.execute(query).fetchall()
def cosine(u, v, dim=-1):
    """Cosine similarity of u and v along `dim`: dot product over L2 norms."""
    dot = (u * v).sum(dim=dim)
    norms = u.norm(dim=dim, p=2) * v.norm(dim=dim, p=2)
    return dot / norms
from datetime import datetime
def retrive_cache_data(format: str, query: str) -> dict[int, Book]:
    """
    Return the cached search results for `query`, fetching fresh results
    from Libgen when nothing is cached or the cached entry has expired.
    """
    now = datetime.now()
    # Lazily create the per-format cache bucket.
    per_format = book_cache.setdefault(format, {})
    entry = per_format.get(query)
    if entry is None or entry["expires"] < now:
        # Cache miss or stale entry: query Libgen and cache for one hour.
        filters = {"extension": format} if format != "all" else {}
        results = Libgen.search_title(query, filters)
        entry = {
            "expires": now + timedelta(hours=1),
            "data": {
                rank + 1: {"book": found, "download_url": None, "cover_url": None}
                for rank, found in enumerate(results)
            },
        }
        per_format[query] = entry
    return entry.get("data")
def get_orders_loadings_palette_number(client_orders, value):
    """
    Collect the OrdersLoadingPlaces rows of the given orders whose loading
    place carries a palette number with pallets_number equal to `value`.
    :param client_orders: orders of profile
    :param value: pallets_number to match against
    :return: list of matching OrdersLoadingPlaces objects (possibly empty)
    """
    return [
        loading
        for order in client_orders
        for loading in OrdersLoadingPlaces.objects.filter(order=order)
        if loading.loading_place.palette_number
        and loading.loading_place.palette_number.pallets_number == value
    ]
import math
def rotmat(x=0, y=0, z=0):
    """Rotation Matrix function

    Creates and returns the combined rotation matrix Rx @ Ry @ Rz.

    Parameters
    ----------
    x,y,z : float, optional
        Angle, which will be converted to radians, in
        each respective axis to describe the rotations.
        The default is 0 for each unspecified angle.

    Returns
    -------
    Rxyz : list
        The product of the matrix multiplication.

    Examples
    --------
    >>> import numpy as np
    >>> from .pyCGM import rotmat
    >>> x = 0.5
    >>> y = 0.3
    >>> z = 0.8
    >>> np.around(rotmat(x,y,z),8)
    array([[ 0.99988882, -0.01396199,  0.00523596],
           [ 0.01400734,  0.99986381, -0.00872642],
           [-0.00511341,  0.00879879,  0.99994822]])
    >>> x = 0.5
    >>> np.around(rotmat(x),8)
    array([[ 1.        ,  0.        ,  0.        ],
           [ 0.        ,  0.99996192, -0.00872654],
           [ 0.        ,  0.00872654,  0.99996192]])
    >>> x = 1
    >>> y = 1
    >>> np.around(rotmat(x,y),8)
    array([[ 9.9984770e-01,  0.0000000e+00,  1.7452410e-02],
           [ 3.0459000e-04,  9.9984770e-01, -1.7449750e-02],
           [-1.7449750e-02,  1.7452410e-02,  9.9969541e-01]])
    """
    x = math.radians(x)
    y = math.radians(y)
    z = math.radians(z)
    # Elementary rotations about each axis.
    Rx = [[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]]
    Ry = [[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]]
    Rz = [[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]]
    # Compose in X, Y, Z order. (The original also computed an unused
    # Ryx/Ryxz pair, doubling the work per call; that dead code is removed.)
    Rxy = matrixmult(Rx, Ry)
    return matrixmult(Rxy, Rz)
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    Add the horizon energy-target components to the Pyomo model: a
    shortage variable, its (possibly zeroed-out) violation expression,
    and the constraint that delivered energy plus allowed shortage must
    meet the target.

    :param m: the Pyomo model object being populated
    :param d: the dynamic components class object
    :param scenario_directory: unused here; part of the standard signature
    :param subproblem: unused here; part of the standard signature
    :param stage: unused here; part of the standard signature
    :return:
    """
    # Shortage (MWh) below the horizon energy target.
    m.Horizon_Energy_Target_Shortage_MWh = Var(
        m.ENERGY_TARGET_ZONE_BLN_TYPE_HRZS_WITH_ENERGY_TARGET,
        within=NonNegativeReals
    )
    def violation_expression_rule(mod, z, bt, h):
        # Multiplying by energy_target_allow_violation (0/1 per zone)
        # forces the shortage to zero where violations are disallowed.
        return mod.Horizon_Energy_Target_Shortage_MWh[z, bt, h] \
            * mod.energy_target_allow_violation[z]
    m.Horizon_Energy_Target_Shortage_MWh_Expression = Expression(
        m.ENERGY_TARGET_ZONE_BLN_TYPE_HRZS_WITH_ENERGY_TARGET,
        rule=violation_expression_rule
    )
    def energy_target_rule(mod, z, bt, h):
        """
        Total delivered energy-target-eligible energy plus any allowed
        shortage must meet or exceed the target.
        :param mod:
        :param z:
        :param bt:
        :param h:
        :return:
        """
        return mod.Total_Delivered_Horizon_Energy_Target_Energy_MWh[z, bt, h] \
            + mod.Horizon_Energy_Target_Shortage_MWh_Expression[z, bt, h] \
            >= mod.Horizon_Energy_Target[z, bt, h]
    m.Horizon_Energy_Target_Constraint = Constraint(
        m.ENERGY_TARGET_ZONE_BLN_TYPE_HRZS_WITH_ENERGY_TARGET,
        rule=energy_target_rule
    )
def grow_rate(n, k, nu_c, nu_d, sigma, g, dp, rho_c, rho_d, K):
    """
    Residual of the instability growth-rate equation on a gas bubble.

    The growth-rate relation in Grace et al. is written as a root-finding
    problem n = f(k); this evaluates the residual for a candidate (n, k).

    Returns
    -------
    res : float
        The residual of the growth-rate equation expressed as a root-finding
        problem.

    Notes
    -----
    This function is used by the `grace()` function for maximum stable
    particle size. It should not be called directly.
    """
    # Derived quantities
    m_c = np.sqrt(k**2 + n / nu_c)
    m_d = np.sqrt(k**2 + n / nu_d)
    mu_c = nu_c / rho_c
    # Split the residual into its two additive terms for readability
    term_a = (sigma * k**3 - g * k * dp + n**2 * (rho_c + rho_d)) * \
        (k + m_c + K * (k + m_d))
    term_b = 4 * n * k * mu_c * (k + K * m_d) * (K * k + m_c)
    return term_a + term_b
def cg_semirelaxed_fused_gromov_wasserstein(C1:th.Tensor,
                                            A1:th.Tensor,
                                            p:th.Tensor,
                                            C2:th.Tensor,
                                            A2:th.Tensor,
                                            alpha:float,
                                            symmetry:bool=True,
                                            init_mode:str='product',
                                            T_init:th.Tensor=None,
                                            use_log:bool=False,
                                            eps:float=10**(-5),
                                            max_iter:int=1000,
                                            seed:int=0,
                                            verbose:bool=False,
                                            device:str='cpu',
                                            dtype:type=th.float32):
    r"""
    Conditional gradient algorithm for semi-relaxed fused gromov-wasserstein:
    \min_{T} \alpha * <L(C_1, C_2) \otimes T, T> + (1-\alpha) * <D, T>

    C1/C2 are the source/target structure matrices, A1/A2 the node feature
    matrices (same feature dimension), p the source distribution, and
    alpha the structure/feature trade-off.

    The implementation uses the generalization of the Frank-Wolfe algorithm detailed
    in Algorithm 1. Section 3.2 of the main paper.
    This general form is discussed in Algorithm 3. of section 7.3.1 in the supplementary material.
    It comes down to consider:
        - linear_cost = (1-\alpha) * D
        - alpha = \alpha
    """
    N1 = A1.shape[0]
    N2 = A2.shape[0]
    d = A1.shape[1]
    # Pairwise *squared* Euclidean distances between feature rows of A1/A2
    # via the expansion |a|^2 + |b|^2 - 2 a.b (no square root is taken).
    FS2 = (A1**2) @ th.ones((d, N2), dtype=dtype, device=device)
    FT2 = th.ones((N1, d), dtype=dtype, device=device) @ (A2**2).T
    D = FS2 + FT2 - 2 * A1 @ A2.T
    return cg_semirelaxed(C1, p, C2, alpha, (1 - alpha) * D, init_mode, T_init,
                          symmetry, use_log, eps, max_iter, seed, verbose, device, dtype)
def Vinv_terminal_time_series(m_t, Vdc_t):
    """Generate a time series of inverter terminal voltage (m * Vdc / 2).

    Args:
        m_t: array of modulation indices.
        Vdc_t: array of DC-link voltages, same length as ``m_t``.

    Returns:
        Element-wise ``m_t * (Vdc_t / 2)``; on any error the exception is
        routed to ``LogUtil.exception_handler()`` (implicitly returning None).
    """
    try:
        # Explicit check instead of the original `assert ... != None`:
        # asserts are stripped under `python -O`.
        if len(m_t) != len(Vdc_t):
            raise ValueError('m_t and Vdc_t must have the same length')
        return m_t*(Vdc_t/2)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        LogUtil.exception_handler()
def render(ob, ns):
    """Calls the object, possibly a document template, or just returns
    it if not callable. (From DT_Util.py)

    `ns` is the namespace handed to DocumentTemplate-style renderers.
    """
    if hasattr(ob, '__render_with_namespace__'):
        # DTML-style objects render against an explicit namespace.
        ob = ZRPythonExpr.call_with_ns(ob.__render_with_namespace__, ns)
    else:
        # items might be acquisition wrapped
        base = aq_base(ob)
        # item might be proxied (e.g. modules might have a deprecation
        # proxy)
        base = removeAllProxies(base)
        if callable(base):
            try:
                if getattr(base, 'isDocTemp', 0):
                    # Document templates expect the namespace call style.
                    ob = ZRPythonExpr.call_with_ns(ob, ns, 2)
                else:
                    ob = ob()
            except NotImplementedError:
                # Some callables deliberately refuse being rendered;
                # fall through and return the object unchanged.
                pass
    return ob
def GetFlakeInformation(flake, max_occurrence_count, with_occurrences=True):
  """Gets information for a detected flakes.
  Gets occurrences of the flake and the attached monorail issue.
  Args:
    flake(Flake): Flake object for a flaky test.
    max_occurrence_count(int): Maximum number of occurrences to fetch.
    with_occurrences(bool): If the flake must be with occurrences or not.
      For flakes reported by Flake detection, there should always be
      occurrences, but it's not always true for flakes reported by
      Flake Analyzer, ignore those flakes for now.
  Returns:
    flake_dict(dict): A dict of information for the test. Including data from
    its Flake entity, its flake issue information and information of all its
    flake occurrences. None if the flake must have occurrences but has none.
  """
  occurrences = []
  # Fetch occurrences by flake type, in decreasing order of impact; the
  # remaining budget (max_occurrence_count) shrinks as types are fetched.
  for flake_type in [
      FlakeType.CQ_FALSE_REJECTION, FlakeType.RETRY_WITH_PATCH,
      FlakeType.CI_FAILED_STEP, FlakeType.CQ_HIDDEN_FLAKE
  ]:
    typed_occurrences = _FetchFlakeOccurrences(flake, flake_type,
                                               max_occurrence_count)
    occurrences.extend(typed_occurrences)
    if max_occurrence_count:
      max_occurrence_count = max_occurrence_count - len(typed_occurrences)
      if max_occurrence_count == 0:
        # Bails out if the number of occurrences with higher impact has hit the
        # cap.
        break
  if not occurrences and with_occurrences:
    # Flake must be with occurrences, but there is no occurrence, bail out.
    return None
  # Makes sure occurrences are sorted by time_happened in descending order,
  # regardless of types.
  occurrences.sort(key=lambda x: x.time_happened, reverse=True)
  flake_dict = flake.to_dict()
  flake_dict['occurrences'] = _GetGroupedOccurrencesByBuilder(occurrences)
  flake_dict['flake_counts_last_week'] = _GetFlakeCountsList(
      flake.flake_counts_last_week)
  flake_issue = GetFlakeIssue(flake)
  # Only attach issue details when the monorail issue is still open.
  if flake_issue and flake_issue.status and flake_issue.status in OPEN_STATUSES:
    flake_dict['flake_issue'] = flake_issue.to_dict()
    flake_dict['flake_issue']['issue_link'] = FlakeIssue.GetLinkForIssue(
        flake_issue.monorail_project, flake_issue.issue_id)
    flake_dict['flake_issue'][
        'last_updated_time_in_monorail'] = _GetLastUpdatedTimeDelta(flake_issue)
    flake_dict['culprits'], flake_dict['sample_analysis'] = (
        _GetFlakeAnalysesResults(flake_issue.issue_id))
  return flake_dict
def get_prop_cycle():
    """Return the active prop cycle, falling back to the deprecated
    'axes.color_cycle' rcParam when 'axes.prop_cycle' is unset."""
    cycle = rcParams['axes.prop_cycle']
    if cycle is None and 'axes.color_cycle' in rcParams:
        # Legacy color list -> equivalent color cycler.
        cycle = cycler('color', rcParams['axes.color_cycle'])
    return cycle
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.
  Arguments:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.
  Returns:
      A tensor.
  """
  # Note: nn.sigmoid_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # transform back to logits: clip to [eps, 1-eps] first so that
    # log(p / (1 - p)) stays finite at p == 0 or p == 1.
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = math_ops.log(output / (1 - output))
  return sigmoid_cross_entropy_with_logits(labels=target, logits=output)
def create_all_snapshots(volume_ids):
    """
    Creates the snapshots of all volumes in the provided list.
    Params:
        volume_ids (list): List of volumes attached to the instance
    Returns:
        bool: True once a snapshot has been requested for every volume
        (the original docstring said None, which did not match the code)
    """
    for i in volume_ids:
        snapshot(i)
    return True
def stack_subsample_frames(x, stacking=1, subsampling=1):
    """ Stacks frames together across feature dim, and then subsamples.

    Args:
        x: tensor of shape [FEAT, TIME].
        stacking: number of consecutive frames concatenated onto the
            feature axis.
        subsampling: keep every `subsampling`-th stacked frame.

    Returns:
        Tensor of shape [FEAT * stacking, ~TIME / subsampling].
    """
    seq = []
    x_len = tf.shape(x)[1]
    for offset in range(stacking):
        # Shifted, strided view of the input for this stack position.
        seq.append(x[:, offset:x_len - stacking + 1 + offset:subsampling])
    # BUGFIX: removed a stray debug `print(seq)` left in the pipeline.
    return tf.concat(seq, axis=0)
def compute_TVL1(prev, curr, TVL1, bound=20):
    """
    Args:
        prev (numpy.ndarray): a previous video frame, dimension is
            `height` x `width`.
        curr (numpy.ndarray): a current video frame, dimension is
            `height` x `width`.
        TVL1: an OpenCV DualTVL1 optical-flow object exposing
            ``calc(prev, curr, None)``, e.g. from
            ``cv2.optflow.DualTVL1OpticalFlow_create()``.
        bound (int): specify the maximum and minimum of optical flow.
    Return:
        flow (numpy.ndarray): optical flow quantized to ints in [0, 255].
    """
    flow = TVL1.calc(prev, curr, None)
    assert flow.dtype == np.float32
    # Map [-bound, bound] -> [0, 255], quantize, then clamp (np.clip
    # replaces the original two masked assignments; same result).
    flow = (flow + bound) * (255.0 / (2 * bound))
    flow = np.round(flow).astype(int)
    return np.clip(flow, 0, 255)
def _fetch_all_namespace_permissions(cursor):
    """
    Fetches all user-namespace-permissions mapping registered with Herd.
    Also reconciles public-read grants per namespace and ensures a user
    schema exists for every user seen.
    :param: cursor to run hive queries
    :return: list of dicts, one per namespace, each with the namespace
        name and the users that have READ on it
    """
    namespaces = _fetch_all_namespaces()
    user_namespace_permissions = []
    all_users = set()
    for namespace in namespaces:
        _print_info('Fetching namespace permissions for namespace: {}'.format(
            namespace))
        response = _fetch_herd_session() \
            .get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,
                                        HERD_REST_BASE_PATH,
                                        '/userNamespaceAuthorizations/namespaces/{}').format(
            namespace)) \
            .json()
        public_read = False
        namespace_users = []
        # Collect users with READ; track the sentinel public-read user.
        for authorization in response['userNamespaceAuthorizations']:
            if 'READ' in authorization['namespacePermissions']:
                namespace_users.append(
                    authorization['userNamespaceAuthorizationKey']['userId'])
                # add each user to the global users set
                all_users.add(
                    authorization['userNamespaceAuthorizationKey']['userId'])
            # check if read-all is enabled on namespace
            if authorization['userNamespaceAuthorizationKey']['userId'] == PUBLIC_READ_USER:
                public_read = True
        _print_info(
            'Found {} users with READ permissions on namespace: {}'.format(
                len(namespace_users), namespace))
        user_namespace_permissions.append({
            'namespace': namespace,
            'users': namespace_users
        })
        # grant read to all users if read-all is enabled, otherwise - revoke
        _print_info(
            'Public read option enabled on namespace: \'{}\'? {}'.format(
                namespace, public_read))
        _manage_public_read(cursor, namespace, public_read)
    # manage user-schemas for all users
    for user in all_users:
        _create_user_schema(cursor, user)
    return user_namespace_permissions
def for_in_right(obj, callback=None):
    """This function is like :func:`for_in` except it iterates over the
    properties in reverse order.
    Args:
        obj (list|dict): Object to process.
        callback (mixed): Callback applied per iteration.
    Returns:
        list|dict: `obj`.
    Example:
        >>> data = {'product': 1}
        >>> def cb(v): data['product'] *= v
        >>> for_in_right([1, 2, 3, 4], cb)
        [1, 2, 3, 4]
        >>> data['product'] == 24
        True
    See Also:
        - :func:`for_in_right` (main definition)
        - :func:`for_own_right` (alias)
    .. versionadded:: 1.0.0
    """
    # Walk the object in reverse, stopping early once the callback
    # returns False (same semantics as the original lazy generator
    # advanced once with next()).
    for ret, _, _, _ in itercallback(obj, callback, reverse=True):
        if ret is False:
            break
    return obj
def metadata_fake(batch_size):
    """Build a fake Metadata batch with random UK OSGB centers and t0 times."""
    # Random lat/lon within the UK, converted to OSGB grid coordinates.
    lat = np.random.uniform(51, 55, batch_size)
    lon = np.random.uniform(-2.5, 1, batch_size)
    x_centers_osgb, y_centers_osgb = lat_lon_to_osgb(lat=lat, lon=lon)
    # Random t0 datetimes, one per example.
    t0_datetimes_utc = make_t0_datetimes_utc(batch_size)
    return Metadata(
        batch_size=batch_size,
        x_center_osgb=list(x_centers_osgb),
        y_center_osgb=list(y_centers_osgb),
        t0_datetime_utc=list(t0_datetimes_utc),
    )
def delete_document(ix: str, docid: str):
    """
    Delete the document with id `docid` from index `ix`.

    Requires the WRITER role on the index; responds 404 when the
    document does not exist. (The previous docstring described a PUT
    update endpoint and did not match this handler.)
    """
    check_role(Role.WRITER, _index(ix))
    try:
        elastic.delete_document(ix, docid)
    except elasticsearch.exceptions.NotFoundError:
        abort(404)
    return '', HTTPStatus.OK
def sentence_segment(doc, candidate_pos):
    """Per sentence, keep lowercased tokens whose POS is in candidate_pos,
    skipping stop words and single-character tokens."""
    return [
        [
            token.text.lower()
            for token in sent
            if token.pos_ in candidate_pos
            and token.is_stop is False
            and len(token.text) > 1
        ]
        for sent in doc.sents
    ]
def get_speckle_spatial_freq(image, pos, cx, cy, lambdaoverd, angle=None):
    """Return the spatial frequency of a speckle in the aperture-mask area.

    Args:
        image: unused; kept for interface compatibility.
        pos: speckle pixel position(s).
        cx, cy: center coordinates in pixels.
        lambdaoverd: number of pixels per lambda/D.
        angle: rotation in degrees applied (negated) to the frequency vector.

    Returns:
        The rotated spatial-frequency vector k_xy in lambda/D units.
    """
    # lambdaoverd is in pixels per lambda over D
    k_xy = (np.roll(pos, 1, axis=0) - [cx, cy]) / lambdaoverd
    # BUGFIX: removed a leftover `ipdb.set_trace()` breakpoint that halted
    # execution here; also dropped unused nx/ny and dead commented code.
    return snm.rotateXY(k_xy[0], k_xy[1], thetadeg=-1.0 * angle)
def verify_ping(
    device,
    address,
    loss_rate=0,
    count=None,
    max_time=30,
    check_interval=10):
    """ Verify ping loss rate on ip address provided

        Repeatedly pings `address` from `device` until the parsed
        loss-rate equals `loss_rate` or `max_time` elapses, waiting
        `check_interval` seconds between attempts.

        Args:
            device ('obj'): Device object
            address ('str'): Address value
            loss_rate ('int'): Expected loss rate value
            count ('int'): Count value for ping command
            max_time (`int`): Max time, default: 30
            check_interval (`int`): Check interval, default: 10

        Returns:
            Boolean

        Raises:
            None
    """
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        if count:
            cmd = 'ping {address} count {count}'.format(
                address=address,
                count=count
            )
        else:
            cmd = 'ping {address}'.format(
                address=address
            )
        try:
            out = device.parse(cmd)
        except SchemaEmptyParserError:
            # Nothing parseable yet; wait before the next attempt.
            timeout.sleep()
            continue
        # Parsed structure contains ping.statistics.loss-rate; Dq pulls
        # the first "loss-rate" value anywhere in the parsed output.
        loss_rate_found = Dq(out).get_values("loss-rate", 0)
        if loss_rate_found == loss_rate:
            return True
        # BUGFIX: the original re-pinged immediately on a loss-rate
        # mismatch, busy-polling the device; honor check_interval here
        # as well.
        timeout.sleep()
    return False
from pathlib import Path
import argparse
def add_server_arguments(parser):
    """Add the --bind, --credentials and TLS options to an argparse parser"""
    def hostportsplit_helper(arg):
        """Wrapper around hostportsplit that gives better error messages than
        'invalid hostportsplit value'"""
        try:
            return hostportsplit(arg)
        except ValueError:
            # argparse's parser.error() raises SystemExit itself, so the
            # `raise` below never re-raises in practice.
            # NOTE(review): if error() were overridden to *return*, raising
            # its None return value would be a TypeError — confirm intended.
            raise parser.error("Invalid argument to --bind." +
                    " Did you mean --bind '[%s]'?" % arg
                    if arg.count(':') >= 2 and '[' not in arg
                    else " See --help-bind for details.")
    parser.add_argument('--bind', help="Host and/or port to bind to (see --help-bind for details)", type=hostportsplit_helper, default=None)
    parser.add_argument('--credentials', help="JSON file pointing to credentials for the server's identity/ies.", type=Path)
    # These are to be eventually migrated into credentials
    parser.add_argument('--tls-server-certificate', help="TLS certificate (chain) to present to connecting clients (in PEM format)", metavar="CRT")
    parser.add_argument('--tls-server-key', help="TLS key to load that supports the server certificate", metavar="KEY")
    parser.add_argument('--help-bind', help=argparse.SUPPRESS, action=_HelpBind)
def get_default(key):
    """ Produce the default value for *key* by invoking its registered factory. """
    # NOTE(review): raises TypeError (None is not callable) when no
    # factory is registered for `key` — confirm that is intended.
    return registry.defaults.get(key)()
from typing import List
def antisymmetric(r: Relation) -> (bool, List):
"""Kiểm tra tính phản xứng của r"""
antisymmetric_tuple = []
for x, y in r:
if x == y:
continue
if (y, x) in r:
return False, [((x, y), (y, x))]
antisymmetric_tuple.append(((x, y), (y, x)))
return True, antisymmetric_tuple | d7a7900192850a9b86a56263fec5daea551a034f | 27,976 |
def calc_negative_predictive_value(cause, actual, predicted):
    """Calculate negative predictive value (NPV) for a single cause

    NPV is the share of predictions correctly determined not to belong
    to the given cause among all negative predictions:

    .. math::

        NPV = \\frac{TN}{NP} = \\frac{TN}{TN + FP}

    where TN is the number of true negative predictions, NP the number of
    negative predictions, and FP the number of false positives.

    Args:
        cause: a label in the actual and predicted series
        actual (pd.Series): true individual level classification
        predicted (pd.Series): individual level prediction

    Returns:
        float (NaN when nothing was predicted negative)
    """
    negatives = predicted != cause
    n_negatives = negatives.sum()
    if not n_negatives:
        # No negative predictions: NPV is undefined.
        return np.nan
    true_negatives = ((actual != cause) & negatives).sum()
    return true_negatives / n_negatives
def print_atom_swap(swap):
    """Render an atom-swap entry ("id1 id2") for a DL CONTROL file."""
    return f'{swap["id1"]} {swap["id2"]}'
import torch
import io
def export_onnx_model(model, inputs):
    """
    Trace and export a model to onnx format.

    Args:
        model (nn.Module): the torch model to trace; every submodule must
            already be in eval mode (asserted below).
        inputs (torch.Tensor): the model will be called by `model(*inputs)`

    Returns:
        an onnx model, optimized with a fixed set of onnxoptimizer passes

    Raises:
        AssertionError: if `model` is not an ``nn.Module``, if any submodule
            is still in training mode, or if a required optimizer pass is
            missing from this onnxoptimizer build.
    """
    assert isinstance(model, torch.nn.Module)
    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training
    model.apply(_check_eval)
    logger.info("Beginning ONNX file converting")
    # Export the model to ONNX (traced into an in-memory buffer).
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                # Fall back to ATen ops for operators with no ONNX equivalent.
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True, # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())
    logger.info("Completed convert of ONNX model")
    # Apply ONNX's Optimization
    logger.info("Beginning ONNX model path optimization")
    all_passes = onnxoptimizer.get_available_passes()
    passes = ["extract_constant_to_initializer", "eliminate_unused_initializer", "fuse_bn_into_conv"]
    # Fail fast if this onnxoptimizer build lacks any required pass.
    assert all(p in all_passes for p in passes)
    onnx_model = onnxoptimizer.optimize(onnx_model, passes)
    logger.info("Completed ONNX model path optimization")
    return onnx_model
def dot_product_area_attention(q,
                               k,
                               v,
                               bias,
                               dropout_rate=0.0,
                               image_shapes=None,
                               name=None,
                               attention_image_summary=None,
                               save_weights_to=None,
                               dropout_broadcast_dims=None,
                               max_area_width=1,
                               max_area_height=1,
                               memory_height=1,
                               area_key_mode="mean",
                               area_value_mode="sum",
                               top_k_areas=0,
                               area_temperature=1.0,
                               training=True):
  """Dot-product area attention.

  Args:
    q: Tensor with shape [..., length_q, depth_k].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
      match with q.
    bias: bias Tensor (see attention_bias())
    dropout_rate: a float.
    image_shapes: optional tuple of integer scalars.
      see comments for attention_image_summary()
    name: an optional string
    attention_image_summary: the callback for making image summary of attention.
    save_weights_to: an optional dictionary to capture attention weights
      for visualization; the weights tensor will be appended there under
      a string key created from the variable scope (including name).
    dropout_broadcast_dims: an optional list of integers less than rank of q.
      Specifies in which dimensions to broadcast the dropout decisions.
    max_area_width: the max width allowed for an area.
    max_area_height: the max height allowed for an area.
    memory_height: the height of the memory.
    area_key_mode: the mode for computing area keys, which can be "mean",
      "concat", "sum", "sample_concat", and "sample_sum".
    area_value_mode: the mode for computing area values, which can be either
      "mean", or "sum".
    top_k_areas: Use the top key areas for attention.
    area_temperature: the temperature for attention softmax.
    training: indicating if it is in the training mode.
  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  tf.logging.info("dot_product_area_attention: "
                  "area_h=%d, area_w=%d, mem_h=%d, "
                  "area_key_mode=%s, area_value_mode=%s, "
                  "area_temperature=%f",
                  max_area_height, max_area_width, memory_height,
                  area_key_mode, area_value_mode,
                  area_temperature)
  with tf.variable_scope(
      name, default_name="dot_product_area_attention",
      values=[q, k, v]) as scope:
    # Memory is assumed to be [batch, heads, length, depth] here.
    mem_shape = common_layers.shape_list(k)
    batch_size = mem_shape[0]
    head_size = mem_shape[1]
    length = mem_shape[2]
    depth = mem_shape[3]
    # Pool the keys over every eligible rectangular area so that queries
    # can attend to multi-position regions instead of single positions.
    k_area = compute_area_key(
        tf.reshape(k, [-1, length, depth]),
        max_area_width=max_area_width,
        max_area_height=max_area_height,
        height=memory_height,
        mode=area_key_mode,
        training=training)
    # Pool the values the same way, using the requested aggregation.
    if area_value_mode == "mean":
      v_area, _, _, _, _ = compute_area_features(
          tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width,
          max_area_height=max_area_height, height=memory_height)
    elif area_value_mode == "max":
      v_area, _, _ = basic_pool(tf.reshape(v, [-1, length, depth]),
                                max_area_width=max_area_width,
                                max_area_height=max_area_height,
                                height=memory_height,
                                fn=tf.reduce_max)
    elif area_value_mode == "sum":
      _, _, v_area, _, _ = compute_area_features(
          tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width,
          max_area_height=max_area_height, height=memory_height)
    else:
      raise ValueError("Unsupported area value mode=%s" % area_value_mode)
    k = tf.reshape(k_area, [batch_size, head_size, -1, depth])
    v = tf.reshape(v_area, [batch_size, head_size, -1, depth])
    # Dot-product logits against the area-pooled keys. Note that no
    # 1/sqrt(depth) scaling is applied in this variant; the temperature
    # division below is the only rescaling of the logits.
    logits = tf.matmul(q, k, transpose_b=True)  # [..., length_q, length_kv]
    if bias is not None:
      bias = common_layers.cast_like(bias, logits)
      with tf.name_scope("compute_area_att_bias", values=[bias]):
        bias_shape = common_layers.shape_list(bias)
        mem_length = bias_shape[-1]
        # A position counts as padding iff its bias is a large negative
        # value (bias < -1); an area must be masked iff it covers at least
        # one padded position, hence the sum over each area below.
        bias_values = tf.reshape(
            tf.to_float(tf.less(bias, -1)), [-1, mem_length, 1])
        _, _, padding_sum, _, _ = compute_area_features(
            bias_values, max_area_width=max_area_width,
            max_area_height=max_area_height, height=memory_height)
        bias = tf.where(
            tf.cast(tf.to_int32(padding_sum), tf.bool),
            tf.fill(tf.shape(padding_sum), -np.inf),
            tf.zeros_like(padding_sum, dtype=tf.float32))
        bias = tf.reshape(bias,
                          [bias_shape[0], bias_shape[1],
                           bias_shape[2], -1])
      logits += bias
    logits = logits / area_temperature
    weights = tf.nn.softmax(logits, name="attention_weights")
    if top_k_areas > 0:
      # Keep only the top-k area weights per query and renormalize them.
      tf.logging.info("area_attention top_k_areas=%d", top_k_areas)
      top_k = tf.minimum(common_layers.shape_list(weights)[-1], top_k_areas)
      top_weights, _ = tf.nn.top_k(weights, k=top_k)
      min_values = tf.reduce_min(top_weights, -1, keepdims=True)
      weights = tf.where(tf.greater_equal(weights, min_values),
                         weights, tf.zeros_like(weights))
      weights = tf.div(weights, tf.reduce_sum(weights, -1, keepdims=True))
    if save_weights_to is not None:
      save_weights_to[scope.name] = weights
      save_weights_to[scope.name + "/logits"] = logits
    # Drop out attention links for each head.
    weights = common_layers.dropout_with_broadcast_dims(
        weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    if common_layers.should_generate_summaries() and attention_image_summary:
      attention_image_summary(weights, image_shapes)
    return tf.matmul(weights, v)
import glob
import json
import requests
def update_docs(cfid, docs) -> bool:
    """Updates the documentation of an already existing model
    The update_docs function can be used to upload the documentation of an already existing model
    by simply providing the cfid and the new docs object.
    Args:
        cfid: a string with a valid computeFunction ID.
        docs: A dict{} containing the fields 'name' and 'documentation'
    Returns:
        False if upload failed, true otherwise
    Raises (in debug mode):
        UploadModelError if unable to successfully bundle and upload the model.
    """
    # NOTE(review): `glob` here appears to be a project settings module
    # (glob.SILENT, glob.DEBUG, glob.JWT_TOKEN, ...), not the stdlib `glob`
    # imported above -- confirm which one is actually in scope.
    # check the docs:
    name = docs.get('name', "")
    if name == "":
        print("FATAL: Please make sure to add a name to your documentation (using the 'name') field \n"
              "Your model documentation has not been updated. \n")
        return False
    documentation = docs.get('documentation', "")
    if documentation == "":
        print("FATAL: Please make sure to your documentation (using the 'documentation' field) \n"
              "Your model documentation has not been updated. \n")
        return False
    # check authorization:
    auth = _check_jwt()
    if not auth:
        if not glob.SILENT:
            print("FATAL: We were unable to obtain JWT authorization for your account. \n"
                  "Your model has not been uploaded. \n")
        if glob.DEBUG:
            raise UploadModelError("We were unable to obtain JWT authorization.")
        return False
    # all ok, upload:
    if auth:
        url = glob.USER_MANAGER_URL + "/compute-function/" + glob.JWT_USER_ID + "/" + cfid
        # Setup the actual request
        data: dict = {
            "language": "WASM",
            "public": True,
            "profit": 0.1,
            "cycles": 1.0,
            "name": name,
            "docs": documentation
        }
        payload: dict = {
            'data': json.dumps(data)
        }
        # No file payload: only metadata/documentation is updated here.
        files = []
        headers = {
            'Authorization': glob.JWT_TOKEN
        }
        # Do the request (and make sure to delete the gzip if errors occur)
        try:
            response = requests.put(url, headers=headers, data=payload, files=files)
        except Exception as e:
            if not glob.SILENT:
                print("FATAL: Unable to carry out the upload request: the toolchain is not available. \n"
                      "Your model documentation has not been updated. \n")
            if glob.DEBUG:
                raise UploadModelError("We were unable to obtain JWT authorization: " + str(e))
            return False
        # Error handling
        try:
            response_data = json.loads(response.text)
        except Exception as e:
            if not glob.SILENT:
                print("FATAL: We did not receive a valid JSON response from the toolchain-server. \n"
                      "Your model documentation has not been updated. \n")
            if glob.DEBUG:
                raise UploadModelError("We did not receive a valid response from the server: " + str(e))
            return False
        if response_data['error']:
            if not glob.SILENT:
                print("FATAL: An error was returned by the server. \n"
                      "Your model documentation has not been updated. \n")
            if glob.DEBUG:
                raise UploadModelError("We did not receive a valid response from the server: " + response_data['error'])
            return False
        if glob.DEBUG:
            print("The documentation for " + cfid + " has been updated using:")
            print(data)
        # user feedback:
        if not glob.SILENT:
            print("Your model documentation has been updated. \n")
        return True
    else:
        # NOTE(review): unreachable -- `auth` is always truthy here because
        # the `if not auth:` branch above returns early.
        return False
def visit_children_first(graph_head, visitor):
    """Evaluate *visitor* over the graph in children-first (post) order.

    A node is visited only after all of its successors have been visited;
    ``visitor.visit_node(node, results)`` receives the mapping of results
    computed so far, keyed by node.

    :type graph_head: DecisionGraphNode
    :param visitor: Visitor
    :return: the visitor's result for ``graph_head``
    """
    pending = deque([graph_head])
    pending_members = {graph_head}
    computed = {}
    while pending:
        current = pending.pop()
        pending_members.remove(current)
        if current in computed:
            continue
        successors = current.get_successors()
        if successors and not all(child in computed for child in successors):
            # Re-queue the node beneath its unresolved children so that it
            # is revisited only once they have all been computed.
            pending.append(current)
            pending_members.add(current)
            for child in successors:
                if child not in pending_members:
                    pending.append(child)
                    pending_members.add(child)
        else:
            computed[current] = visitor.visit_node(current, computed)
    return computed[graph_head]
def random_transform(x, seed=None):
    """Randomly augment a single image tensor.

    Composes a random rotation (up to +/-10 degrees), random row/column
    shifts (up to 20% of the image size) and a random zoom (0.9-1.1x)
    into a single affine transform applied with 'nearest' fill.

    # Arguments
        x: 3D tensor, single image (assumed channels-last: row, col, channel).
        seed: random seed.

    # Returns
        A randomly transformed version of the input (same shape).
    """
    np.random.seed(seed)
    # Channels-last axis layout.
    img_row_axis = 0
    img_col_axis = 1
    img_channel_axis = 2
    rotation_range = 10
    # Rotation angle in radians.
    theta = np.deg2rad(np.random.uniform(-rotation_range, rotation_range))
    height_shift_range = width_shift_range = 0.2
    if height_shift_range:
        try: # 1-D array-like or int
            tx = np.random.choice(height_shift_range)
            tx *= np.random.choice([-1, 1])
        except ValueError: # floating point
            # np.random.choice raises ValueError for a scalar float, so a
            # float range falls through to a plain uniform draw here.
            tx = np.random.uniform(-height_shift_range,
                                   height_shift_range)
        if np.max(height_shift_range) < 1:
            # Fractional ranges are interpreted relative to the image height.
            tx *= x.shape[img_row_axis]
    else:
        tx = 0
    if width_shift_range:
        try: # 1-D array-like or int
            ty = np.random.choice(width_shift_range)
            ty *= np.random.choice([-1, 1])
        except ValueError: # floating point
            ty = np.random.uniform(-width_shift_range,
                                   width_shift_range)
        if np.max(width_shift_range) < 1:
            # Fractional ranges are interpreted relative to the image width.
            ty *= x.shape[img_col_axis]
    else:
        ty = 0
    zoom_range = (0.9, 1.1)
    if zoom_range[0] == 1 and zoom_range[1] == 1:
        zx, zy = 1, 1
    else:
        zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    # Compose rotation, shift and zoom into one 3x3 affine matrix so the
    # image is resampled only once.
    transform_matrix = None
    if theta != 0:
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                    [np.sin(theta), np.cos(theta), 0],
                                    [0, 0, 1]])
        transform_matrix = rotation_matrix
    if tx != 0 or ty != 0:
        shift_matrix = np.array([[1, 0, tx],
                                 [0, 1, ty],
                                 [0, 0, 1]])
        transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
    if zx != 1 or zy != 1:
        zoom_matrix = np.array([[zx, 0, 0],
                                [0, zy, 0],
                                [0, 0, 1]])
        transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
    if transform_matrix is not None:
        h, w = x.shape[img_row_axis], x.shape[img_col_axis]
        # Re-center the transform on the image center before applying it.
        transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
        x = apply_transform(x, transform_matrix, img_channel_axis, fill_mode='nearest')
    return x
import sys
import argparse
def process_command_line(argv):
    """Parse the command line for the scatter/correlation plotter.

    The previous docstring claimed a 2-tuple ``(settings, args)`` was
    returned; only the parsed settings namespace is.

    Args:
        argv: list of argument strings, or ``None`` for ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: the parsed settings.
    """
    if argv is None:
        argv = sys.argv[1:]
    # initialize the parser object, replace the description
    parser = argparse.ArgumentParser(
        description='Plot scatter plots and correlations',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'counts_files',
        help='A file with two columns, the first is the name of the library'
        ' and the second is the name of the counts file. The third column'
        ' is the counts of the single reads.')
    parser.add_argument(
        'output_head',
        help='Name of output files prefix, two figures will be generated'
        ' _scatters.tif and _heatmap.tif.')
    parser.add_argument(
        '-l', '--seglen', type=int, default=100,
        help='Length of segment for binning, need to be the same as used to '
        'generate the summary files.')
    parser.add_argument(
        '-c', '--counts', type=int, default=5,
        help='Minimal number of reads to include in the plot.')
    settings = parser.parse_args(argv)
    return settings
def global_avg_pooling_forward(z):
    """Forward pass of global average pooling.

    :param z: conv feature maps shaped (N, C, H, W), where N is the batch
        size and C the number of channels
    :return: array shaped (N, C) holding the spatial mean of each channel
    """
    # Average the width axis first, then the height axis, exactly as the
    # original nested-mean formulation does.
    pooled_over_width = np.mean(z, axis=-1)
    return np.mean(pooled_over_width, axis=-1)
from typing import Optional
from functools import reduce
import operator
def find_local_term(match_text: SearchText, service: OntologyService) -> Optional[ConditionMatchingSuggestion]:
    """
    Local (database) search for an ontology term matching the search text.

    Note that local search is pretty dumb: it uses SearchText (which already
    simplifies a bunch of words) and errs on the side of false positives
    when querying the local database, then uses search_text_to_suggestion to
    check whether each returned result is a genuine positive.

    Returns the merged suggestion, or None when nothing matches.
    """
    q = list()
    # TODO, can we leverage phenotype matching?
    if match_text.prefix_terms:
        term_list = list(match_text.prefix_terms)
        if len(term_list) == 1 and len(term_list[0]) <= 4:
            # Short single terms (likely gene-symbol-like codes) are matched
            # exactly against the name or the aliases array.
            term_str: str = term_list[0]
            # check array contains (and hope we don't have any mixed case aliases)
            q.append(Q(name__iexact=term_str) | Q(aliases__contains=[term_str.upper()]) | Q(
                aliases__contains=[term_str.lower()]))
        else:
            for term_str in term_list:
                if len(term_str) > 1 and not term_str.isnumeric():
                    # exclude numeric because the value might be stored as roman or arabic
                    # problem with icontains in aliases is it converts array list to a string, and then finds text in there
                    # so "hamper,laundry" would be returned for icontains="ham"
                    q.append(Q(name__icontains=term_str) | Q(aliases__icontains=term_str))
    matches = list()
    if q:
        # AND all per-term filters together; cap at 200 rows to bound work.
        qs = OntologyTerm.objects.filter(ontology_service=service).filter(reduce(operator.and_, q))
        for term in qs[0:200]:
            if not term.is_obsolete:
                # Re-validate each database candidate against the search
                # text to weed out the deliberate false positives above.
                if cms := search_text_to_suggestion(match_text, term):
                    matches.append(cms)
    return merge_matches(matches)
def label_encoder(adata):
    """
    Encode labels of Annotated `adata` matrix using sklearn.preprocessing.LabelEncoder class.

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix; must carry a "condition" column in
        ``adata.obs``.

    Returns
    -------
    labels: numpy nd-array
        Array of encoded labels, reshaped to (n_samples, 1).
    le: `~sklearn.preprocessing.LabelEncoder`
        The fitted encoder, useful for inverse-transforming labels later.
        (The previous docstring omitted this second return value.)
    """
    le = preprocessing.LabelEncoder()
    labels = le.fit_transform(adata.obs["condition"].tolist())
    return labels.reshape(-1, 1), le
def is_plus_or_minus(token_type: TokenType) -> bool:
    """Tell whether the token type is either a plus or a minus."""
    result = is_plus(token_type)
    if not result:
        result = is_minus(token_type)
    return result
def gen_data(test_size=TEST_SIZE, channels=CHANNELS,
             width=WIDTH, height=HEIGHT,
             mmean=0, vmean=1, channel_last=False, fc_output=False):
    """
    Generate random data to pass through the layer.

    NOTE:
        - The generated data should not be normal, so that the layer can try
          to normalize it: the per-element scale of the synthetic deltas is
          drawn from a folded normal distribution controlled by |vmean|.
        - |mmean| is currently unused by this implementation (activations
          are drawn with loc=0).
        - |channel_last| takes precedence over |fc_output|, matching the
          original control flow.
    Return:
        inputs: synthetic input activations (float32)
        deltas_in: synthetic deltas for bprop (float32)
    """
    if channel_last:
        shape = (test_size, width, height, channels)
    elif fc_output:
        shape = (test_size, channels)
    else:
        shape = (test_size, channels, width, height)
    activations = np.random.normal(loc=0, scale=1, size=shape)
    # Per-element std drawn from a folded normal, scaled by vmean.
    delta_scale = np.abs(vmean * np.random.randn(*shape))
    synthetic_deltas = np.random.normal(loc=0, scale=delta_scale, size=shape)
    return activations.astype(np.float32), synthetic_deltas.astype(np.float32)
from typing import Union
from typing import List
def plot_r2_pvalues(
    model: mofa_model,
    factors: Union[int, List[int], str, List[str]] = None,
    n_iter: int = 100,
    groups_df: pd.DataFrame = None,
    group_label: str = None,
    view=0,
    fdr: bool = True,
    cmap="binary_r",
    **kwargs,
):
    """
    Plot R2 values for the model

    Parameters
    ----------
    model : mofa_model
        Factor model
    factors : optional
        Index of a factor (or indices of factors) to use (all factors by default)
    n_iter : optional int
        Number of permutation iterations used to derive the null distribution
    view : optional
        Make a plot for a cetrain view (first view by default)
    groups_df : optional pd.DataFrame
        Data frame with samples (cells) as index and first column as group assignment
    group_label : optional
        Sample (cell) metadata column to be used as group assignment
    fdr : optional bool
        If plot corrected PValues (FDR)
    cmap : optional
        The colourmap for the heatmap (default is 'binary_r' with darker colour for smaller PValues)

    Returns
    -------
    The seaborn heatmap Axes object.
    """
    r2 = model.get_r2_null(
        factors=factors,
        groups_df=groups_df,
        group_label=group_label,
        n_iter=n_iter,
        return_pvalues=True,
        fdr=fdr,
    )
    # Plot FDR-corrected values when requested, raw p-values otherwise.
    pvalue_column = "FDR" if fdr else "PValue"
    # Select a certain view if necessary
    if view is not None:
        view = model.views[view] if isinstance(view, int) else view
        r2 = r2[r2["View"] == view]
    r2_df = r2.sort_values("PValue").pivot(
        index="Factor", columns="Group", values=pvalue_column
    )
    # Sort by factor index
    r2_df.index = r2_df.index.astype("category")
    # Reorder "Factor<N>" labels numerically rather than lexicographically.
    r2_df.index = r2_df.index.reorder_categories(
        sorted(r2_df.index.categories, key=lambda x: int(x.split("Factor")[1]))
    )
    r2_df = r2_df.sort_values("Factor")
    g = sns.heatmap(r2_df.sort_index(level=0, ascending=False), cmap=cmap, **kwargs)
    g.set_yticklabels(g.yaxis.get_ticklabels(), rotation=0)
    return g
import itertools
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
                          pred_labels=None, title=None, normalize=False,
                          hide_zeros=False, x_tick_rotation=0, ax=None,
                          figsize=None, cmap='Blues', title_fontsize="large",
                          text_fontsize="medium"):
    """Generates confusion matrix plot from predictions and true labels

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.
        y_pred (array-like, shape (n_samples)):
            Estimated targets as returned by a classifier.
        labels (array-like, shape (n_classes), optional): List of labels to
            index the matrix. This may be used to reorder or select a subset
            of labels. If none is given, those that appear at least once in
            ``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
        true_labels (array-like, optional): The true labels to display.
            If none is given, then all of the labels are used.
        pred_labels (array-like, optional): The predicted labels to display.
            If none is given, then all of the labels are used.
        title (string, optional): Title of the generated plot. Defaults to
            "Confusion Matrix" if `normalize` is True. Else, defaults to
            "Normalized Confusion Matrix.
        normalize (bool, optional): If True, normalizes the confusion matrix
            before plotting. Defaults to False.
        hide_zeros (bool, optional): If True, does not plot cells containing a
            value of zero. Defaults to False.
        x_tick_rotation (int, optional): Rotates x-axis tick labels by the
            specified angle. This is useful in cases where there are numerous
            categories and the labels overlap each other.
        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.
        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.
        cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
            Colormap used for plotting the projection. View Matplotlib Colormap
            documentation for available options.
            https://matplotlib.org/users/colormaps.html
        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".
        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot.plotters as skplt
        >>> rf = RandomForestClassifier()
        >>> rf = rf.fit(X_train, y_train)
        >>> y_pred = rf.predict(X_test)
        >>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_confusion_matrix.png
           :align: center
           :alt: Confusion matrix
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    if labels is None:
        classes = unique_labels(y_true, y_pred)
    else:
        classes = np.asarray(labels)
    if normalize:
        # Row-normalize so each row sums to 1; 0/0 rows become NaN and are
        # replaced by 0 below.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        cm = np.around(cm, decimals=2)
        cm[np.isnan(cm)] = 0.0
    # Restrict displayed rows to the requested subset of true labels.
    if true_labels is None:
        true_classes = classes
    else:
        validate_labels(classes, true_labels, "true_labels")
        true_label_indexes = np.in1d(classes, true_labels)
        true_classes = classes[true_label_indexes]
        cm = cm[true_label_indexes]
    # Restrict displayed columns to the requested subset of predicted labels.
    if pred_labels is None:
        pred_classes = classes
    else:
        validate_labels(classes, pred_labels, "pred_labels")
        pred_label_indexes = np.in1d(classes, pred_labels)
        pred_classes = classes[pred_label_indexes]
        cm = cm[:, pred_label_indexes]
    if title:
        ax.set_title(title, fontsize=title_fontsize)
    elif normalize:
        ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
    else:
        ax.set_title('Confusion Matrix', fontsize=title_fontsize)
    image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
    plt.colorbar(mappable=image)
    x_tick_marks = np.arange(len(pred_classes))
    y_tick_marks = np.arange(len(true_classes))
    ax.set_xticks(x_tick_marks)
    ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
                       rotation=x_tick_rotation)
    ax.set_yticks(y_tick_marks)
    ax.set_yticklabels(true_classes, fontsize=text_fontsize)
    # Midpoint threshold used to pick a readable text color per cell.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if not (hide_zeros and cm[i, j] == 0):
            ax.text(j, i, cm[i, j],
                    horizontalalignment="center",
                    verticalalignment="center",
                    fontsize=text_fontsize,
                    color="white" if cm[i, j] > thresh else "black")
    ax.set_ylabel('True label', fontsize=text_fontsize)
    ax.set_xlabel('Predicted label', fontsize=text_fontsize)
    # NOTE(review): 'off' is a truthy string, so this likely *enables* the
    # grid on modern matplotlib; ax.grid(False) is probably what was meant
    # -- confirm against the matplotlib version in use.
    ax.grid('off')
    return ax
def _check_lfs_hook(client, paths):
"""Pull the specified paths from external storage."""
return client.check_requires_tracking(*paths) | 403b3db59f6eeec72c8f4a3b18808997b0f34724 | 27,992 |
import argparse
def parse_args():
    """Parse the program's command-line arguments for FAQ generation."""
    arg_parser = argparse.ArgumentParser(
        description='Process FAQ template files to create the FAQ.')
    arg_parser.add_argument('-i', '--input', required=True, metavar='I',
                            nargs='+', help='FAQ files parsed')
    arg_parser.add_argument('-o', '--output', metavar='O', nargs=1,
                            help='output file')
    return arg_parser.parse_args()
def name_to_zamid(name):
    """Converts a nuclide's name into the nuclide's z-a-m id.

    Parameters
    ----------
    name: str
        Name of a nuclide, e.g. "U-235"; a trailing "*" denotes an
        excited (metastable) state.

    Returns
    -------
    str
        The zzaaam identifier, computed as 10000*Z + 10*A + state.
    """
    # Element symbol -> Z lookup (previously assigned to an unused local).
    element_z = d.nuc_name_dic
    elt_name = name.split('-')[0]
    na = int(name.split('-')[1].replace('*', ''))
    # A trailing "*" marks a metastable state.
    state = 1 if '*' in name else 0
    zzaaam = 10000 * element_z[elt_name] + na * 10 + state
    return str(zzaaam)
def sample_category(name):
    """Create a Category with the given name and return it."""
    category = models.Category.objects.create(name=name)
    return category
from datetime import datetime
import os
def GetAllRunningAUProcess():
  """Get all the ongoing AU processes' pids from tracking logs.

  Only tracking logs created within the last few hours are inspected: this
  avoids matching an unrelated live process whose pid happens to equal that
  of an old AU process.

  Returns:
    A list of background AU processes' pids.
  """
  running_pids = []
  now = datetime.datetime.now()
  track_logs = GetAllTrackStatusFileByTime()
  # Only check log file created in AU_PROCESS_HOUR_GAP (3) hours.
  for track_log in track_logs:
    try:
      created = datetime.datetime.fromtimestamp(os.path.getctime(track_log))
      if now - created >= datetime.timedelta(hours=AU_PROCESS_HOUR_GAP):
        # Logs are ordered by time, so everything beyond this is too old.
        break
      pid = ParsePidFromTrackLogFileName(os.path.basename(track_log))
      if pid and IsProcessAlive(pid):
        running_pids.append(pid)
    except (ValueError, OSError) as e:
      _Log('Error happened in getting pid from %s: %s', track_log, e)
  return running_pids
import importlib
import os
def get_installed_app_locale_path(appname):
    """
    Load the app given by appname and return its locale folder path, if it exists.

    Note that the module is imported to determine its location.

    Args:
        appname: dotted module path of the app.

    Returns:
        Path of the app's ``locale`` directory, or ``None`` when the app
        cannot be imported, has no file location (e.g. namespace packages,
        whose ``__file__`` is ``None``), or has no ``locale`` folder.
    """
    try:
        m = importlib.import_module(appname)
    except ImportError:
        return None
    # Namespace packages and some frozen modules have no usable __file__;
    # the original code raised TypeError in os.path.dirname in that case.
    module_file = getattr(m, "__file__", None)
    if module_file is None:
        return None
    module_locale_path = os.path.join(os.path.dirname(module_file), "locale")
    if os.path.isdir(module_locale_path):
        return module_locale_path
    return None
def file_share_exists(ctx, filesvc, share_name):
    """
    Checks if a File Share already exists

    :param ctx: context object exposing a logger (used for debug output)
    :param filesvc: Azure File service client exposing
        ``get_share_properties``
    :param share_name: name of the share to look up
    :rtype: `azure.storage.file.models.Share` or `None`
    :returns: Azure File Share object if the File Share
        exists or None if it does not
    """
    ctx.logger.debug('Checking if File Share "{0}" exists'
                     .format(share_name))
    try:
        props = filesvc.get_share_properties(share_name)
        ctx.logger.debug('File Share "{0}" exists'
                         .format(share_name))
        return props
    except Exception:
        # Broad catch is deliberate: any failure (missing share, auth or
        # network error) is treated as "the share does not exist".
        ctx.logger.debug('File Share "{0}" does not exist'
                         .format(share_name))
        return None
def plan_add(request):
    """
    Add a test plan (Django view).

    GET renders the add-plan form (the project list is restricted by the
    user's superuser status); POST validates the submitted plan and saves
    it. Unauthenticated users are redirected to the login page.

    :param request: Django HttpRequest
    :return: HttpResponse (form render, error render, or redirect)
    """
    user_id = request.session.get('user_id', '')
    if not get_user(user_id):
        # Remember where to come back to after a successful login.
        request.session['login_from'] = '/base/plan/'
        return HttpResponseRedirect('/login/')
    else:
        if request.method == 'POST':
            prj_list = is_superuser(user_id)
            plan_name = request.POST['plan_name'].strip()
            content = request.POST.getlist("case_id")
            # Validate plan name and selected cases before touching the DB.
            msg = plan_info_logic(plan_name, content)
            if msg != 'ok':
                log.error('plan add error:{}'.format(msg))
                return render(request, 'base/plan/add.html', {'error': msg, "prj_list": prj_list})
            else:
                prj_id = request.POST['prj_id']
                project = Project.objects.get(prj_id=prj_id)
                is_locust = request.POST['is_locust']
                is_task = request.POST['is_task']
                env_id = request.POST['env_id']
                environment = Environment.objects.get(env_id=env_id)
                description = request.POST['description']
                username = request.session.get('user', '')
                # Only one plan may be flagged as the locust/task plan at a
                # time, so clear the flag on all other plans first.
                if is_locust == '1':
                    Plan.objects.filter(is_locust=1).update(is_locust=0)
                if is_task == '1':
                    Plan.objects.filter(is_task=1).update(is_task=0)
                plan = Plan(plan_name=plan_name, project=project, environment=environment, description=description,
                            content=content, is_locust=is_locust, is_task=is_task, update_user=username)
                plan.save()
                log.info('add plan {} success. plan info: {} // {} // {} // {} //{} //{}'.
                         format(plan_name, project, environment, description, content, is_locust, is_task))
                return HttpResponseRedirect("/base/plan/")
        elif request.method == 'GET':
            prj_list = is_superuser(user_id)
            info = {"prj_list": prj_list}
            return render(request, "base/plan/add.html", info)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.