content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def create_folder(path):
    """Create the directory ``path`` (including parents) when absent.

    :param path: filesystem path of the folder to create
    :return: ``True`` when the folder was newly created,
        ``False`` when it already existed
    """
    # Guard clause: nothing to do for an existing path.
    if os.path.exists(path):
        return False
    os.makedirs(path)
    return True
def extract_smaps(kspace, low_freq_percentage=8, background_thresh=4e-6):
    """Extract raw sensitivity maps for kspaces
    This function will first select a low frequency region in all the kspaces,
    then Fourier invert it, and finally perform a normalisation by the root
    sum-of-square.
    kspace has to be of shape: nslices x ncoils x height x width
    Arguments:
        kspace (tf.Tensor): the kspace whose sensitivity maps you want extracted.
        low_freq_percentage (int): the low frequency region to consider for
            sensitivity maps extraction, given as a percentage of the width of
            the kspace. In fastMRI, it's 8 for an acceleration factor of 4, and
            4 for an acceleration factor of 8. Defaults to 8.
        background_thresh (float): unused for now, will later allow to have
            thresholded sensitivity maps.
    Returns:
        tf.Tensor: extracted raw sensitivity maps.
    """
    n_slices = tf.shape(kspace)[0]
    if n_slices > 0:
        # Size (in pixels) of the centred low-frequency window, per spatial dim.
        n_low_freq = tf.cast(tf.shape(kspace)[-2:] * low_freq_percentage / 100, tf.int32)
        center_dimension = tf.cast(tf.shape(kspace)[-2:] / 2, tf.int32)
        # Window is symmetric around the k-space centre.
        low_freq_lower_locations = center_dimension - tf.cast(n_low_freq / 2, tf.int32)
        low_freq_upper_locations = center_dimension + tf.cast(n_low_freq / 2, tf.int32)
        ###
        # NOTE: the following stands for in numpy:
        # low_freq_mask = np.zeros_like(kspace)
        # low_freq_mask[
        #     ...,
        #     low_freq_lower_locations[0]:low_freq_upper_locations[0],
        #     low_freq_lower_locations[1]:low_freq_upper_locations[1]
        # ] = 1
        x_range = tf.range(low_freq_lower_locations[0], low_freq_upper_locations[0])
        y_range = tf.range(low_freq_lower_locations[1], low_freq_upper_locations[1])
        # Cartesian product of the window's row/column indices.
        X_range, Y_range = tf.meshgrid(x_range, y_range)
        X_range = tf.reshape(X_range, (-1,))
        Y_range = tf.reshape(Y_range, (-1,))
        low_freq_mask_indices = tf.stack([X_range, Y_range], axis=-1)
        # we have to transpose because only the first dimension can be indexed in
        # scatter_nd
        scatter_nd_perm = [2, 3, 0, 1]
        # Build the mask with spatial dims first (h x w x nslices x ncoils),
        # then transpose back to nslices x ncoils x h x w below.
        low_freq_mask = tf.scatter_nd(
            indices=low_freq_mask_indices,
            updates=tf.ones([
                tf.size(X_range),
                tf.shape(kspace)[0],
                tf.shape(kspace)[1]],
            ),
            shape=[tf.shape(kspace)[i] for i in scatter_nd_perm],
        )
        low_freq_mask = tf.transpose(low_freq_mask, perm=scatter_nd_perm)
        ###
        low_freq_kspace = kspace * tf.cast(low_freq_mask, kspace.dtype)
        coil_image_low_freq = tf_ortho_ifft2d(low_freq_kspace)
        # no need to norm this since they all have the same norm
        # 2-norm over the coil axis = root sum-of-squares combination.
        low_freq_rss = tf.norm(coil_image_low_freq, axis=1)
        # Broadcast the RSS (nslices x h x w) back over the coil axis.
        coil_smap = coil_image_low_freq / low_freq_rss[:, None]
        # for now we do not perform background removal based on low_freq_rss
        # could be done with 1D k-means or fixed background_thresh, with tf.where
    else:
        # Degenerate empty batch: return an all-zero map of the same shape.
        coil_smap = tf.zeros_like(kspace, dtype=kspace.dtype)
    return coil_smap
def tiff_to_mat_conversion(ms_path, pan_path, save_path, ms_initial_point=(0, 0), ms_final_point=(0, 0), ratio=4):
    """
    Convert native GeoTIFF imagery to a ``*.mat`` file, with optional cropping.

    Parameters
    ----------
    ms_path : str
        The path of the Multi-Spectral image
    pan_path : str
        The path of the Panchromatic file
    save_path : str
        The destination mat file
    ms_initial_point : tuple
        Upper left crop corner in MS pixel coordinates, given as (x, y),
        where (0, 0) is the top-left pixel.
    ms_final_point : tuple
        Bottom right crop corner in MS pixel coordinates, given as (x, y).
        Leaving it at (0, 0) disables cropping.
    ratio : int
        The resolution scale which elapses between MS and PAN.

    Return
    ------
    I_in : Dictionary
        The dictionary, composed of MS and Pan images.
    """
    # Read the MS cube and move the band axis last (H x W x bands).
    ms = np.moveaxis(gdal.Open(ms_path).ReadAsArray(), 0, -1)
    pan = gdal.Open(pan_path).ReadAsArray()
    x0, y0 = ms_initial_point
    x1, y1 = ms_final_point
    # Crop only when a real bottom-right corner was supplied.
    if x1 != 0 and y1 != 0:
        ms = ms[y0:y1, x0:x1, :]
        # PAN is `ratio` times finer, so scale the crop window accordingly.
        pan = pan[y0 * ratio:y1 * ratio, x0 * ratio:x1 * ratio]
    I_in = {'I_MS_LR': ms, 'I_PAN': pan}
    io.savemat(save_path, I_in)
    return I_in
def is_member(musicians, musician_name):
    """Check whether the named musician appears in the musician list.

    Parameters:
        musicians (list): list of "name, instrument" strings
        musician_name (str): musician name (case-insensitive match)

    Returns:
        bool: True if a name matches; otherwise False.
    """
    target = musician_name.lower()
    for entry in musicians:
        # Each entry is "Name, instrument"; compare on the name part only.
        if entry.split(', ')[0].lower() == target:
            return True
    return False
def _parse_client_dict(dataset: tf.data.Dataset,
                       string_max_length: int) -> Tuple[tf.Tensor, tf.Tensor]:
    """Parses the dictionary in the input `dataset` to key and value lists.
    Args:
      dataset: A `tf.data.Dataset` that yields `OrderedDict`. In each
        `OrderedDict` there are two key, value pairs:
          `DATASET_KEY`: A `tf.string` representing a string in the dataset.
          `DATASET_VALUE`: A rank 1 `tf.Tensor` with `dtype` `tf.int64`
            representing the value associate with the string.
      string_max_length: The maximum length of the strings. If any string is
        longer than `string_max_length`, an error will be raised.
    Returns:
      input_strings: A rank 1 `tf.Tensor` containing the list of strings in
        `dataset`.
      string_values: A rank 2 `tf.Tensor` containing the values of
        `input_strings`.
    Raises:
      An error if any string in `dataset` is longer than string_max_length.
      NOTE(review): `tf.debugging.Assert` raises `tf.errors.InvalidArgumentError`
      at run time, not `ValueError` as the original docstring claimed — confirm
      what callers actually catch.
    """
    # Stack the whole dataset into a single dict of batched tensors.
    parsed_dict = data_processing.to_stacked_tensor(dataset)
    input_strings = parsed_dict[DATASET_KEY]
    string_values = parsed_dict[DATASET_VALUE]
    # Graph-level guard: fail if any string exceeds the allowed byte length.
    tf.debugging.Assert(
        tf.math.logical_not(
            tf.math.reduce_any(
                tf.greater(tf.strings.length(input_strings), string_max_length))),
        data=[input_strings],
        name='CHECK_STRING_LENGTH')
    return input_strings, string_values
def init(command):
    """Parse the initial rover position command from NASA.

    The command is expected to be two integers and a heading letter
    separated by single spaces, e.g. ``"1 2 N"``.

    :param command: raw position command string
    :return: the updated module-level ``position`` dict on success,
        ``False`` when the command does not match the expected format
    """
    # BUG FIX: the original pattern only accepted single-digit coordinates;
    # '+' now allows multi-digit values. Raw string avoids escape warnings.
    if re.match(r'^[0-9]+\s[0-9]+\s[a-zA-Z]$', command):
        pos = command.split(" ")
        position['x'] = pos[0]
        position['y'] = pos[1]
        position['heading'] = pos[2]
        # print() form works in both Python 2 and 3 for a single argument.
        print(position)
        return position
    return False
def add_small_gap_multiply(original_wf, gap_cutoff, density_multiplier, fw_name_constraint=None):
    """
    Add a 'small_gap_multiply' parameter to all matching WriteVasp* tasks.

    The parameter multiplies the k-mesh density of compounds whose band gap is
    below ``gap_cutoff`` by ``density_multiplier`` — useful for metallic or
    small-gap systems. Only works on FireWorks with WriteVasp* tasks that
    accept the ``small_gap_multiply`` argument.

    Args:
        original_wf (Workflow)
        gap_cutoff (float): Only multiply k-points for materials with gap < gap_cutoff (eV)
        density_multiplier (float): Multiply k-point density by this amount
        fw_name_constraint (str): Only apply changes to FWs where fw_name contains this substring.

    Returns:
        Workflow
    """
    matches = get_fws_and_tasks(original_wf, fw_name_constraint=fw_name_constraint,
                                task_name_constraint="WriteVasp")
    for fw_index, task_index in matches:
        task = original_wf.fws[fw_index].tasks[task_index]
        task["small_gap_multiply"] = [gap_cutoff, density_multiplier]
    return original_wf
def _get_backend(config_backend):
"""Extract the backend class from the command line arguments."""
if config_backend == 'gatttool':
backend = GatttoolBackend
elif config_backend == 'bluepy':
backend = BluepyBackend
elif config_backend == 'pygatt':
backend = PygattBackend
else:
raise Exception('unknown backend: {}'.format(config_backend))
return backend | 5,333,307 |
def kern_CUDA_sparse(nsteps,
                     dX,
                     rho_inv,
                     context,
                     phi,
                     grid_idcs,
                     mu_egrid=None,
                     mu_dEdX=None,
                     mu_lidx_nsp=None,
                     prog_bar=None):
    """`NVIDIA CUDA cuSPARSE <https://developer.nvidia.com/cusparse>`_ implementation
    of forward-euler integration.
    Function requires a working :mod:`accelerate` installation.
    NOTE(review): this function uses Python 2 syntax (``xrange``, print
    statement) and reads the module-level globals ``config`` and ``dbg``.
    Args:
      nsteps (int): number of integration steps
      dX (numpy.array[nsteps]): vector of step-sizes :math:`\\Delta X_i` in g/cm**2
      rho_inv (numpy.array[nsteps]): vector of density values :math:`\\frac{1}{\\rho(X_i)}`
      context: CUDA computation context exposing ``set_phi``/``do_step``/``get_phi``
      phi (numpy.array): initial state vector :math:`\\Phi(X_0)`
      grid_idcs (list): step indices at which intermediate solutions are stored
      mu_egrid (numpy.array, optional): muon energy grid
      mu_dEdX (numpy.array, optional): muon energy loss per unit depth
      mu_lidx_nsp (tuple, optional): (lower muon index, number of muon species)
      prog_bar (object,optional): handle to :class:`ProgressBar` object
    Returns:
      tuple: state vector :math:`\\Phi(X_{nsteps})` after integration, and the
      list of intermediate solutions captured at ``grid_idcs``
    """
    c = context
    c.set_phi(phi)
    enmuloss = config['enable_muon_energy_loss']
    de = mu_egrid.size
    mu_egrid = mu_egrid.astype(c.fl_pr)
    muloss_min_step = config['muon_energy_loss_min_step']
    lidx, nmuspec = mu_lidx_nsp
    # Accumulate at least a few g/cm2 for energy loss steps
    # to avoid numerical errors
    dXaccum = 0.
    grid_step = 0
    grid_sol = []
    from time import time
    start = time()
    for step in xrange(nsteps):
        if prog_bar and (step % 5 == 0):
            prog_bar.update(step)
        # One forward-Euler step on the device.
        c.do_step(rho_inv[step], dX[step])
        dXaccum += dX[step]
        # Apply muon energy loss once enough depth has accumulated (or at the
        # final step) rather than every iteration, to limit numerical error.
        if enmuloss and (dXaccum > muloss_min_step or step == nsteps - 1):
            # Download current solution vector to host
            phc = c.get_phi()
            for nsp in xrange(nmuspec):
                # Shift each muon species' spectrum down in energy by
                # interpolating onto the shifted grid mu_egrid + dEdX * dX.
                phc[lidx + de * nsp:lidx + de * (nsp + 1)] = np.interp(
                    mu_egrid, mu_egrid + mu_dEdX * dXaccum,
                    phc[lidx + de * nsp:lidx + de * (nsp + 1)])
            # Upload changed vector back..
            c.set_phi(phc)
            dXaccum = 0.
        # Capture intermediate solutions at the requested grid indices.
        if (grid_idcs and grid_step < len(grid_idcs) and
                grid_idcs[grid_step] == step):
            grid_sol.append(c.get_phi())
            grid_step += 1
    # NOTE(review): ``dbg`` is not defined in this scope — presumably a
    # module-level debug flag; confirm it exists where this is used.
    if dbg:
        print "Performance: {0:6.2f}ms/iteration".format(
            1e3 * (time() - start) / float(nsteps))
    return c.get_phi(), grid_sol
def patch_import(file_path: Union[str, Path]) -> None:
    """Rewrite ``from .version`` imports to ``from ..version`` in a file.

    Needed for multi-client packages, where the generated file lives one
    package level deeper than the ``version`` module. The rewrite works on
    raw bytes so the file's encoding and line endings are left untouched.

    # That's a dirty hack, maybe it's worth making configuration a template?
    """
    target = Path(file_path)
    contents = target.read_bytes()
    # The leading space restricts the match to import statements.
    contents = contents.replace(
        b" .version", b" ..version"
    )  # Just a dot right? Worth its own template for that? :)
    target.write_bytes(contents)
def test_no_matching_credentials(coresys: CoreSys):
    """Test no matching credentials.

    Registers credentials for Docker Hub only, then checks that lookups for
    images hosted on a different registry (ghcr.io) yield no credentials.
    """
    docker = DockerInterface(coresys)
    # Only Docker Hub credentials are configured.
    coresys.docker.config.registries = {
        DOCKER_HUB: {"username": "Spongebob Squarepants", "password": "Password1!"}
    }
    # ghcr.io images must not pick up the Docker Hub credentials.
    assert not docker._get_credentials("ghcr.io/homeassistant")
    assert not docker._get_credentials("ghcr.io/homeassistant/amd64-supervisor")
def _to_sequence(x):
"""shape batch of images for input into GPT2 model"""
x = x.view(x.shape[0], -1) # flatten images into sequences
x = x.transpose(0, 1).contiguous() # to shape [seq len, batch]
return x | 5,333,311 |
def read_wav_kaldi(wav_file_path: str) -> WaveData:
    """Read a wave file into a Kaldi-readable WaveData object.

    Args:
        wav_file_path: Path to a .wav file.
    Returns:
        A Kaldi-readable WaveData object.
    """
    # mmap=False: load the samples as a regular in-memory array, not a memmap.
    sample_rate, samples = wavfile.read(wav_file_path, False)
    return read_wav_kaldi_internal(samples, sample_rate)
def _testCheckSums(tableDirectory):
    """
    >>> data = "0" * 44
    >>> checkSum = calcTableChecksum("test", data)
    >>> test = [
    ...     dict(data=data, checkSum=checkSum, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    False
    >>> test = [
    ...     dict(data=data, checkSum=checkSum+1, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    True
    """
    # Recompute each table's checksum and report every mismatch.
    return [
        "Invalid checksum for the %s table." % entry["tag"]
        for entry in tableDirectory
        if entry["checkSum"] != calcTableChecksum(entry["tag"], entry["data"])
    ]
def read_img(path):
    """
    Read an image and convert it to an adjacency (pixel-similarity) graph.

    :param path: path of the image file to read
    :return: tuple ``(im, graph)`` of the normalised 2-D image array and the
        sparse pixel-gradient adjacency matrix
    """
    # For colour images, use only one colour channel (index 2).
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    # this code needs an old SciPy, or migration to imageio/PIL — confirm.
    im = sp.misc.imread(path)[:, :, 2]
    # Normalise pixel values to [0, 1].
    im = im / 255.
    # If processing is too slow, shrink the image with the line below:
    # im = sp.misc.imresize(im, 0.10) / 255.
    # Build the graph of gradients, i.e. differences between adjacent pixels.
    graph = image.img_to_graph(im)
    beta = 20
    # Convert gradients to similarities for the adjacency matrix:
    # exp(-beta * gradient / std) maps small gradients to values near 1.
    graph.data = np.exp(-beta * graph.data / graph.data.std())
    return im, graph
def createmarker(name=None, source='default', mtype=None,
                 size=None, color=None, priority=None,
                 viewport=None, worldcoordinate=None,
                 x=None, y=None, projection=None):
    """%s
    :param name: Name of created object
    :type name: `str`_
    :param source: A marker, or string name of a marker
    :type source: `str`_
    :param mtype: Specifies the type of marker, i.e. "dot", "circle"
    :type mtype: `str`_
    :param size:
    :type size: `int`_
    :param color: A color name from the `X11 Color Names list <https://en.wikipedia.org/wiki/X11_color_names>`_,
        or an integer value from 0-255, or an RGB/RGBA tuple/list (e.g. (0,100,0), (100,100,0,50))
    :type color: `str`_ or int
    :param priority: The layer on which the marker will be drawn.
    :type priority: `int`_
    :param viewport: 4 floats between 0 and 1 which specify the area that X/Y values are mapped to inside of the canvas.
    :type viewport: `list`_
    :param worldcoordinate: List of 4 floats (xmin, xmax, ymin, ymax)
    :type worldcoordinate: `list`_
    :param x: List of lists of x coordinates. Values must be between worldcoordinate[0] and worldcoordinate[1].
    :type x: `list`_
    :param y: List of lists of y coordinates. Values must be between worldcoordinate[2] and worldcoordinate[3].
    :type y: `list`_
    :returns: A secondary marker method
    :rtype: vcs.marker.Tm
    """
    name, source = check_name_source(name, source, 'marker')
    mrk = marker.Tm(name, source)
    # Apply each optional attribute only when the caller supplied it.
    optional_attributes = (
        ('type', mtype),
        ('size', size),
        ('color', color),
        ('priority', priority),
        ('viewport', viewport),
        ('worldcoordinate', worldcoordinate),
        ('x', x),
        ('y', y),
        ('projection', projection),
    )
    for attribute_name, attribute_value in optional_attributes:
        if attribute_value is not None:
            setattr(mrk, attribute_name, attribute_value)
    return mrk
def get_parser(disable: Optional[List[str]] = None,
               lang: str = 'en',
               merge_terms: Optional[Set] = None,
               max_sent_len: Optional[int] = None) -> Callable:
    """spaCy clinical text parser

    Builds a spaCy pipeline with a clinical-text tokenizer and a rule-based
    sentence segmenter, with the heavyweight components disabled by default.

    Parameters
    ----------
    disable
        pipeline components to disable; defaults to
        ["ner", "parser", "tagger", "lemmatizer"] when None/empty
    lang
        spaCy model/language name to load (default 'en')
    merge_terms
        terms to merge during sentence boundary detection
        # NOTE(review): falls back to an empty dict, not an empty set as the
        # annotation suggests — confirm what ct_sbd_rules expects.
    max_sent_len
        maximum sentence length used by the sentence-boundary rules
    Returns
    -------
    the configured spaCy ``nlp`` pipeline (callable on text)
    """
    disable = ["ner", "parser", "tagger", "lemmatizer"] if not disable \
        else disable
    merge_terms = {} if not merge_terms else merge_terms
    nlp = spacy.load(lang, disable=disable)
    # Replace the default tokenizer with the clinical-text tokenizer.
    nlp.tokenizer = ct_tokenizer(nlp)
    # Bind the merge terms and length cap into the segmentation strategy.
    sbd_func = partial(ct_sbd_rules,
                       merge_terms=merge_terms,
                       max_sent_len=max_sent_len)
    sbd = SentenceSegmenter(nlp.vocab, strategy=sbd_func)
    nlp.add_pipe(sbd)
    return nlp
def block_device_mapping_get_all_by_instance(context, instance_uuid,
                                             use_slave=False):
    """Get all block device mapping belonging to an instance.

    Thin delegation to the configured database implementation (``IMPL``).
    """
    return IMPL.block_device_mapping_get_all_by_instance(
        context, instance_uuid, use_slave)
def add_rain(img, slant, drop_length, drop_width, drop_color, blur_value, brightness_coefficient, rain_drops):
    """Render rain streaks onto an RGB image.

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (np.ndarray): input RGB image, dtype uint8 or float32.
        slant (int): horizontal offset of each streak's end point (its angle).
        drop_length (int): vertical length of each rain streak, in pixels.
        drop_width (int): line thickness of each streak.
        drop_color (tuple): color of the streaks.
        blur_value (int): blur kernel size; rainy views are blurry.
        brightness_coefficient (float): lightness multiplier (< 1 darkens);
            rainy days are usually shady.
        rain_drops (iterable): (x, y) start coordinates of each drop.

    Returns:
        np.ndarray: image with rain rendered, same dtype as the input.

    Raises:
        ValueError: if the input dtype is neither uint8 nor float32.
    """
    non_rgb_warning(img)
    input_dtype = img.dtype
    needs_float = False
    if input_dtype == np.float32:
        # Work internally in uint8; convert back at the end.
        img = from_float(img, dtype=np.dtype("uint8"))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        # BUG FIX: the message previously said "RandomSnow" in this rain helper.
        raise ValueError("Unexpected dtype {} for RandomRain augmentation".format(input_dtype))
    image = img.copy()
    # Draw each streak as a slanted line from its start point.
    for (rain_drop_x0, rain_drop_y0) in rain_drops:
        rain_drop_x1 = rain_drop_x0 + slant
        rain_drop_y1 = rain_drop_y0 + drop_length
        cv2.line(image, (rain_drop_x0, rain_drop_y0), (rain_drop_x1, rain_drop_y1), drop_color, drop_width)
    image = cv2.blur(image, (blur_value, blur_value))  # rainy view are blurry
    # Darken via the lightness channel in HLS space.
    image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float32)
    image_hls[:, :, 1] *= brightness_coefficient
    image_rgb = cv2.cvtColor(image_hls.astype(np.uint8), cv2.COLOR_HLS2RGB)
    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)
    return image_rgb
def test_controls_have_techniques(attck_fixture):
    """
    MITRE Enterprise ATT&CK controls that have techniques should expose them.

    NOTE(review): the assert only runs when ``control.techniques`` is already
    truthy, so this test can never fail — consider asserting that at least one
    control has techniques instead. Also, the original docstring ("Malware
    should have Actors") looked like a copy-paste from another test.

    Args:
        attck_fixture ([type]): our default MITRE Enterprise ATT&CK JSON fixture
    """
    for control in attck_fixture.enterprise.controls:
        if control.techniques:
            assert getattr(control,'techniques')
def vrctst_tml(file_name):
    """Ensure ``file_name`` carries the vrctst_tml extension.

    :param file_name: name of file
    :type file_name: str
    :returns: file name with the extension appended if it was missing
    :rtype: str
    """
    return _add_extension(file_name, Extension.VRC_TML)
def get_pixel_coords(x, y, xres, yres, xmin, ymax):
    """
    Translate map coordinates (x, y) to raster indices (col, row).

    Example:
        col, row = map_pixel(x, y, geotransform[1],
            geotransform[-1], geotransform[0], geotransform[3])

    Parameters
    ----------
    x : float, numpy.ndarray
        X coordinates.
    y : float, numpy.ndarray
        Y coordinates.
    xres : float
        X resolution.
    yres : float
        Y resolution (typically negative for north-up rasters).
    xmin : float
        X coordinate of the raster origin (left edge).
    ymax : float
        Y coordinate of the raster origin (top edge).

    Returns
    -------
    col : int, numpy.ndarray
        Column coordinates.
    row : int, numpy.ndarray
        Row coordinates.
    """
    # Offsets from the raster origin, in pixel units.
    x_offset = (x - xmin) / xres
    y_offset = (y - ymax) / yres
    col = np.around(x_offset).astype(int)
    row = np.around(y_offset).astype(int)
    return col, row
def _boundary_of_alternatives_indices(pattern):
    """
    Determines the location of a set of alternatives in a glob pattern.
    Alternatives are defined by a matching set of non-bracketed parentheses.

    :param pattern: Glob pattern with wildcards.
    :return: Indices of the innermost set of matching non-bracketed
             parentheses in a tuple. The index of a missing parenthesis
             is passed as None.
    """
    # Taking the leftmost closing parenthesis and the rightmost opening
    # parenthesis left of it ensures that the parentheses belong together and
    # the pattern is parsed correctly from the most nested section outwards.
    end_pos = next(
        (match.start() for match in re.finditer(r'\)', pattern)
         if not _position_is_bracketed(pattern, match.start())),
        None)
    start_pos = None
    # Scan everything left of the closing parenthesis; the last hit wins,
    # giving the rightmost non-bracketed opening parenthesis.
    for match in re.finditer(r'\(', pattern[:end_pos]):
        if not _position_is_bracketed(pattern, match.start()):
            start_pos = match.end()
    return start_pos, end_pos
def plot(model, featnames=None, num_trees=None, plottype='horizontal', figsize=(25,25), verbose=3):
    """Make tree plot for the input model.

    Parameters
    ----------
    model : model
        xgboost or randomforest model.
    featnames : list, optional
        list of feature names. The default is None.
    num_trees : int, default None
        The best performing tree is choosen. Specify any other ordinal number for another target tree
    plottype : str, (default : 'horizontal')
        Works only in case of xgb model. Either 'horizontal' or 'vertical'.
    figsize: tuple, default (25,25)
        Figure size, (height, width)
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: NONE, 1: ERROR, 2: WARNING, 3: INFO (default), 4: DEBUG, 5: TRACE

    Returns
    -------
    ax : Figure axis
        Figure axis of the input model, or None for unrecognized models.
    """
    modelname = str(model).lower()
    # Dispatch to the plotting pipeline matching the model's repr.
    if 'xgb' in modelname:
        if verbose >= 4:
            print('xgboost plotting pipeline.')
        return xgboost(model, featnames=featnames, num_trees=num_trees,
                       figsize=figsize, plottype=plottype, verbose=verbose)
    if any(kind in modelname for kind in ('tree', 'forest', 'gradientboosting')):
        if verbose >= 4:
            print('tree plotting pipeline.')
        return randomforest(model, featnames=featnames, num_trees=num_trees,
                            figsize=figsize, verbose=verbose)
    if 'lgb' in modelname:
        return lgbm(model, featnames=featnames, num_trees=num_trees,
                    figsize=figsize, verbose=verbose)
    print('[treeplot] >Model not recognized: %s' %(modelname))
    return None
def post_images(*, image_path: str,) -> dict:
    """
    Convert Image to PDF.

    Downloads the image from Nextcloud, runs Tesseract OCR to produce a PDF,
    uploads the PDF back to the OCR output folder, and tags the source file.

    :param image_path: Nextcloud path of the image (keyword-only)
    :return: dict with keys ``image_path``, ``message`` and ``status``
        (``OK``, ``IGNORED`` or ``Failure``)
    """
    logging.debug('image_path: ' + image_path)
    try:
        # Strip the "<user>/files" prefix so the path is relative to the root.
        if(image_path.startswith(NEXTCLOUD_USERNAME+"/files")):
            image_path = image_path[len(NEXTCLOUD_USERNAME+"/files"):]
            logging.debug('image_path: ' + image_path)
        # Optionally only process files inside the configured input folder.
        if(VERIFY_INPUTPATH and not image_path.startswith(NEXTCLOUD_OCR_INPUT_DIR)):
            return {"image_path": image_path,"message": "ignored because folder not " + NEXTCLOUD_OCR_INPUT_DIR , "status": "IGNORED"}
    except Exception as e:
        print("parse error: " + str(e))
    try:
        # Scratch directory for the downloaded image and generated PDF.
        workdir = tempfile.mkdtemp(prefix="ocr_")
        with NextCloud(
            NEXTCLOUD_URL,
            user=NEXTCLOUD_USERNAME,
            password=NEXTCLOUD_PASSWORD,
        ) as nxc:
            # NOTE(review): each stage swallows its exception and only prints;
            # a failed download leaves names like nc_file_remote unbound and
            # the next stage then raises NameError — caught by the outer
            # except. Consider failing fast per stage instead.
            try:
                nc_file_list = nxc.list_folders(NEXTCLOUD_OCR_INPUT_DIR).data
                logging.debug('nc_file_list: ' + str(nc_file_list))
                nc_file_remote_name = image_path
                logging.debug('nc_file_remote_name: ' + str(nc_file_remote_name))
                nc_file_remote = nxc.get_file(nc_file_remote_name)
                logging.debug('nc_file_remote: ' + str(nc_file_remote))
                nc_file_name, nc_file_ext = os.path.splitext(os.path.basename(nc_file_remote_name))
                logging.debug('nc_file_name: ' + str(nc_file_name))
                logging.debug('nc_file_ext: ' + str(nc_file_ext))
                nc_file_remote.download(target=workdir)
            except Exception as e:
                print("download error: " + str(e))
            try:
                # OCR the downloaded image into a PDF (German language model).
                pdf = pytesseract.image_to_pdf_or_hocr(os.path.join(workdir, nc_file_name + nc_file_ext), extension='pdf', lang="deu")
                # Suffix the Nextcloud file id to keep output names unique.
                file_ocr_name = os.path.join(workdir, nc_file_name + "-" + str(nc_file_remote.file_id) + ".pdf")
                with open(file_ocr_name, 'w+b') as file_ocr:
                    file_ocr.write(pdf)
            except Exception as e:
                print("ocr error: " + str(e))
            try:
                # Upload the PDF and tag the source image as processed.
                file_ocr_remote = NEXTCLOUD_OCR_OUTPUT_DIR + "/" + nc_file_name + "-" + str(nc_file_remote.file_id) + ".pdf"
                nxc.upload_file(file_ocr_name, file_ocr_remote).data
                nc_file_remote.add_tag(tag_name=NEXTCLOUD_OCR_TAG)
            except Exception as e:
                print("upload error: " + str(e))
            shutil.rmtree(workdir)
            return {"image_path": image_path,"message": "successfully converted " + nc_file_remote_name + " to " + file_ocr_remote, "status": "OK"}
    except Exception as e:
        return {"image_path": image_path,"message": str(e), "status": "Failure"}
def sim_DA_from_timestamps2_p2_2states(timestamps, dt_ref, k_D, R0, R_mean,
                                       R_sigma, tau_relax, k_s, rg,
                                       chunk_size=1000, alpha=0.05, ndt=10):
    """
    2-states recoloring using CDF in dt and with random number caching.

    Simulates donor/acceptor photon recoloring for a system switching between
    two conformational states, with the D-A distance following an
    Ornstein-Uhlenbeck process within each state.

    :param timestamps: photon macrotimes (array)
    :param dt_ref: reference integration time step
    :param k_D: donor de-excitation rate
    :param R0: Foerster radius
    :param R_mean: per-state mean D-A distance (length-2)
    :param R_sigma: per-state D-A distance std-dev (length-2)
    :param tau_relax: per-state OU relaxation time (length-2)
    :param k_s: state transition rates (k_s[0]: 0->1, k_s[1]: 1->0)
    :param rg: random number generator (randn/rand interface)
    :param chunk_size: size of the cached random-number chunks
    :param alpha: threshold above which the linear emission probability is
        replaced by the exact exponential CDF
    :param ndt: minimum number of integration steps per relaxation time
    :return: tuple (A_ph, R_ph, T_ph, S_ph) per photon: acceptor flag,
        D-A distance at emission, nanotime, and state index
    """
    # Per-state time step; shrink it so each OU relaxation time is resolved
    # by at least `ndt` steps.
    dt = np.array([dt_ref] * 2, dtype=np.float64)
    for state in [0, 1]:
        if tau_relax[state] < ndt * dt[state]:
            dt[state] = tau_relax[state] / ndt
            print(f'WARNING: Reducing dt[{state}] to {dt[state]:g} '
                  f'[tau_relax[{state}] = {tau_relax[state]}]')
    # Array flagging photons as A (1) or D (0) emitted
    A_ph = np.zeros(timestamps.size, dtype=np.uint8)
    # Instantaneous D-A distance at D de-excitation time
    R_ph = np.zeros(timestamps.size, dtype=np.float64)
    # Time of D de-excitation relative to the last timestamp
    T_ph = np.zeros(timestamps.size, dtype=np.float64)
    # State for each photon
    S_ph = np.zeros(timestamps.size, dtype=np.uint8)
    # Equilibrium occupation probabilities of the two states.
    peq = [k_s[1] / (k_s[0] + k_s[1]),
           k_s[0] / (k_s[0] + k_s[1])]
    k_s_sum = np.sum(k_s)
    t0 = 0
    nanotime = 0
    state = 0  # the two states are 0 and 1
    # Draw the initial D-A distance from the state's equilibrium distribution.
    R = rg.randn() * R_sigma[state] + R_mean[state]
    iN = chunk_size - 1  # value to get the first chunk of random numbers
    for iph, t in enumerate(timestamps):
        # each cycle starts with a new photon timestamp `t`
        # excitation time is `t`, emission time is `t + nanotime`
        delta_t0 = t - t0
        delta_t = delta_t0 - nanotime
        if delta_t < 0:
            # avoid negative delta_t possible when when two photons have
            # the same macrotime
            delta_t = 0
            t = t0
        # Probability of still being in the current state after delta_t0
        # (two-state relaxation towards the equilibrium occupancy).
        p_state = (1 - peq[state]) * np.exp(-(delta_t0 * k_s_sum)) + peq[state]
        u = rg.rand()
        #print(f'iph={iph}, state={state}, p_state={p_state}, u={u}, delta_t0={delta_t0}')
        # Inversion of u is for compatibility with N-state version
        if state == 1:
            u = 1 - u
        if p_state <= u:
            #print('  * state change')
            state = 0 if state == 1 else 1
            # Re-draw R from the new state's equilibrium distribution.
            R = rg.randn() * R_sigma[state] + R_mean[state]
        # Compute the D-A distance at the "excitation time"
        iN += 1
        if iN == chunk_size:
            # Refill the cached chunks of normal and uniform random numbers.
            Na = memoryview(rg.randn(chunk_size))
            Pa = memoryview(rg.rand(chunk_size))
            iN = 0
        N = Na[iN]
        p = Pa[iN]
        R = ou_single_step_cy(R, delta_t, N, R_mean[state], R_sigma[state],
                              tau_relax[state])
        nanotime = 0
        # loop through D-A diffusion steps with a fixed time-step dt
        # until D de-excitation by photon emission or energy transfer to A
        while True:
            k_ET = k_D * (R0 / R)**6
            k_emission = k_ET + k_D
            d_prob_ph_em = k_emission * dt[state]  # prob. of emission in dt
            if d_prob_ph_em > alpha:
                # Linear approximation too coarse: use the exact CDF instead.
                d_prob_ph_em = 1 - exp(-d_prob_ph_em)
            if d_prob_ph_em >= p:
                break  # break out of the loop when the photon is emitted
            nanotime += dt[state]
            iN += 1
            if iN == chunk_size:
                Na = memoryview(rg.randn(chunk_size))
                Pa = memoryview(rg.rand(chunk_size))
                iN = 0
            N = Na[iN]
            p = Pa[iN]
            R = ou_single_step_cy(R, dt[state], N, R_mean[state], R_sigma[state],
                                  tau_relax[state])
        # photon emitted, let's decide if it is from D or A
        p_DA = p / d_prob_ph_em  # equivalent to rand(), but faster
        prob_A_em = k_ET / k_emission
        if prob_A_em >= p_DA:
            A_ph[iph] = 1
        # time of D de-excitation by photon emission or energy transfer to A
        t0 = t
        # save D-A distance at emission time
        R_ph[iph] = R
        # save time of emission relative to the excitation time `t`
        T_ph[iph] = nanotime
        # Save state for current photon
        S_ph[iph] = state
    return A_ph, R_ph, T_ph, S_ph
def enabled_enhancements(config):
    """
    Generator yielding the new style enhancements enabled in ``config``.

    :param config: Configuration.
    :type config: :class:`certbot.interfaces.IConfig`
    """
    # An enhancement is enabled when its CLI destination flag is truthy.
    yield from (enhancement for enhancement in _INDEX
                if getattr(config, enhancement["cli_dest"]))
def draw_heatmap(square_size: int, city: generation.City, data: ScreenData):
    """ Draws the population heatmap to the screen in the given ScreenData """
    # Number of heatmap squares needed to tile the screen (+1 for the edges).
    x_max = math.ceil(config.SCREEN_RES[0] / square_size) + 1
    y_max = math.ceil(config.SCREEN_RES[1] / square_size) + 1
    for x in range(0, x_max):
        for y in range(0, y_max):
            screen_point = (x * square_size,
                            y * square_size)
            # Sample the population density at the square's world position.
            world_point = screen_to_world(screen_point, data.pan, data.zoom)
            intensity = city.pop.at_point(world_point)
            # Map intensity to a green channel value clamped to [0, 255].
            color = (0, max(min(intensity * 83, 255), 0), 0)
            # Centre the square on the sampled point.
            pos = (screen_point[0] - (square_size / 2),
                   screen_point[1] - (square_size / 2))
            dim = (square_size, square_size)
            pygame.draw.rect(data.screen, color, pygame.Rect(pos, dim))
def get_cross_track_error(data, rate, velocity):
    """Return the cross-track position time series (in nautical miles).

    The algorithm simulates an aircraft traveling on a straight trajectory who
    turns according to the data provided. The aircraft instantaneously updates
    its heading at each timestep by Ω * Δt.

    .. warning: This code assumes that the magnitude of the rotations in data
        is small in order to use a paraxial approximation sin(θ) = θ.
        This paraxial approximation speeds up the algorithm, which is important
        if cross track error simulations will occur hundreds of times.

    This can be used in conjunction with `simulate_fog_single`. In order to
    simulate a transpacific flight and estimate the cross-track error for a
    single run, one could run:

    >>> rate = 1  # Hz
    >>> data = simulate_fog_single(rate=rate, hours=10, arw=.0413,
    ...                            drift=.944, correlation_time=3600)
    >>> xtk = get_cross_track_error(data, rate, 900)

    Parameters
    ----------
    data: ndarray.float
        An array of rotation rates, in deg/h
    rate: float
        The sampling rate of data in Hz
    velocity: float
        The velocity of the simulated aircraft in kph

    Returns
    -------
    ndarray.float
        Cross-track error at each sample, in nautical miles. (Take the last
        element for the final cross-track error.)
    """
    # Heading increment per sample: deg/h -> rad per sample.
    heading_step = data * np.pi / 180 / 3600 / rate
    heading = np.cumsum(heading_step)
    # Lateral displacement per sample (paraxial: sin(θ) ≈ θ), in metres.
    lateral_step = velocity * 1000 / 3600 / rate * heading
    # Accumulate and convert metres to nautical miles.
    return np.cumsum(lateral_step) / 1852
def _download_files(download, kwargs):
    """
    Helper function to download content from Salt Master.

    Each listed key whose value is a ``salt://`` URL is replaced in-place in
    ``kwargs`` with the downloaded file content.

    :param download: (list or str) list of keys or comma separated string
        of key names from kwargs to download
    :param kwargs: (dict) dictionary with data to download
    :raises CommandExecutionError: when a ``salt://`` file cannot be retrieved
    """
    saltenv = kwargs.get("saltenv", "base")
    download = download.split(",") if isinstance(download, str) else download
    # iterate over download keys and download data from master
    for key in download:
        if key not in kwargs:
            continue
        # Only salt:// URLs are fetched; literal values are left untouched.
        if not kwargs[key].startswith("salt://"):
            continue
        # download data
        content = __salt__["cp.get_file_str"](kwargs[key], saltenv=saltenv)
        if not content:
            raise CommandExecutionError(
                "Failed to get '{}' file content".format(key)
            )
        kwargs[key] = content
        log.debug(
            # BUG FIX: corrected "donwloaded" typo in the log message.
            "Nornir-proxy MAIN PID {} worker thread, downloaded '{}' data from Master".format(
                os.getpid(), key
            )
        )
async def get_list_address():
    """Get list of address.

    Delegates to the address service layer and returns all stored addresses.
    """
    return await service.address_s.all()
def handle_usergroup_manifests(groups):
    """Create usergroup manifests, saving only those that changed.

    :param groups: mapping whose keys are usergroup names (values unused here)
    """
    for group_name in groups:
        manifest = UsergroupManifest(group_name)
        if manifest.changed:
            manifest.save()
def run_clustering(pss, cfg):
    """
    Launch the external clustering application for a dataset.

    Expected cfg keys:
    key         => value
    "usuario"   parameters["email"]
    "dataset"   parameters["dataset"]
    "extension" parameters["ext"]
    "model"     parameters["mod"]
    "algorithm" parameters["algorithm"]
    "metric"    parameters["metric"]
    "optimizer" parameters["optimizer"]
    "index"     parameters["ivi"]

    :param pss: run/session identifier used for the default in/out folders
    :param cfg: configuration dict as above, or ``[]`` to use defaults

    WARNING(review): the shell commands below are built by string
    concatenation from cfg values (including a user-supplied email), so a
    crafted value could inject shell commands via os.system. Consider
    subprocess.run with an argument list — confirm how cfg is sanitised
    upstream before relying on this.
    """
    # Ensure the per-run output directory exists.
    if not os.path.isdir(LOCATION_OUT+pss+"/"):
        os.mkdir(LOCATION_OUT+pss+"/")
    if cfg == []:
        # Default mode: locate the single .mtx/.mat dataset in the run folder.
        location_in = LOCATION_IN+pss+"/"
        location_out = LOCATION_OUT+pss+"/"
        dbname = [d for d in os.listdir(location_in) if ".mtx" in d or ".mat" in d][0]
        os.system(PYTHON_MODE+" "+CLSTR_APP+" -i "+location_in+" -o "+location_out+" "+dbname)
    else:
        # Configured mode: derive folders from the user's email and dataset.
        dbfile = "data."+cfg["extension"]
        # dblbl = pss+".labels"
        folder = cfg["usuario"].replace('.','').split('@')[0]+cfg["dataset"]+"/"
        location_in = LOCATION_IN+folder
        location_out = LOCATION_OUT+folder
        if not os.path.isdir(LOCATION_OUT):
            os.mkdir(LOCATION_OUT)
        mtr = cfg["metric"]
        ivi = cfg["index"]
        alg = cfg["algorithm"]
        opt = cfg["optimizer"]
        model = cfg["model"]
        os.system(PYTHON_MODE+" "+CLSTR_APP+" -a "+alg+" -t "+opt+" -e "+ivi+" -f "+model+" -i "+location_in+" -o "+location_out+" -d "+mtr+" "+dbfile)
def test_deprecate_pandas_dtype_enum(schema_cls):
    """Test that using the PandasDtype enum raises a DeprecationWarning."""
    for attr in pa.PandasDtype:
        # Skip 128-bit types on platforms where numpy does not provide them.
        if not FLOAT_128_AVAILABLE and attr in {
            "Float128",
            "Complex256",
        }:
            continue
        # Both the enum attribute access and schema construction should warn.
        with pytest.warns(DeprecationWarning):
            pandas_dtype = getattr(pa.PandasDtype, attr)
            schema_cls(dtype=pandas_dtype)
def load_xml_stream(
    file_path: pathlib.Path, progress_message: Optional[str] = None
) -> progress.ItemProgressStream:
    """Load an iterable xml file with a progress bar.

    :param file_path: path to the XML file to parse
    :param progress_message: optional message shown next to the progress bar
    :return: a progress-tracked stream over the XML root's children
    """
    root = ElementTree.parse(file_path).getroot()
    return progress.ItemProgressStream(
        root, len(root), prefix=" ", message=progress_message,
    )
def SNR_band(cp, ccont, cb, iband, itime=10.):
    """
    Calculate the S/N achieved on a molecular band for a given exposure time,
    following Eqn 7 from Robinson et al. 2016.

    Parameters
    ----------
    cp :
        Planet count rate
    ccont :
        Continuum count rate
    cb :
        Background count rate
    iband :
        Indices of the molecular band
    itime :
        Integration time [hours]

    Returns
    -------
    snr : float
        SNR to detect band given exposure time
    """
    band_counts = cp[iband]
    # Shot noise: planet plus twice the background within the band.
    noise = np.power(np.sum(band_counts + 2.*cb[iband]), 0.5)
    # Signal: total continuum-to-band depth across the band.
    signal = np.sum(np.fabs(ccont - band_counts))
    return np.power(itime*3600., 0.5) * signal / noise
def acquire_lease(lease_id, client_id, ttl=DEFAULT_LOCK_DURATION_SECONDS, timeout=DEFAULT_ACQUIRE_TIMEOUT_SECONDS):
    """
    Try to acquire the lease. If it fails, return None; else return the lease
    serialized to json. The timeout is crudely implemented with 1-second
    sleeps and retries, so it is approximate, not precise.

    :param lease_id: the lease name to acquire
    :param client_id: the id to use to set the lease to the caller's identity
    :param ttl: requested lease duration in seconds, forwarded to Lease.do_acquire
    :param timeout: int (seconds) to keep retrying and waiting before giving up
    :return: json of the acquired lease (via Lease.to_json) or None if not acquired
    :raises KeyError: if no lease row with lease_id exists
    """
    logger.debug('Attempting to acquire lease {} for {}'.format(lease_id, client_id))
    t = 0
    while t < timeout:
        try:
            with session_scope() as db:
                # Blocking row-level lock (SELECT ... FOR UPDATE) so only one
                # process inspects/updates this lease row at a time.
                ent = db.query(Lease).with_for_update(nowait=False, of=Lease).get((lease_id))
                if not ent:
                    raise KeyError(lease_id)
                # do_acquire is truthy only when this client now holds the lease.
                if ent.do_acquire(client_id, duration_sec=ttl):
                    return ent.to_json()
        except Exception as e:
            # Lock-acquisition failures are expected under contention and are
            # retried quietly; anything else is logged with a traceback but
            # still retried.
            if not is_lock_acquisition_error(e):
                logger.exception('Unexpected exception during lease acquire. Will retry')
        # Reached when the lease is held by another client or an exception
        # occurred above; back off one second and try again.
        logger.debug('Retrying acquire of lease {} for {}'.format(lease_id, client_id))
        t += 1
        time.sleep(1)
    else:
        # Exceeded retry count, so failed to get lease
        logger.info('Failed to get lease {} for {} after {} retries'.format(lease_id, client_id, t))
        return None
def create_index(conn, column_list, table='perfdata', unique=False):
    """Creates one index on a list of/one database column/s.

    Parameters:
        conn: open DB-API connection (must support conn.cursor()).
        column_list (str): comma-separated column list; interpolated into the
            SQL verbatim, so it must come from trusted code only.
        table (str): table name, sanitized via base2.filter_str().
        unique (bool): create a UNIQUE index if True.

    Returns:
        tuple: (True, True) on success,
               (False, error message) if the query failed.
    """
    table = base2.filter_str(table)
    # Deterministic index name derived from table+columns, so re-running the
    # same request is a no-op thanks to IF NOT EXISTS.
    index_name = u'idx_{}'.format(base2.md5sum(table + column_list))
    # Build the statement once; only the UNIQUE keyword differs.
    uniqueness = u'UNIQUE ' if unique else u''
    sql = u'CREATE {}INDEX IF NOT EXISTS {} ON "{}" ({});'.format(
        uniqueness, index_name, table, column_list
    )
    c = conn.cursor()
    try:
        c.execute(sql)
    except Exception as e:
        return (False, u'Query failed: {}, Error: {}'.format(sql, e))
    finally:
        # Always release the cursor, even when the query fails (the original
        # code leaked it).
        c.close()
    return (True, True)
def produce_hash(self: Type[PCell], extra: Any = None) -> str:
    """Produce a short hash of a PCell instance.

    The hash combines:
      1. the source code of the instance's class and all its PCell ancestors,
      2. the non-default parameters the pcell method was called with,
      3. the name of the pcell, and
      4. the optional ``extra`` value.
    """
    # Concatenate the source of every PCell subclass in the MRO.
    ancestry_source = "".join(
        inspect.getsource(klass)
        for klass in self.__class__.__mro__
        if issubclass(klass, PCell)
    )
    params = dict(self.params)
    # Build the "{key!r: value}" form by hand: str(params) would call
    # __repr__ on the inner values, which may not be implemented readably
    # for every parameter type.
    params_repr = "{%s}" % ", ".join("%r: %s" % item for item in params.items())
    digest = sha256(
        (ancestry_source + params_repr + self.name + str(extra)).encode()
    ).hexdigest()
    # Keep only a short 7-hex-char prefix, git-style.
    return digest[:7]
def test_copy_keys_successful(
    helpers, s3_mock, transactions, source_bucket, target_bucket, files
):
    """copy_keys should copy files to target location."""
    # Arrange: seed the source bucket; create the target bucket when distinct.
    helpers.create_s3_files({name: name for name in files["create_files"]}, bucket=source_bucket)
    if source_bucket != target_bucket:
        s3_mock.create_bucket(
            Bucket=target_bucket,
            CreateBucketConfiguration={"LocationConstraint": "eu-west1"},
        )
    # Act & Assert: copy_keys returns None and every expected key is readable.
    assert copy_keys(transactions) is None
    for expected_key in files["check_files"]:
        response = s3_mock.get_object(Bucket=target_bucket, Key=expected_key)
        assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
def move_upload_files_to_trash(study_id, files_to_move):
    """Move files to a trash folder within the study_id upload folder

    Parameters
    ----------
    study_id : int
        The study id
    files_to_move : list
        List of tuples (folder_id, filename)

    Raises
    ------
    QiitaDBError
        If folder_id or the study folder don't exist and if the filename to
        erase matches the trash_folder, internal variable
    """
    trash_folder = 'trash'
    # Map each uploads mountpoint id to its base path.
    folders = dict(get_mountpoint("uploads", retrieve_all=True))
    for fid, filename in files_to_move:
        # Refuse to move the trash folder into itself.
        if filename == trash_folder:
            raise qdb.exceptions.QiitaDBError(
                "You can not erase the trash folder: %s" % trash_folder)
        if fid not in folders:
            raise qdb.exceptions.QiitaDBError(
                "The filepath id: %d doesn't exist in the database" % fid)
        foldername = join(folders[fid], str(study_id))
        if not exists(foldername):
            raise qdb.exceptions.QiitaDBError(
                "The upload folder for study id: %d doesn't exist" % study_id)
        # Ensure the per-study trash folder exists before moving into it.
        trashpath = join(foldername, trash_folder)
        create_nested_path(trashpath)
        source = join(foldername, filename)
        destination = join(foldername, trash_folder, filename)
        # Files that are already gone are silently skipped.
        if exists(source):
            rename(source, destination)
def stopSimulation():
    """Application-level helper that halts the current simulation run."""
    # Delegate to the sim module's own stop routine.
    sim.stopSimulation()
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
    """Convert boxes from the old (fake) LiDAR convention to the new one.

    Args:
        boxes3d_lidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates,
            where z is the box *bottom* center.

    Returns:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], where (x, y, z)
            is the box geometric center.

    Note:
        Unlike earlier revisions, the caller's array is no longer modified
        in place; a new array is returned.
    """
    boxes3d_lidar = np.asarray(boxes3d_lidar)
    w, l, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
    # Lift the bottom-center z to the geometric center (half the height up)
    # on a copy, so the input array is not mutated.
    center_xyz = boxes3d_lidar[:, 0:3].copy()
    center_xyz[:, 2] += h[:, 0] / 2
    # Swap w/l into (dx, dy) order and shift the heading convention by -pi/2.
    return np.concatenate([center_xyz, l, w, h, -(r + np.pi / 2)], axis=-1)
def combine_per_choice(*args):
    """
    Combines two or more per-choice analytics results into one.

    Each argument maps a choice key to a ``(weight, averages)`` pair, where
    ``averages`` maps player-model names to average values. Combining merges
    all keys and computes weight-weighted averages per player model.

    Returns:
        The combined mapping (the last argument is mutated and returned).

    Raises:
        ValueError: if two results being combined for the same key used
            different sets of player models.

    Fix over the previous revision: the merged weight/averages are now local
    to each key. Before, they leaked across loop iterations, so the
    player-model consistency check compared against a stale, unrelated key's
    averages (and was skipped entirely on the first merged key).
    """
    args = list(args)
    result = args.pop()
    while args:
        other = args.pop()
        for key in other:
            if key not in result:
                result[key] = other[key]
            else:
                old_weight, old_averages = result[key]
                other_weight, other_averages = other[key]
                # Both sides must have been produced with the same player
                # models, otherwise their averages are not comparable.
                if set(old_averages.keys()) != set(other_averages.keys()):
                    raise ValueError(
                        "Can't combine per-choice results which used different sets of "
                        "player models."
                    )
                merged_weight = old_weight + other_weight
                merged_averages = {
                    pmn: (
                        old_averages[pmn] * old_weight
                        + other_averages[pmn] * other_weight
                    ) / merged_weight
                    for pmn in old_averages
                }
                result[key] = (merged_weight, merged_averages)
    return result
def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext: keep ASCII letters only, upper-case them, insert
    an 'X' between identical adjacent letters, and pad with a trailing 'X'
    to an even length.
    """
    filtered = "".join(ch.upper() for ch in dirty if ch in string.ascii_letters)
    if len(filtered) < 2:
        return filtered
    pieces = []
    # Walk adjacent pairs; a repeated letter gets an 'X' wedged between.
    for current, following in zip(filtered, filtered[1:]):
        pieces.append(current)
        if current == following:
            pieces.append("X")
    pieces.append(filtered[-1])
    prepared = "".join(pieces)
    # Pad to even length so the text splits cleanly into digraphs.
    return prepared + "X" if len(prepared) % 2 else prepared
def test_set_udp():
    """StatsClient.set works."""
    client = _udp_client()
    _test_set(client, 'udp')
def calc_center_from_box(box_array):
    """Compute the (integer) center point of each box.

    Args:
        box_array (array): N*4 [left_top_x, left_top_y, right_bottom_x, right_bottom_y]

    Returns:
        array N*2: center points array [x, y]
    """
    # Midpoint of each corner pair, using floor division like the original.
    centers = [
        [(left + right) // 2, (top + bottom) // 2]
        for left, top, right, bottom in box_array
    ]
    return np.array(centers)
def fakeperson(bot, trigger):
    """Posts a not real person. 😱
    Uses thispersondoesnotexist.com

    Downloads a GAN-generated face, stores it under a random 5-character
    filename, and posts the resulting public URL to the channel.
    """
    url = "https://thispersondoesnotexist.com/image"
    try:
        # Always pass a timeout: requests.get() has none by default and
        # would otherwise hang the bot indefinitely on a stalled connection.
        image = requests.get(url, timeout=30)
        # Treat HTTP error statuses as failures instead of saving error pages.
        image.raise_for_status()
        filename = ''.join(
            random.SystemRandom().choice(
                string.ascii_letters +
                string.digits) for _ in range(5))
        with open("/mnt/media/websites/actionsack.com/tmp/fp_{}.jpg".format(filename), "wb") as file:
            file.write(image.content)
        bot.say(
            "https://actionsack.com/tmp/fp_{}.jpg".format(filename))
    except Exception:
        # Narrowed from BaseException so Ctrl-C / SystemExit still propagate.
        bot.reply("Error reaching API, probably.")
def getW3D(coords):
    """
    #################################################################
    The calculation of 3-D Wiener index based
    gemetrical distance matrix optimized
    by MOPAC(Not including Hs)
    -->W3D

    coords: iterable of (x, y, z, symbol) rows; hydrogen rows are skipped.
    Returns half the sum of the pairwise geometric distance matrix
    (each distance appears twice in the symmetric matrix).
    #################################################################
    """
    # numpy replaces the scipy.sum numpy-alias, which was deprecated and has
    # been removed from SciPy; imported locally so this fix does not depend
    # on the module's top-level imports.
    import numpy as np
    heavy_atoms = [
        [float(row[0]), float(row[1]), float(row[2])]
        for row in coords
        if row[3] != 'H'
    ]
    DistanceMatrix = GetGementricalDistanceMatrix(heavy_atoms)
    return np.sum(DistanceMatrix) / 2.0
def state_vectors(
    draw: Any,
    max_num_qudits: int = 3,
    allowed_bases: Sequence[int] = (2, 3),
    min_num_qudits: int = 1,
) -> StateVector:
    """Hypothesis strategy for generating `StateVector`'s."""
    # Draw a mutually consistent (count, radixes) pair, then sample a
    # random state vector of that shape.
    drawn = draw(
        num_qudits_and_radixes(
            max_num_qudits, allowed_bases, min_num_qudits,
        ),
    )
    num_qudits, radixes = drawn
    return StateVector.random(num_qudits, radixes)
def ProcessChainsAndLigandsOptionsInfo(ChainsAndLigandsInfo, ChainsOptionName, ChainsOptionValue, LigandsOptionName = None, LigandsOptionValue = None):
    """Process specified chain and ligand IDs using command line options.

    Arguments:
        ChainsAndLigandsInfo (dict): A dictionary containing information
            about existing chains and ligands.
        ChainsOptionName (str): Name of command line chains option.
        ChainsOptionValue (str): Value for command line chains option.
        LigandsOptionName (str): Name of command line ligands option.
        LigandsOptionValue (str): Value for command line ligands option.

    Returns:
        dict: A dictionary containing a list of chain identifiers and, per
        chain, a list of ligand names.

    Examples:
        ChainsAndLigandsInfo = ProcessChainsAndLigandsOptionsInfo(Infile,
            MolName)
        for ChainID in ChainsAndLigandsInfo["ChainIDs"]:
            for LigandID in ChainsAndLigandsInfo["LigandIDs"][ChainID]:
                MiscUtil.PrintInfo("ChainID: %s; LigandID: %s" % (ChainID,
                    LigandID))
    """
    SpecifiedChainsAndLigandsInfo = {"ChainIDs": [], "LigandIDs": {}}
    # Nothing specified on the command line: return the empty skeleton.
    if ChainsOptionValue is None:
        return SpecifiedChainsAndLigandsInfo
    _ProcessChainIDs(ChainsAndLigandsInfo, SpecifiedChainsAndLigandsInfo, ChainsOptionName, ChainsOptionValue)
    # Ligands are only resolved after the chains have been processed.
    if LigandsOptionValue is not None:
        _ProcessLigandIDs(ChainsAndLigandsInfo, SpecifiedChainsAndLigandsInfo, LigandsOptionName, LigandsOptionValue)
    return SpecifiedChainsAndLigandsInfo
def signal_declaration_check(args, func):
"""
FUNCTION: signal_decleration_check(a(), b str)
a: tupple containing the signal statements
b: string name of the design function
- Signal decleration check.
"""
# Python's variable declarations
#----------------------------------------------------------------------------------------------------------------------------------
multiple_sig_names = ''
bit = "01"
letter_d = "abcdefghijklmnopqrstuvwxyz"
letter_u = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
letter = letter_u + letter_d
signal_decl_keys_sup = ['D', 'T', 'L', 'N', 'V', "del"]
signal_decl_keys_req_sim = ['T', 'N', 'V', "del"]
signal_decl_keys_req = ['D', 'T', 'L', 'N']
comp_signal_decl_keys_sup = ['D', 'T', 'L', 'N']
signal_decl_direct_sup = ['i', 'o', "intr", 'v']
comp_signal_decl_direct_sup = ['i', 'o']
signal_decl_types_sup = ['b', 's', "int", "arrb", "arri", "sim"]
comp_signal_decl_types_sup = ['b']
signals = []
flag_decl_keys = 0
flag_decl_types = 0
flag_decl_direct = 0
flag_letter = 0
flag_comp = 0
flag_decl_keys_count = 0
signal_dupl_count = 0
#----------------------------------------------------------------------------------------------------------------------------------
# extracting signals from "args[]" list
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
signals.append(args[i])
#----------------------------------------------------------------------------------------------------------------------------------
# Setting "flag_comp" if the signals to be checked are from a component
#----------------------------------------------------------------------------------------------------------------------------------
if ((args[len(args) - 1][0] == "Unisim") or (args[len(args) - 1][0] == "CoreLib") or (args[len(args) - 1][0] == "custom")):
flag_comp = 1
#----------------------------------------------------------------------------------------------------------------------------------
# Checking for correct signal dictionary keys in their declaration
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
k = args[i].keys()
if (flag_comp == 0):
for j in range(len(k)):
flag_decl_keys = 0
for m in range(len(signal_decl_keys_sup)):
if (k[j] == signal_decl_keys_sup[m]):
flag_decl_keys = 1
break
if (flag_decl_keys == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect key for \"" + k[j] + "\". Possible keys are: \"D\", \"T\", \"L\", \"N\", \"V\", \"del\".")
elif (flag_comp == 1):
for j in range(len(k)):
flag_decl_keys = 0
for m in range(len(comp_signal_decl_keys_sup)):
if (k[j] == signal_decl_keys_sup[m]):
flag_decl_keys = 1
break
if (flag_decl_keys == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect key for \"" + k[j] + "\" in component \"" + args[len(args) - 1][1] + "\". Possible keys are: \"D\", \"T\", \"L\", \"N\".")
#----------------------------------------------------------------------------------------------------------------------------------
# Checking if a signal has all the required dictionary keys in their declaration
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
k = args[i].keys()
decl_keys_dupl = []
flag_decl_keys_count = 0
if (flag_comp == 0):
if (args[i]['T'] != "sim"):
flag_decl_keys_count = 0
for j in range(len(k)):
for m in range(len(signal_decl_keys_req)):
if (k[j] == signal_decl_keys_req[m]):
flag_decl_keys_count += 1
if (flag_decl_keys_count < len(signal_decl_keys_req)):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Missing keys in signal declaration. Required keys are: \"D\", \"T\", \"L\", \"N\".")
## Simulation signal check
elif (args[i]['T'] == "sim"):
flag_decl_keys_count = 0
for j in range(len(k)):
for m in range(len(signal_decl_keys_req_sim)):
if (k[j] == signal_decl_keys_req_sim[m]):
flag_decl_keys_count += 1
if (flag_decl_keys_count < len(signal_decl_keys_req_sim)):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Missing keys in simulation signal declaration. Required keys are: \"T\", \"N\", \"V\".")
elif (flag_comp == 1):
flag_decl_keys_count = 0
for j in range(len(k)):
for m in range(len(signal_decl_keys_req)):
if (k[j] == signal_decl_keys_req[m]):
flag_decl_keys_count += 1
if (flag_decl_keys_count < len(signal_decl_keys_req)):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Missing keys in signals declarations in component \"" + args[len(args) - 1][1] + "\". Required keys are: \"D\", \"T\", \"L\", \"N\".")
#----------------------------------------------------------------------------------------------------------------------------------
# Adding 'D' and 'L' keys to simulation signals declaration
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
if (args[i]['T'] == "sim"):
for j in range(len(args) - 1):
n = args[j]['N'].__doc__
if (n.find("str") == 0):
if ((args[i]['N'] == args[j]['N']) and (args[j]['T'] != "sim")):
args[i]['D'] = args[j]['D']
args[i]['L'] = args[j]['L']
elif (n.find("list") == 0):
for k in range(len(args[j]['N'])):
if ((args[i]['N'] == args[j]['N'][k]) and (args[j]['T'] != "sim")):
args[i]['D'] = args[j]['D']
args[i]['L'] = args[j]['L']
#----------------------------------------------------------------------------------------------------------------------------------
# Checking if a signal has duplication of dictionary keys in its declaration
#----------------------------------------------------------------------------------------------------------------------------------
# Checking for correct signal's size declaration and raising the proper exceptions
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
if (args[i]['T'] != "sim"):
L = args[i]['L'].__doc__
n = args[i]['N'].__doc__
if ((L.find("int") == -1) and (L.find("list") == -1)):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\".")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\".")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\".")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\" in component \"" + args[len(args) - 1][1] + "\".")
elif (L.find("int") == 0):
if (args[i]['L'] != 1):
if (args[i]['T'] == 'b'):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Single bit binary signals must have a length equal to \"1\".")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\". Single bit binary signals must have a length equal to \"1\".")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Single bit binary signals must have a length equal to \"1\".")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\" in component \"" + args[len(args) - 1][1] + "\". Single bit binary signals must have a length equal to \"1\".")
elif (args[i]['T'] == 's'):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". State signals must have a length equal to \"1\".")
elif (flag_comp == 1):
pass
if (args[i]['T'] == "int"):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Integer signals must have a length declared by a 1x2 integer list .")
elif (flag_comp == 1):
pass
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Integer signals must have a length declared by a 1x2 integer list .")
elif (flag_comp == 1):
pass
elif (L.find("list") == 0):
if (len(args[i]['L']) != 2):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of a binary bus or integer signal must be declared with a 1x2 integer list.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Length of a binary bus or integer signal must be declared with a 1x2 integer list.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\" in component \"" + args[len(args) - 1][1] +"\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (len(args[i]['L']) == 2):
if ((args[i]['T'] == 'b') or (args[i]['T'] == "int")):
L0 = args[i]['L'][0].__doc__
L1 = args[i]['L'][1].__doc__
if ((L0.find("int") != 0) or (L1.find("int") != 0)):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of a binary bus or integer signal must be declared with a 1x2 integer list.")
#elif (flag_comp == 1):
#raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][2] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Length of a binary bus or integer signal must be declared with a 1x2 integer list.")
#elif (flag_comp == 1):
#raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\" in component \"" + args[len(args) - 1][2] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (args[i]['T'] == "arrb"):
L00 = args[i]['L'][0][0].__doc__
L01 = args[i]['L'][0][1].__doc__
L10 = args[i]['L'][1][0].__doc__
L11 = args[i]['L'][1][1].__doc__
if ((L00.find("int") != 0) or (L01.find("int") != 0) or (L10.find("int") != 0) or (L11.find("int") != 0)):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of a binary array must be declared with two 1x2 positive integer lists.")
#elif (flag_comp == 1):
#raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][2] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Length of a binary array must be declared with two 1x2 posotive integer lists.")
if ((args[i]['L'][0][0] < 0) or (args[i]['L'][0][1] < 0) or (args[i]['L'][1][0] < 0) or (args[i]['L'][1][1] < 0)):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of a binary array must be declared with two 1x2 positive integer lists.")
#elif (flag_comp == 1):
#raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][2] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Length of a binary array must be declared with two 1x2 positive integer lists.")
elif (args[i]['T'] == "arri"):
L00 = args[i]['L'][0][0].__doc__
L01 = args[i]['L'][0][1].__doc__
L10 = args[i]['L'][1][0].__doc__
L11 = args[i]['L'][1][1].__doc__
print("L10, L11:", L10, L11)
if (args[i]['L'][1][0] >= args[i]['L'][1][1]):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of an integer signal must be declared with a 1x2 integer list where the 2nd integer must be greater.")
if ((L00.find("int") != 0) or (L01.find("int") != 0) or (L10.find("int") != 0) or (L11.find("int") != 0)):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of an integer signal must be declared with a 1x2 integer list where the 2nd integer must be greater.")
#elif (flag_comp == 1):
#raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][2] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + multiple_sig_names + "\". Length of an integer signal must be declared with a 1x2 integer list where the 2nd integer must be greater.")
if ((args[i]['L'][0][0] < 0) or (args[i]['L'][0][1] < 0)):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of an integer array must be declared with a 1x2 positive integer lists.")
#elif (flag_comp == 1):
#raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][2] + "\". Length of a binary bus signal must be declared with a 1x2 integer list.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Length of an integer array must be declared with a 1x2 positive integer lists.")
if (args[i]['T'] == "int"):
if (args[i]['L'][0] >= args[i]['L'][1]):
if (n.find("str") == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signal \"" + args[i]['N'] + "\". Length of an integer signal must be declared with a 1x2 integer list where the 2nd integer must be greater.")
elif (flag_comp == 1):
pass
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
if (flag_comp == 0):
print("flag_comp:", flag_comp)
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect length for signals \"" + multiple_sig_names + "\". Length of an integer signal must be declared with a 1x2 integer list where the 2nd integer must be greater.")
elif (flag_comp == 1):
pass
#----------------------------------------------------------------------------------------------------------------------------------
# checking for correct signal type and direction declaration and raising the proper exceptions
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
if (args[i]['T'] != "sim"):
if (flag_comp == 0):
n = args[i]['N'].__doc__
for j in range(len(signal_decl_direct_sup)):
if (args[i]['D'] == signal_decl_direct_sup[j]):
flag_decl_direct = 1
break
if (flag_decl_direct == 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File: \"" + func + ".py\": " + "Signal direction not supported for signal \"" + args[i]['N'] + "\" (supported directions: input -> \"i\", output -> \"o\", internal -> \"intr\", variable -> \"v\").")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal direction not supported for signals \"" + multiple_sig_names + "\" (supported directions: input -> \"i\", output-> \"o\", internal -> \"intr\", variable -> \"v\").")
elif (flag_comp == 1):
n = args[i]['N'].__doc__
for j in range(len(comp_signal_decl_direct_sup)):
if (args[i]['D'] == comp_signal_decl_direct_sup[j]):
flag_decl_direct = 1
break
if (flag_decl_direct == 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File: \"" + func + ".py\": " + "Signal direction not supported for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\" (supported directions: input -> \"i\", output -> \"o\").")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal direction not supported for signals \"" + multiple_sig_names + "\" in component \"" + args[len(args) - 1][1] + "\" (supported directions: input -> \"i\", output-> \"o\").")
if (flag_comp == 0):
flag_decl_types = 0
for j in range(len(signal_decl_types_sup)):
if (args[i]['T'] == signal_decl_types_sup[j]):
flag_decl_types = 1
break
if (flag_decl_types == 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal type not supported for signal \"" + args[i]['N'] + "\" (supported types: std_logic_vector -> \"b\", integer -> \"int\", states -> \"s\", std_logic_array -> \"arrb\", integer_array -> \"arri\").")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal type not supported for signals \"" + multiple_sig_names + "\" (supported types: std_logic_vector -> \"b\", integer -> \"int\", states -> \"s\", std_logic_array -> \"arrb\", integer_array -> \"arri\").")
elif (flag_comp == 1):
flag_decl_types = 0
for j in range(len(comp_signal_decl_types_sup)):
if (args[i]['T'] == comp_signal_decl_types_sup[j]):
flag_decl_types = 1
break
if (flag_decl_types == 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal type not supported for signal \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\" (supported types: std_logic_vector -> \"b\").")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal type not supported for signals \"" + multiple_sig_names + "\" in component \"" + args[len(args) - 1][1] + "\" (supported types: std_logic_vector -> \"b\").")
if (args[i]['T'] == "int"):
if ((args[i]['D'] != "intr") and (args[i]['D'] != 'v')):
if (flag_comp == 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Integer signal \"" + args[i]['N'] + "\" can be declared only as internal signal. I/O signals must be binary.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Integer signals \"" + multiple_sig_names + "\" can be declared only as internal signals. I/O signals must be binary.")
#----------------------------------------------------------------------------------------------------------------------------------
# checking for correct signal name declaration and raising the proper exceptions
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
n = args[i]['N'].__doc__
if (n.find("list") == 0):
if (len(args[i]['N']) == 1):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + str(args[i]['N'][0]) + "\". Single names must be declared as single strings.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + str(args[i]['N'][0]) + "\" in component \"" + args[len(args) - 1][1] + "\". Single names must be declared as single strings.")
elif (len(args[i]['N']) > 0):
for j in range(len(args[i]['N'])):
n1 = args[i]['N'][j].__doc__
if (n1.find("str") != 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + str(args[i]['N'][j]) + "\". Signal names must be declared as strings.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + str(args[i]['N'][j]) + "\" in component \"" + args[len(args) - 1][1] + "\". Signal names must be declared as strings.")
else:
if (n.find("str") != 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + str(args[i]['N']) + "\". Signal names must be declared as strings.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + str(args[i]['N']) + "\" in component \"" + args[len(args) - 1][1] + "\". Signal names must be declared as strings.")
## if (args[i]['T'] == 's'):
## if (n.find("list") == 0):
## if (flag_comp == 0):
## raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + args[i]['N'][0] + "\" . Signal name for state must be a single string.")
## elif (flag_comp == 1):
## pass
if (n.find("str") == 0):
flag_letter = 0
for j in range(len(letter)):
if (letter[j] == args[i]['N'][0]):
flag_letter = 1
break
if (flag_letter == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + args[i]['N'] + "\". Signal names must begin with a letter.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\". Signal names must begin with a letter.")
elif (n.find("list") == 0):
for k in range(len(args[i]['N'])):
flag_letter = 0
for j in range(len(letter)):
if (letter[j] == args[i]['N'][k][0]):
flag_letter = 1
break
if (flag_letter == 0):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + args[i]['N'][k] + "\". Signal names must begin with a letter.")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Incorrect signal name for \"" + args[i]['N'][k] + "\" in component \"" + args[len(args) - 1][1] + "\". Signal names must begin with a letter.")
#----------------------------------------------------------------------------------------------------------------------------------
# Checking for signal duplication
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(args) - 1):
signal_dupl_count = 0
for j in range(len(args) - 1):
n = args[j]['N'].__doc__
if (n.find("str") == 0):
if ((args[i]['D'] != "sim") and (args[j]['D'] != "sim")):
if (args[i]['N'] == args[j]['N']):
signal_dupl_count += 1
elif (n.find("list") == 0):
for k in range(len(args[j]['N'])):
if (args[i]['N'] == args[j]['N'][k]):
signal_dupl_count += 1
if (signal_dupl_count > 1):
if (flag_comp == 0):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal duplication for \"" + args[i]['N'] + "\".")
elif (flag_comp == 1):
raise _MyExceptions.MyExceptions("File: \"" + func +".py\": " + "Signal duplication for \"" + args[i]['N'] + "\" in component \"" + args[len(args) - 1][1] + "\".")
# Checking for correct signal initialization
#----------------------------------------------------------------------------------------------------------------------------------
## for i in range(len(args) - 1):
## if (args[i].has_key('V') == True):
## v = args[i]['V'].__doc__
## n = args[i]['N'].__doc__
## if (v.find("str") != 0):
## if (n.find("list") == 0):
## raise "Incorrect signal initialization for '" + args[i]['N'][0] + "' . String required."
## elif (n.find("str") == 0):
## raise "Incorrect signal initialization for '" + args[i]['N'] + "' . String required."
for i in range(len(args) - 1):
if (args[i]['D'] != "sim"):
if (flag_comp == 0):
if (args[i].has_key('V') == True):
L = args[i]['L'].__doc__
n = args[i]['N'].__doc__
v = args[i]['V'].__doc__
if (args[i]['T'] == 'b'):
if (v.find("list") == 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for signal \"" + args[i]['N'] + "\". Binary signals must be initalized with strings.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for signals \"" + multiple_sig_names + "\". Binary signals must be initalized with strings.")
if (args[i]['T'] == "int"):
if (v.find("int") != 0):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for signal \"" + args[i]['N'] + "\". Integer signals must be initalized with a single integer.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for signals \"" + multiple_sig_names + "\". Integer signals must be initalized with a single integer.")
elif (v.find("int") == 0):
if ((args[i]['V'] > args[i]['L'][1]) or (args[i]['V'] < args[i]['L'][0])):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for signal \"" + args[i]['N'] + "\". Initialization value is not included in the range of the signal.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for signals \"" + multiple_sig_names + "\". Initialization value is not included in the range of the signal.")
if (args[i]['T'] == 'b'):
if (L.find("list") == 0):
if ((abs(args[i]['L'][0] - args[i]['L'][1]) + 1) != len(args[i]['V'])):
if (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal length for \"" + args[i]['N'] + "\" initializtion.")
elif (n.find("list") == 0):
multiple_sig_names = ''
for j in range(len(args[i]['N'])):
if (j != (len(args[i]['N']) - 1)):
multiple_sig_names += args[i]['N'][j] + ", "
else:
multiple_sig_names += args[i]['N'][j]
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal length for \"" + multiple_sig_names + "\" initializtion.")
elif (L.find("int") == 0):
if (args[i]['T'] == 'b'):
if (len(args[i]['V']) != 1):
if (n.find("list") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal length for \"" + args[i]['N'][0] + "\" initializtion.")
elif (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal length for \"" + args[i]['N'] + "\" initializtion.")
if (args[i]['T'] == 'b'):
if (L.find("list") == 0):
for j in range(len(args[i]['V'])):
if (bit.find(args[i]['V'][j]) == -1):
if (n.find("list") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for binary signal \"" + args[i]['N'][0] + "\" .")
elif (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for binary signal \"" + args[i]['N'] + "\" .")
elif (L.find("int") == 0):
if (bit.find(args[i]['V']) == -1):
if (n.find("list") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for binary signal \"" + args[i]['N'][0] + "\" .")
elif (n.find("str") == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect initialization for binary signal \"" + args[i]['N'] + "\" .")
elif (args[i]['T'] == 's'):
if (v.find("list") == 0):
if (len(args[i]['V']) == 1):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal initialization for \"" + args[i]['V'][0] + "\". Single initialization values must be declared as single strings.")
for k in range(len(args[i]['V'])):
flag_letter = 0
for j in range(len(letter)):
if (letter[j] == args[i]['V'][k][0]):
flag_letter = 1
break
if (flag_letter == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal initialization for \"" + args[i]['V'][k] + "\". State names must begin with a letter.")
elif (v.find("str") == 0):
flag_letter = 0
for j in range(len(letter)):
if (letter[j] == args[i]['V'][0]):
flag_letter = 1
break
if (flag_letter == 0):
raise _MyExceptions.MyExceptions("File:\"" + func +".py\": " + "Incorrect signal initialization for \"" + args[i]['V'] + "\". State names must begin with a letter.")
elif (flag_comp == 1):
pass | 5,333,351 |
def test_update_pool(pool_api_setup):
    """Test the patch /pools/{pool_id} API EP.

    Patches the ``test_pool`` pool so that it has 2 slots and logs the
    API response.
    """
    # Build the payload from a copy: mutating the shared module-level
    # POOL_DICT would leak the changed "slots" value into every other
    # test that reuses the fixture dict.
    pool_payload = dict(POOL_DICT)
    pool_payload["slots"] = 2
    updated_pool = Pool(**pool_payload)
    api_response = pool_api_setup.patch_pool(
        "test_pool",
        updated_pool,
    )
    logging.getLogger().info("%s", api_response)
    print(f"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}")
def oa_filter(x, h, N, mode=0):
    """
    Overlap and add transform domain FIR filtering.

    This function implements the classical overlap and add method of
    transform domain filtering using a length P FIR filter.

    Parameters
    ----------
    x : input signal to be filtered as an ndarray
    h : FIR filter coefficients as an ndarray of length P
    N : FFT size > P, typically a power of two
    mode : 0 or 1, when 1 returns a diagnostic matrix

    Returns
    -------
    y : the filtered output as an ndarray
    y_mat : an ndarray whose rows are the individual overlap outputs.

    Notes
    -----
    y_mat is used for diagnostics and to gain understanding of the algorithm.

    Examples
    --------
    >>> import numpy as np
    >>> from sk_dsp_comm.sigsys import oa_filter
    >>> n = np.arange(0,100)
    >>> x = np.cos(2*np.pi*0.05*n)
    >>> h = np.ones(10)
    >>> N = 32
    >>> y = oa_filter(x,h,N)
    >>> # set mode = 1
    >>> y, y_mat = oa_filter(x,h,N,1)
    """
    P = len(h)
    # Each frame contributes L fresh input samples; the N-point circular
    # convolution is equivalent to linear convolution when N >= L + P - 1.
    L = int(N) - P + 1
    Nx = len(x)
    Nframe = int(np.ceil(Nx/float(L)))
    # zero pad the input out to a whole number of frames
    x = np.hstack((x, np.zeros(Nframe*L - Nx)))
    y = np.zeros(Nframe*N)
    # instrumentation matrix to observe the overlap and add behavior
    y_mat = np.zeros((Nframe, Nframe*N))
    H = fft.fft(h, N)
    # filter frame-by-frame in the transform domain, then overlap and add
    for k in range(Nframe):
        xk = x[k*L:(k+1)*L]
        Xk = fft.fft(xk, N)
        Yk = H*Xk
        yk = np.real(fft.ifft(Yk))
        y[k*L:k*L+N] += yk
        y_mat[k, k*L:k*L+N] = yk
    if mode == 1:
        return y[0:Nx], y_mat[:, 0:Nx]
    else:
        return y[0:Nx]
def get_languages(translation_dir: str, default_language: Optional[str] = None) -> Iterable[str]:
    """
    Get a list of available languages.

    The default language is (generic) English and will always be included. All other
    languages are read from the folder ``translation_dir``: a sub-folder is treated as a
    language if its name is either two lowercase letters, or two lowercase letters, a
    dash, and two uppercase letters.

    Valid examples: ``de``, ``de-DE``, ``de-AT``, ``de-CH``.
    Invalid examples: ``EN``, ``EN-us``, ``EN-US``, ``en-us``, ``en-USA``.

    :param default_language: The default language as used in the GetText functions within
                             the code. If not given, the default language from
                             :meth:`get_default_language` will be used.
    :param translation_dir: The directory within which the translation folders can be found.
    :return: A list of language codes supported by the application.
    """
    chosen_default = default_language or get_default_language()
    language_pattern = re.compile('^([a-z]{2})(-[A-Z]{2})?$')
    # If the folder cannot be read, fall back to just the default language.
    try:
        entries = listdir(translation_dir)
    except OSError:
        entries = []
    found = [entry for entry in entries if language_pattern.match(entry)]
    return [chosen_default] + found
def calculate_direction(a, b):
    """Calculates the direction vector between two points.

    Args:
        a (list): the position vector of point a.
        b (list): the position vector of point b.
    Returns:
        array: The (unnormalised) direction vector between points a and b.
            The smallest magnitude of an element is 1 (eg: [1,1,2]).
    """
    delta = np.subtract(a, b)
    if not np.count_nonzero(delta):
        print("The two k-points are equal")
        return np.array([0, 0, 0])
    a = np.array(a)
    b = np.array(b)
    # Mask out zero components so they cannot be picked as the divisor,
    # then replace them with a huge sentinel before taking the minimum.
    masked = ma.masked_equal(a - b, 0)
    filled = ma.filled(masked, 10 ** 6)
    smallest = np.amin(np.absolute(filled))
    # Divide by the smallest non-zero magnitude so the smallest entry is +-1.
    direction = (b - a) / smallest
    # Flip the sign convention when a -1 component is present.
    if -1 in direction:
        direction = -direction
    return direction
def argmin(a, axis=None, out=None, keepdims=None, combine_size=None):
    """
    Returns the indices of the minimum values along an axis.

    Parameters
    ----------
    a : array_like
        Input tensor.
    axis : int, optional
        By default, the index is into the flattened tensor, otherwise
        along the specified axis.
    out : Tensor, optional
        If provided, the result will be inserted into this tensor. It should
        be of the appropriate shape and dtype.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one, so the result
        broadcasts correctly against the input tensor.
    combine_size: int, optional
        The number of chunks to combine.

    Returns
    -------
    index_array : Tensor of ints
        Tensor of indices into the tensor. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    See Also
    --------
    Tensor.argmin, argmax
    amin : The minimum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.

    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.

    Examples
    --------
    >>> import mars.tensor as mt
    >>> from mars.session import new_session
    >>> sess = new_session().as_default()
    >>> a = mt.arange(6).reshape(2,3)
    >>> mt.argmin(a).execute()
    0
    >>> mt.argmin(a, axis=0).execute()
    array([0, 0, 0])
    >>> mt.argmin(a, axis=1).execute()
    array([0, 0])
    >>> b = mt.arange(6)
    >>> b[4] = 0
    >>> mt.argmin(b).execute()  # Only the first occurrence is returned.
    0
    """
    # Delegate to the argmin reduction op; the index dtype is always int.
    agg_op = TensorArgmin(axis=axis, dtype=np.dtype(int), keepdims=keepdims,
                          combine_size=combine_size)
    return agg_op(a, out=out)
def parse_game_state(gs_json: dict) -> game_state_pb2.GameState:
    """Deserialize a JSON-formatted game state to protobuf.

    :param gs_json: the raw game-state payload parsed from JSON.
    :return: the equivalent ``game_state_pb2.GameState`` message.
    :raises InvalidGameStateException: if the mandatory ``provider``
        section is missing from the payload.
    :raises DeserializationError: if any sub-section fails to parse.
    """
    if 'provider' not in gs_json:
        raise InvalidGameStateException(gs_json)
    try:
        map_ = parse_map(gs_json.get('map'))
        provider = parse_provider(gs_json['provider'])
        round_ = parse_round(gs_json.get('round'))
        player = parse_player(gs_json['player'])
        allplayers = [
            parse_allplayers_entry(steam_id, allplayers_entry)
            for steam_id, allplayers_entry in gs_json.get('allplayers', {}).items()
        ]
        # 'previously' and 'added' are optional diff sections.
        previously = parse_previously(gs_json['previously']) \
            if 'previously' in gs_json else None
        added = parse_added(gs_json['added']) if 'added' in gs_json else None
        return game_state_pb2.GameState(
            provider=provider, map=map_, round=round_, player=player,
            allplayers=allplayers, previously=previously, added=added)
    except DeserializationError:
        logger.error('Failed to deserialize game_state: %s', json.dumps(gs_json))
        # Bare `raise` preserves the original traceback; `raise e` would
        # re-anchor it at this frame.
        raise
def run():
    """
    Main processing function.

    Reads the 'TimeMachine' preference and hands it to processTM when set.
    """
    tm_pref = mmcommon.pref('TimeMachine')
    if tm_pref is None:
        return
    processTM(tm_pref)
def state2bin(s, num_bins, limits):
    """
    Map a continuous scalar state to a discrete bin index.

    :param s: a state value (one dimension of a possibly multidimensional
        state).
    :param num_bins: the total number of bins in the discretization.
    :param limits: pair ``[lo, hi]``: lower and upper limit of the dimension.

    Returns the bin index in ``0 .. num_bins - 1`` corresponding to state
    ``s`` when ``[limits[0], limits[1]]`` is split into ``num_bins`` equal
    bins. Out-of-range states are clamped to the nearest limit (with a
    warning printed).

    Examples:
        s = 0,    limits = [-1, 5],   num_bins = 6 => 1
        s = .001, limits = [-1, 5],   num_bins = 6 => 1
        s = .4,   limits = [-.5, .5], num_bins = 3 => 2
    """
    if s == limits[1]:
        return num_bins - 1
    width = limits[1] - limits[0]
    if s > limits[1]:
        # Python 3 print() calls replace the original Python 2 print
        # statements; the space-separated output is unchanged.
        print('Tools.py: WARNING: ', s, ' > ', limits[1], '. Using the chopped value of s')
        print('Ignoring', limits[1] - s)
        # Clamp to the top bin directly: plugging s = limits[1] into the
        # formula below would yield num_bins, an out-of-range index.
        return num_bins - 1
    elif s < limits[0]:
        print('Tools.py: WARNING: ', s, ' < ', limits[0], '. Using the chopped value of s')
        s = limits[0]
    return int((s - limits[0]) * num_bins / (width * 1.))
def _fix_json_agents(ag_obj):
"""Fix the json representation of an agent."""
if isinstance(ag_obj, str):
logger.info("Fixing string agent: %s." % ag_obj)
ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}}
elif isinstance(ag_obj, list):
# Recursive for complexes and similar.
ret = [_fix_json_agents(ag) for ag in ag_obj]
elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys():
ret = deepcopy(ag_obj)
text = ret.pop('TEXT')
ret['db_refs']['TEXT'] = text
else:
ret = ag_obj
return ret | 5,333,360 |
def fuzz(input_corpus, output_corpus, target_binary):
    """Run fuzzer."""
    prepare_fuzz_environment(input_corpus)
    # Note: dictionary automatically added by run_fuzz().
    # Use a dictionary for original afl as well.
    print('[run_fuzzer] Running AFL for original binary')
    normalized_dict = f'{target_binary}-normalized-none-nopt.dict'
    original_dict = f'{target_binary}-original.dict'
    shutil.copy(normalized_dict, original_dict)
    # Instead of generating a new dict, just hack this one
    # to be non-optimized to prevent AFL from aborting.
    os.system(f"sed -i 's/OPTIMIZED/NORMAL/g' {original_dict}")
    original_thread = threading.Thread(
        target=run_fuzz,
        args=(input_corpus, output_corpus,
              f'{target_binary}-original',
              ['-S', 'slave-original']))
    original_thread.start()
    print('[run_fuzzer] Running AFL for normalized and optimized dictionary')
    normalized_thread = threading.Thread(
        target=run_fuzz,
        args=(input_corpus, output_corpus,
              f'{target_binary}-normalized-none-nopt',
              ['-S', 'slave-normalized-nopt']))
    normalized_thread.start()
    print('[run_fuzzer] Running AFL for FBSP and optimized dictionary')
    # Run the last configuration in the foreground with output visible.
    run_fuzz(input_corpus,
             output_corpus,
             f'{target_binary}-no-collision-all-opt',
             ['-S', 'slave-no-collision-all-opt'],
             hide_output=False)
def vflip(img):
    """Vertically flip the given CV Image.

    Args:
        img (CV Image): Image to be flipped.
    Returns:
        CV Image: Vertically flipped image.
    Raises:
        TypeError: If ``img`` is not a numpy-backed CV image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    # flipCode=0 flips around the x-axis (top <-> bottom), i.e. a vertical
    # flip. The previous flipCode=1 flipped around the y-axis, which is a
    # horizontal flip and contradicted this function's name and docstring.
    return cv2.flip(img, 0)
def request_download(session, product_id, outdir, override=False,
                     progressbar=False):
    """Request download to ESA and interpret the response.

    :param session: authenticated requests session used for all calls.
    :param product_id: identifier of the product to download.
    :param outdir: directory where the downloaded file is written.
    :param override: forwarded to the download helper; presumably controls
        overwriting an existing local file -- TODO confirm against _dl_file.
    :param progressbar: when True, show tqdm progress while waiting.
    :raises requests.exceptions.InvalidURL: when ESA answers 404 with a
        ResponseMessage element describing why the product is unavailable.
    """
    product_url = _dl_url(product_id)
    r = session.get(product_url, stream=True)
    # Product is not available
    if r.status_code == 404:
        # The 404 body is an XML document; surface ESA's own error message.
        # NOTE(review): if no 'ResponseMessage' child is present nothing is
        # raised and execution falls through -- confirm this is intended.
        xmlroot = ET.fromstring(r.text)
        for child in xmlroot:
            if 'ResponseMessage' in child.tag:
                error_msg = child.text
                raise requests.exceptions.InvalidURL(error_msg)
    # Product is available, but ESA must process the order
    if r.status_code == 202:
        # Resend query to get correct "Retry-After" header value
        r = session.get(product_url, stream=True)
        retry_after = int(r.headers['Retry-After'])
        if progressbar:
            print('The order is being processed by ESA '
                  'and will be ready in {} seconds.'.format(retry_after))
            progress = tqdm(total=retry_after)
        # Wait out the advertised delay one second at a time so the
        # progress bar can tick.
        for i in range(retry_after):
            sleep(1)
            if progressbar:
                progress.update(1)
        if progressbar:
            progress.close()
        # Retry recursively once the wait is over. There is no `return`
        # here, but the stale 202 response cannot match the == 200 check
        # below, so the fall-through is harmless.
        request_download(session, product_id, outdir, override=override,
                         progressbar=progressbar)
    # Product is directly available
    if r.status_code == 200:
        _dl_file(session, product_url, outdir, progressbar=progressbar) | 5,333,363 |
def list_pending_tasks():
    """Return the reserved (pending) tasks across the celery cluster."""
    return celery_app.control.inspect().reserved()
def setDifficulty():
    """Show the difficulty menu and set the global difficulty factor.

    Presents five options via scrollMenu; the matching action string is
    presumably exec'd by scrollMenu to set difficultyFactor -- TODO confirm.
    Returns to the settings screen afterwards.
    """
    global difficultyFactor
    scrollMenu(
        # ASCII-art "Difficulty" banner shown above the menu.
        " _____ _ __ __ _ _ _ \n | __ \(_)/ _|/ _(_) | | | \n | | | |_| |_| |_ _ ___ _ _| | |_ _ _ \n | | | | | _| _| |/ __| | | | | __| | | |\n | |__| | | | | | | | (__| |_| | | |_| |_| |\n |_____/|_|_| |_| |_|\___|\__,_|_|\__|\__, |\n __/ |\n |___/ ",
        # Menu option labels.
        ["Invulnerable", "Easy", "Normal", "Hard", "Insane"],
        # Description shown for each option, same order as the labels.
        [
            "You cannot die or lose health. Basically, you win no matter what.",
            "Feeling casual? This is the mode for you. You lose stats half as fast as normal.",
            "Normal. For normal people. This is the normal mode. Very normal. Not much else to say.",
            "This mode is harder. It's pretty hard. Twice as hard as normal. You don't get the invulnerability at the beginning.",
            "What are you even doing?",
        ],
        # Action strings, one per option: each sets difficultyFactor
        # (0 = invulnerable ... 10 = insane).
        [
            "global difficultyFactor\ndifficultyFactor = 0",
            "global difficultyFactor\ndifficultyFactor = 0.5",
            "global difficultyFactor\ndifficultyFactor = 1",
            "global difficultyFactor\ndifficultyFactor = 2",
            "global difficultyFactor\ndifficultyFactor = 10",
        ],
        "Press backspace to go back",
    )
    # Back to the settings menu once a choice has been made.
    settings() | 5,333,365 |
def get_attack(attacker, defender):
    """
    Returns a value for an attack roll.

    Args:
        attacker (obj): Character doing the attacking
        defender (obj): Character being attacked

    Returns:
        attack_value (int): Attack roll value, compared against a defense
            value to determine whether an attack hits or misses.

    Notes:
        By default this is a flat random roll from 1 to 100 that ignores
        both combatants. The attacker and defender are still passed in so
        the roll can later be extended to account for stats, equipment and
        abilities without changing any call sites.
    """
    # Flat 1..100 roll; combatant stats are intentionally unused for now.
    return randint(1, 100)
def colored(s, color=None, attrs=None):
    """Call termcolor.colored with same arguments if this is a tty and it is available."""
    # Fall back to the plain string when color support is unavailable.
    return colored_impl(s, color, attrs=attrs) if HAVE_COLOR else s
def _combine_by_cluster(ad, clust_key='leiden'):
    """
    Collapse a cell-level AnnData into a cluster-level AnnData: each
    observation in the result is the aggregated expression profile of one
    cluster (per the `clust_key` column of `ad.obs`).
    """
    labels = []
    profiles = []
    for clust in sorted(set(ad.obs[clust_key])):
        # Aggregate the expression of every cell assigned to this cluster.
        members = ad.obs.loc[ad.obs[clust_key] == clust].index
        profiles.append(_aggregate_expression(ad[members, :].X))
        labels.append(str(clust))
    return AnnData(
        X=np.array(profiles),
        var=ad.var,
        obs=pd.DataFrame(
            data=labels,
            index=labels
        )
    )
def print_table(table):
"""Print the list of strings with evenly spaced columns."""
# print while padding each column to the max column length
col_lens = [0] * len(table[0])
for row in table:
for i, cell in enumerate(row):
col_lens[i] = max(len(cell), col_lens[i])
formats = ["{0:<%d}" % x for x in col_lens]
for row in table:
print(" ".join(formats[i].format(row[i]) for i in range(len(row)))) | 5,333,369 |
def test_serialization(mockplan):
    """Test serialization of test results."""
    # Attach a two-worker process pool and schedule the serialization test.
    proc_pool = ProcessPool(name="ProcPool", size=2)
    mockplan.add_resource(proc_pool)
    mockplan.schedule(
        target="make_serialization_mtest",
        module="test_pool_process",
        path=os.path.dirname(__file__),
        resource="ProcPool",
    )
    result = mockplan.run()
    assert result.success
def subsample_data(features, scaled_features, labels, subsamp):  # This is only for poker dataset
    """Randomly retain only a fraction of each class listed in *subsamp*.

    ``subsamp`` maps a class label to the fraction of that class's rows to
    keep (e.g. ``{0: 0.1}`` keeps 10% of class 0). Rows of all other
    classes are preserved; classes are processed sequentially.
    """
    for cls, fraction in subsamp.items():
        cls_rows = np.where(labels == cls)[0]
        other_rows = np.where(labels != cls)[0]
        sampled = np.random.choice(cls_rows, int(fraction * len(cls_rows)), replace=False)
        keep = np.union1d(other_rows, sampled)
        # Apply the same row selection to all three aligned arrays.
        features = features[keep, :]
        scaled_features = scaled_features[keep, :]
        labels = labels[keep]
    return features, scaled_features, labels
def anomary_scores_ae(df_original, df_reduced):
    """Anomaly score from an autoencoder's reconstruction error.

    Computes the per-sample squared reconstruction error between the
    original features and the AE reconstruction, then min-max scales the
    errors to the [0, 1] range.

    Args:
        df_original(array-like): training data of shape (n_samples, n_features)
        df_reduced(array-like): prediction of shape (n_samples, n_features)
    Returns:
        pd.Series: per-sample anomaly score (min-max scaled squared error),
        indexed like ``df_original``.
    """
    # Squared error per sample against the reconstruction.
    squared_error = np.sum((np.array(df_original) - np.array(df_reduced)) ** 2, axis=1)
    squared_error = pd.Series(data=squared_error, index=df_original.index)
    # Min-max scale to [0, 1].
    lo, hi = np.min(squared_error), np.max(squared_error)
    return (squared_error - lo) / (hi - lo)
def merge_runs(input_dir1, input_dir2, map_df, output, arguments):
    """Merge per-sample mutation-count files from two sequencing runs.

    For each row of *map_df*, the two runs' ``counts_sample_*.csv`` files
    are combined: numeric header fields (read depths etc.) are summed,
    other header lines are joined side by side, and mutation counts are
    summed per HGVS variant. Unless ``arguments.covOverride`` is set, the
    matching per-position ``coverage_*.csv`` files are summed as well.

    :param input_dir1: Directory 1 contains mutation counts files
    :param input_dir2: Directory 2 contains mutation counts files
    :param map_df: A dataframe that maps the samples to be merged
        (columns 'Sample ID_run1' and 'Sample ID_run2')
    :param output: output directory to save merged mut_counts
    :param arguments: parsed CLI arguments; only ``covOverride`` is used
    :return: list of merged sample IDs, each "<id_run1>-<id_run2>"
    """
    # Header lines whose integer values must be summed across the two runs.
    summed_fields = (
        "Final read-depth:",
        "Number of read pairs without mutations:",
        "Total read pairs with mutations:",
        "Number of read pairs did not map to gene:",
        "Number of reads outside of the tile:",
        "Raw read depth:",
    )
    new_samples = []
    for index, row in map_df.iterrows():
        merged_header = ""
        mut_count_f1 = os.path.join(input_dir1, f"counts_sample_{row['Sample ID_run1']}.csv")
        mut_count_f2 = os.path.join(input_dir2, f"counts_sample_{row['Sample ID_run2']}.csv")
        new_samples.append(f"{row['Sample ID_run1']}-{row['Sample ID_run2']}")
        # Merge the '#'-prefixed header sections of the two files line by line.
        with open(mut_count_f1) as file1, open(mut_count_f2) as file2:
            for line1, line2 in zip(file1, file2):
                if "#" not in line1:
                    break  # end of the header section
                for field in summed_fields:
                    if field in line1:
                        total = int(line1.split(":")[1]) + int(line2.split(":")[1])
                        merged_header += f"#{field}{total}\n"
                        break
                else:
                    # Non-numeric header line: keep both runs' versions side by side.
                    merged_header += line1.strip() + "; " + line2
        out_path = os.path.join(output, f"counts_sample_{row['Sample ID_run1']}-{row['Sample ID_run2']}.csv")
        with open(out_path, "w") as output_f:
            output_f.write(merged_header)
            # Skip the fixed-size header (20 lines) and read the mutation tables.
            df1 = pd.read_csv(mut_count_f1, skiprows=20)
            df2 = pd.read_csv(mut_count_f2, skiprows=20)
            # Sum counts per HGVS variant across both runs, highest first.
            merged = pd.concat([df1, df2])
            merge_counts = merged.groupby(by=["HGVS"], as_index=False)["count"].sum().sort_values("count", ascending=False)
            merge_counts.to_csv(output_f, mode="a", index=False)
        if not arguments.covOverride:
            cov_f1 = os.path.join(input_dir1, f"coverage_{row['Sample ID_run1']}.csv")
            cov_f2 = os.path.join(input_dir2, f"coverage_{row['Sample ID_run2']}.csv")
            # Per-position coverage is additive across runs.
            cov_d1 = pd.read_csv(cov_f1).set_index("pos")
            cov_d2 = pd.read_csv(cov_f2).set_index("pos")
            df_sum = cov_d1.add(cov_d2, fill_value=0)
            output_cov = os.path.join(output, f"coverage_{row['Sample ID_run1']}-{row['Sample ID_run2']}.csv")
            df_sum.to_csv(output_cov)
    return new_samples
def handle_left(left_entry_box, right_entry_box, mqtt_sender):
    """
    Tells the robot to veer left: sends a 'go' message using the speeds in
    the given entry boxes, negating the left entry box's speed.
    :type left_entry_box: ttk.Entry
    :type right_entry_box: ttk.Entry
    :type mqtt_sender: com.MqttClient
    """
    print('Move left')
    left_speed = -int(left_entry_box.get())
    right_speed = int(right_entry_box.get())
    mqtt_sender.send_message('go', [left_speed, right_speed])
def latest_res_ords():
    """Get last decade from reso and ords table.

    Reads the full Documentum resolutions/ordinances extract, keeps rows
    whose DOC_DATE is on or after 2016-01-01, and writes them to the
    ``*_2016_current.csv`` file in the prod data directory.

    :return: status message string
    """
    filename = 'documentum_scs_council_reso_ordinance_v.csv'
    save_path = f"{conf['prod_data_dir']}/documentum_scs_council_reso_ordinance_v"
    # FIX: read path now uses ``filename`` (it was previously a broken
    # literal, leaving ``filename`` unused).
    df = pd.read_csv(f"{conf['prod_data_dir']}/{filename}",
                     low_memory=False)
    # Coerce unparsable dates to NaT so they are dropped by the filter below.
    df['DOC_DATE'] = pd.to_datetime(df['DOC_DATE'], errors='coerce')
    df_current = df.loc[df['DOC_DATE'] >= "01/01/2016"]
    general.pos_write_csv(df_current, f"{save_path}_2016_current.csv")
    logging.info("Wrote 2016_current")
    return "Successfully extracted this decade of resos and ords"
def test_plot_density_no_subset():
    """Test plot_density works when variables are not subset of one another (#1093)."""
    # Two models sharing only variable "b": neither var set contains the other.
    model_ab = from_dict(
        {name: np.random.normal(size=200) for name in ("a", "b")}
    )
    model_bc = from_dict(
        {name: np.random.normal(size=200) for name in ("b", "c")}
    )
    axes = plot_density([model_ab, model_bc])
    # One axis per variable in the union {a, b, c}.
    assert axes.size == 3
def langevin_coefficients(temperature, dt, friction, masses):
    """Compute the update coefficients for Langevin dynamics.

    Parameters
    ----------
    temperature: float
        units of Kelvin
    dt: float
        units of picoseconds
    friction: float
        collision rate in 1 / picoseconds
    masses: array
        mass of each atom in standard mass units. np.inf masses will
        effectively freeze the particles.

    Returns
    -------
    tuple (ca, cb, cc)
        ca is scalar, and cb and cc are n length arrays used as:
        heat-bath update  v -> ca * v + cc * gaussian
        force update      v -> v + cb * force
    """
    kT = BOLTZ * temperature
    # Per-particle thermal noise scale; infinite mass -> zero noise.
    noise_scale = np.sqrt(kT / masses)
    ca = np.exp(-friction * dt)
    cb = dt / masses
    cc = np.sqrt(1 - np.exp(-2 * friction * dt)) * noise_scale
    return ca, cb, cc
def get_description():
    """Return the long description read from 'README.md'.

    :return: full contents of README.md
    :rtype: str
    """
    readme_path = 'README.md'
    with open(readme_path, encoding='utf-8') as readme:
        return readme.read()
def qt_matrices(matrix_dim, selected_pp_indices=(0, 5, 10, 11, 1, 2, 3, 6, 7)):
    """
    Get the elements of a special basis spanning the density-matrix space of
    a qutrit.

    The returned matrices are given in the standard basis of the
    density matrix space. These matrices form an orthonormal basis
    under the trace inner product, i.e. Tr( dot(Mi,Mj) ) == delta_ij.

    Parameters
    ----------
    matrix_dim : int
        Matrix-dimension of the density-matrix space. Must equal 3
        (present just to maintain consistency with other routines)

    selected_pp_indices : sequence of int, optional
        Indices of the 2-qubit Pauli-product matrices (from ``pp_matrices(4)``)
        projected onto the symmetric (qutrit) subspace to form the basis.
        The default corresponds to labels
        ['II', 'XX', 'YY', 'YZ', 'IX', 'IY', 'IZ', 'XY', 'XZ'].

    Returns
    -------
    list
        A list of 9 numpy arrays each of shape (3, 3).
    """
    if matrix_dim == 1:  # special case of just identity mx
        return [_np.identity(1, 'd')]
    assert(matrix_dim == 3)
    # Projector from the 2-qubit (4-dim) space onto its 3-dim symmetric subspace.
    A = _np.array([[1, 0, 0, 0],
                   [0, 1. / _np.sqrt(2), 1. / _np.sqrt(2), 0],
                   [0, 0, 0, 1]], 'd')

    def _toQutritSpace(inputMat):
        return _np.dot(A, _np.dot(inputMat, A.transpose()))

    pp_mxs = pp_matrices(4)
    qt_mxs = [_toQutritSpace(pp_mxs[i]) for i in selected_pp_indices]
    # Normalize so Tr(BiBj) = delta_ij (done by hand, since only 3x3 mxs)
    qt_mxs[0] *= 1 / _np.sqrt(0.75)
    # Symmetrically orthogonalize the XX/YY projections against the identity
    # element before normalizing.
    q1 = qt_mxs[1] - qt_mxs[0] * _np.sqrt(0.75) / 3
    q2 = qt_mxs[2] - qt_mxs[0] * _np.sqrt(0.75) / 3
    qt_mxs[1] = (q1 + q2) / _np.sqrt(2. / 3.)
    qt_mxs[2] = (q1 - q2) / _np.sqrt(2)
    for i in range(3, 9):
        qt_mxs[i] *= 1 / _np.sqrt(0.5)
    return qt_mxs
def find_continous_edits(instance):
    """Helper processing step that groups edit indices into continuous runs."""
    for side in ('tgt', 'src'):
        instance[f'{side}_token_diffs'] = list(
            group_by_continuous(instance[f'{side}_token_diff'])
        )
    yield instance
def get_signed_value(bit_vector):
    """Generate the combined value of a bit mapping.

    ``bit_vector`` maps a position to a bit. Position 0 contributes its bit
    directly; position i > 0 contributes ``bit * (256 << (i - 1))``.
    Positions are processed in sorted order.
    """
    total = 0
    for position in sorted(bit_vector.keys()):
        bit = int(bit_vector[position])
        if position == 0:
            total = bit
        else:
            # (2 << 7) == 256; shift by one less than the position index.
            total += ((2 << 7) << (int(position) - 1)) * bit
    return total
def sign_table(data, sec_key, metadata_hash="!sdata_sha3_256_table", metadata_hash_signature="!sdata_sha3_256_table_signature"):
    """Sign ``data.sha3_256_table`` and store hash + signature in metadata.

    :param data: object exposing ``sha3_256_table`` and a ``metadata.add`` API
    :param sec_key: signing key providing a ``sign`` method
    :param metadata_hash: metadata field name for the table hash (default="!sdata_sha3_256_table")
    :param metadata_hash_signature: metadata field name for the signature (default="!sdata_sha3_256_table_signature")
    :return:
    """
    table_hash = data.sha3_256_table
    signature_str = str(sec_key.sign(table_hash))
    data.metadata.add(metadata_hash, table_hash)
    data.metadata.add(metadata_hash_signature, signature_str, dtype="str")
def values_to_colors(values, cmap, vmin=None, vmax=None):
    """
    Map scalar values through a colormap to uint8 RGB colors, to
    facilitate coloring of meshes.

    Parameters
    ----------
    values: array-like, (n_vertices, )
        values to pass through colormap
    cmap: array-like, (n_colors, 3)
        colormap describing the RGB values from vmin to vmax
    vmin : float
        (optional) value that should receive minimum of colormap.
        default to minimum of values
    vmax : float
        (optional) values that should receive maximum of colormap
        default to maximum of values

    Output
    ------
    colors: array-like, (n_vertices, 3)
        RGB values for each entry in values (as np.uint8 [0-255])
    """
    if vmin is None:
        vmin = np.nanmin(values)
    if vmax is None:
        vmax = np.nanmax(values)
    # Clamp out-of-range values so each maps to an endpoint color.
    clipped = np.clip(values, vmin, vmax)
    # Interpolate each RGB channel independently over the colormap samples.
    sample_points = np.linspace(vmin, vmax, cmap.shape[0])
    channels = [
        np.interp(clipped, sample_points, cmap[:, channel])
        for channel in range(3)
    ]
    colors = np.vstack(channels).T
    return (colors * 255).astype(np.uint8)
def _clean_sys_argv(pipeline: str) -> List[str]:
"""Values in sys.argv that are not valid option values in Where
"""
reserved_opts = {pipeline, "label", "id", "only_for_rundate", "session", "stage", "station", "writers"}
return [o for o in sys.argv[1:] if o.startswith("--") and o[2:].split("=")[0] not in reserved_opts] | 5,333,384 |
def get_number_from_user_input(prompt: str, min_value: int, max_value: int) -> int:
    """Prompt repeatedly until the user enters an int in [min_value, max_value]."""
    while True:
        raw_input = input(f"{prompt} ({min_value}-{max_value})? ")
        try:
            value = int(raw_input)
        except ValueError:
            print("Invalid input, please try again")
            continue
        if min_value <= value <= max_value:
            return value
        print("Invalid input, please try again")
def test_cli_run_rl_dueling_dqn(cli_args):
    """Test running CLI for an example with default params.

    ``cli_args`` is a fixture-supplied string of space-separated CLI
    arguments (possibly empty).
    """
    # Imported inside the test so the heavy RL module is only loaded when run.
    from pl_bolts.models.rl.dueling_dqn_model import cli_main
    # Split the single argument string into argv-style tokens.
    cli_args = cli_args.split(' ') if cli_args else []
    # Patch argv as seen by argparse so cli_main parses our arguments.
    with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
        cli_main()
async def test_template_static(opp, caplog):
    """Test that we allow static templates."""
    # Set up a template lock whose value_template references no entities.
    with assert_setup_component(1, lock.DOMAIN):
        assert await setup.async_setup_component(
            opp,
            lock.DOMAIN,
            {
                "lock": {
                    "platform": "template",
                    "value_template": "{{ 1 + 1 }}",
                    "lock": {
                        "service": "switch.turn_on",
                        "entity_id": "switch.test_state",
                    },
                    "unlock": {
                        "service": "switch.turn_off",
                        "entity_id": "switch.test_state",
                    },
                }
            },
        )
    await opp.async_block_till_done()
    await opp.async_start()
    await opp.async_block_till_done()
    # A static (non-boolean-entity) template leaves the lock unlocked.
    state = opp.states.get("lock.template_lock")
    assert state.state == lock.STATE_UNLOCKED
    # A manually set state should persist, since the template never updates.
    opp.states.async_set("lock.template_lock", lock.STATE_LOCKED)
    await opp.async_block_till_done()
    state = opp.states.get("lock.template_lock")
    assert state.state == lock.STATE_LOCKED
def word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths):
    """
    Calculate word saliency according to [Chuang2012]_ as
    ``saliency(w) = p(w) * distinctiveness(w)`` for a word ``w``.

    .. [Chuang2012] J. Chuang, C. Manning, J. Heer. 2012. Termite: Visualization Techniques for Assessing Textual Topic
                    Models

    :param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size
    :param doc_topic_distrib: document-topic distribution; shape NxK, where N is the number of documents, K is the
                              number of topics
    :param doc_lengths: array of size N (number of docs) with integers indicating the number of terms per document
    :return: array of size M (vocabulary size) with word saliency
    """
    marginal_topics = marginal_topic_distrib(doc_topic_distrib, doc_lengths)
    marginal_words = marginal_word_distrib(topic_word_distrib, marginal_topics)
    return marginal_words * word_distinctiveness(topic_word_distrib, marginal_topics)
def parse_cl_items(s):
    """Parse a JSON string of checklist items into a list of item objects.

    Each JSON object's ``type`` field (case-insensitive) selects the item
    class; the remaining fields are passed to that class's constructor as
    keyword arguments.

    :param s: JSON-encoded list of checklist item dicts
    :return: list of constructed item objects (empty list for empty input)
    """
    # Map the lowercased 'type' field to the corresponding item class.
    dispatch = {"floating":Floating,
                "weekly":Weekly,
                "monthly": Monthly,
                "daily":Daily
                }
    if len(s) == 0:
        return []
    raw = json.loads(s)
    il = []
    for d in raw:
        # 'type' is consumed here; the rest of the dict feeds the constructor.
        t = d.pop('type')
        t = t.lower()
        il.append(dispatch[t](**d))
    return il
def get_queue(queue, flags=FLAGS.ALL, **conn):
    """
    Orchestrates all the calls required to fully fetch details about an SQS
    Queue, e.g.:
    {
        "Arn": ...,
        "Region": ...,
        "Name": ...,
        "Url": ...,
        "Attributes": ...,
        "Tags": ...,
        "DeadLetterSourceQueues": ...,
        "_version": 1
    }

    :param queue: Either the queue name OR the queue url
    :param flags: By default, set to ALL fields.
    :param conn: dict containing enough information to make a connection to
        the desired account. Must at least have 'assume_role' key.
    :return: dict containing a fully built out SQS queue.
    """
    # A queue may be passed by URL or by name; resolve names to URLs.
    if queue.startswith(("https://", "http://")):
        queue_url = queue
    else:
        queue_url = get_queue_url(QueueName=queue, **conn)
    return registry.build_out(flags, {"QueueUrl": queue_url}, **conn)
def draw(
    x,
    x_extents=(-100, 100),
    y_extents=(-100, 100),
    landmarks=None,
    observations=None,
    particles=None,
    weights=None,
    ellipses=None,
    fig=None,
):
    """Draw vehicle state x = [x, y, theta] on the map.

    Optionally also draws landmarks, observation lines, particles (with
    optional weights), and ellipses. When ``fig`` is given, the frame is
    also written to ``{fig}_{i:03d}.pdf``, where ``i`` is the frame counter
    stored on the function attribute ``draw.i`` (incremented every call;
    must be initialized externally before the first call).
    """
    xmin, xmax = x_extents
    ymin, ymax = y_extents
    # 20 ticks across the x extent.
    tick_spacing = (xmax - xmin) / 20
    if fig is not None:
        print("saving", draw.i)
        gr.beginprint("{}_{:03d}.pdf".format(fig, draw.i))
    init_plot_window(xmin, xmax, ymin, ymax)
    draw_vehicle(x)
    if landmarks is not None:
        draw_landmarks(landmarks)
    if observations is not None:
        draw_observation_lines(x, observations)
    if particles is not None:
        draw_particles(particles, weights=weights)
    if ellipses is not None:
        for i, ell in enumerate(ellipses):
            draw_ellipse(ell, alpha=(0.1))
    draw_axes(tick_spacing, xmin, ymin)
    gr.updatews()
    if fig is not None:
        gr.endprint()
    # Advance the frame counter used for output filenames.
    draw.i += 1
    return
def mixed_float_frame():
    """
    Fixture for DataFrame of different float types with index of unique strings

    Columns are ['A', 'B', 'C', 'D'].
    """
    df = DataFrame(tm.getSeriesData())
    # Cast each column to a distinct float width to exercise mixed dtypes.
    for col, dtype in zip('ABCD', ('float32', 'float32', 'float16', 'float64')):
        df[col] = df[col].astype(dtype)
    return df
def create_parsetestcase(durationstring, expectation, format, altstr):
    """
    Create a TestCase class for a specific test.

    This allows having a separate TestCase for each test tuple from the
    PARSE_TEST_CASES list, so that a failed test won't stop other tests.

    :param durationstring: ISO 8601 duration string to parse
    :param expectation: expected timedelta/Duration parse result
    :param format: format string passed to duration_isoformat
    :param altstr: expected formatted string when it differs from
        durationstring (falsy to compare against durationstring itself)
    """
    # The inner class closes over this factory's parameters.
    class TestParseDuration(unittest.TestCase):
        '''
        A test case template to parse an ISO duration string into a
        timedelta or Duration object.
        '''
        def test_parse(self):
            '''
            Parse an ISO duration string and compare it to the expected value.
            '''
            result = parse_duration(durationstring)
            self.assertEqual(result, expectation)
        def test_format(self):
            '''
            Take duration/timedelta object and create ISO string from it.
            This is the reverse test to test_parse.
            '''
            if altstr:
                self.assertEqual(duration_isoformat(expectation, format),
                                 altstr)
            else:
                # if durationstring == '-P2W':
                #     import pdb; pdb.set_trace()
                self.assertEqual(duration_isoformat(expectation, format),
                                 durationstring)
    return unittest.TestLoader().loadTestsFromTestCase(TestParseDuration)
async def test_reauth_auth_failed(hass: HomeAssistantType, fritz: Mock):
    """Test starting a reauthentication flow with authentication failure."""
    # Make every login attempt fail so the flow keeps asking for credentials.
    fritz().login.side_effect = LoginError("Boom")
    mock_config = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
    mock_config.add_to_hass(hass)
    # Start the reauth flow for the existing config entry.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_REAUTH, "entry_id": mock_config.entry_id},
        data=mock_config.data,
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"
    # Submit new credentials; the mocked login failure should keep the flow
    # on the confirm step with an invalid_auth error.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        user_input={
            CONF_USERNAME: "other_fake_user",
            CONF_PASSWORD: "other_fake_password",
        },
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"
    assert result["errors"]["base"] == "invalid_auth"
def convert_train_run_repeat():
    """
    12/13/16
    Runs the almost complete package on repeat: convert, train, then run,
    for a user-chosen number of repetitions.
    :return: combined outputs of 3 functions
    """
    reply = input('Number of repitions? -> ')
    try:
        # int() raises ValueError on non-numeric input.
        for _ in range(int(reply)):
            convert_train_run()
    except ValueError:
        print('The number you entered is not valid.')
def environment(cluster: str, service: str, arguments: Sequence[str]) -> None:
    """
    Show or modify environment variables.
    If no arguments are given, prints all environment variable name/value
    pairs.
    If arguments are given, set environment variables with the given names to
    the given values. If a value is not provided, remove the variable.
    """
    stagingClient = ECSServiceClient(cluster=cluster, service=service)
    # Note: the docstring above doubles as the click command's help text.
    if not arguments:
        # Read-only mode: list the current task environment.
        currentTaskEnvironment = stagingClient.currentTaskEnvironment()
        click.echo(f"Environment variables for {cluster}:{service}:")
        for key, value in currentTaskEnvironment.items():
            click.echo(f"  {key} = {value!r}")
        return
    click.echo(f"Changing environment variables for {cluster}:{service}:")
    updates: Dict[str, Optional[str]] = {}
    for arg in arguments:
        if "=" in arg:
            # "NAME=VALUE" sets the variable; split only on the first '='.
            key, value = arg.split("=", 1)
            updates[key] = value
            click.echo(f"  Setting {key}.")
        else:
            # A bare "NAME" removes the variable (None signals removal).
            updates[arg] = None
            click.echo(f"  Removing {arg}.")
    stagingClient.deployTaskEnvironment(updates)
def draw_data_from_db(host, port=None, pid=None, startTime=None, endTime=None, system=None, disk=None):
    """
    Get data from InfluxDB, and visualize
    :param host: client IP (also used as the InfluxDB measurement name), required
    :param port: port, visualize port data; optional, choose one from port, pid and system
    :param pid: pid, visualize pid data; optional, choose one from port, pid and system
    :param startTime: Start time; optional (defaults to '2020-05-20 20:20:20')
    :param endTime: end time; optional (defaults to now)
    :param system: visualize system data; optional, choose one from port, pid and system
    :param disk: disk number; optional
    :return: dict with 'code' (1 = success, 0 = failure), 'message', and 'post_data' time series
    """
    # Accumulator for every time series the front end may plot.
    post_data = {
        'types': 'system',
        'cpu_time': [],
        'cpu': [],
        'iowait': [],
        'usr_cpu': [],
        'mem': [],
        'mem_available': [],
        'jvm': [],
        'io_time': [],
        'io': [],
        'disk_r': [],
        'disk_w': [],
        'disk_d': [],
        'rec': [],
        'trans': [],
        'nic': [],
        'tcp': [],
        'close_wait': [],
        'time_wait': [],
        'retrans': [],
        'disk': disk}
    res = {'code': 1, 'flag': 1, 'message': 'Successful!'}
    connection = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
                                         cfg.getInflux('password'), cfg.getInflux('database'))
    try:
        if startTime and endTime:  # If there is a start time and an end time
            pass
        elif startTime is None and endTime is None:  # If the start time and end time do not exist, use the default time.
            startTime = '2020-05-20 20:20:20'
            endTime = time.strftime('%Y-%m-%d %H:%M:%S')
        else:  # If the end time does not exist, the current time is used
            endTime = time.strftime('%Y-%m-%d %H:%M:%S')
        s_time = time.time()
        if port:
            # Per-port/process metrics stored under type='<port>'.
            sql = f"select cpu, wait_cpu, mem, tcp, jvm, rKbs, wKbs, iodelay, close_wait, time_wait from \"{host}\" " \
                  f"where time>'{startTime}' and time<'{endTime}' and type='{port}' tz('Asia/Shanghai')"
            logger.info(f'Execute sql: {sql}')
            datas = connection.query(sql)
            if datas:
                post_data['types'] = 'port'
                for data in datas.get_points():
                    # Timestamp arrives as ISO8601, e.g. '2020-01-01T00:00:00'.
                    post_data['cpu_time'].append(data['time'][:19].replace('T', ' '))
                    post_data['cpu'].append(data['cpu'])
                    post_data['iowait'].append(data['wait_cpu'])
                    post_data['mem'].append(data['mem'])
                    post_data['tcp'].append(data['tcp'])
                    post_data['jvm'].append(data['jvm'])
                    post_data['io'].append(data['iodelay'])
                    post_data['disk_r'].append(data['rKbs'])
                    post_data['disk_w'].append(data['wKbs'])
                    post_data['close_wait'].append(data['close_wait'])
                    post_data['time_wait'].append(data['time_wait'])
            else:
                res['message'] = f'No monitoring data of the port {port} is found, ' \
                                 f'please check the port or time setting.'
                res['code'] = 0
        if disk:
            # Network counters live on the 'system' rows even in port mode.
            sql = f"select rec, trans, net from \"{host}\" where time>'{startTime}' and time<'{endTime}' and " \
                  f"type='system' tz('Asia/Shanghai')"
            logger.info(f'Execute sql: {sql}')
            datas = connection.query(sql)
            if datas:
                for data in datas.get_points():
                    post_data['nic'].append(data['net'])
                    post_data['rec'].append(data['rec'])
                    post_data['trans'].append(data['trans'])
            else:
                res['message'] = 'No monitoring data is found, please check the disk number or time setting.'
                res['code'] = 0
        if pid:
            # NOTE(review): pid-based visualization is not implemented yet.
            pass
        if system and disk:
            # Per-disk columns are named after the disk with '-' stripped,
            # plus _r/_w/_d suffixes for read/write/discard.
            disk_n = disk.replace('-', '')
            disk_r = disk_n + '_r'
            disk_w = disk_n + '_w'
            disk_d = disk_n + '_d'
            sql = f"select cpu, iowait, usr_cpu, mem, mem_available, {disk_n}, {disk_r}, {disk_w}, {disk_d}, rec, trans, " \
                  f"net, tcp, retrans from \"{host}\" where time>'{startTime}' and time<'{endTime}' and " \
                  f"type='system' tz('Asia/Shanghai')"
            logger.info(f'Execute sql: {sql}')
            datas = connection.query(sql)
            if datas:
                post_data['types'] = 'system'
                for data in datas.get_points():
                    post_data['cpu_time'].append(data['time'][:19].replace('T', ' '))
                    post_data['cpu'].append(data['cpu'])
                    post_data['iowait'].append(data['iowait'])
                    post_data['usr_cpu'].append(data['usr_cpu'])
                    post_data['mem'].append(data['mem'])
                    post_data['mem_available'].append(data['mem_available'])
                    post_data['rec'].append(data['rec'])
                    post_data['trans'].append(data['trans'])
                    post_data['nic'].append(data['net'])
                    post_data['io'].append(data[disk_n])
                    post_data['disk_r'].append(data[disk_r])
                    post_data['disk_w'].append(data[disk_w])
                    post_data['disk_d'].append(data[disk_d])
                    post_data['tcp'].append(data['tcp'])
                    post_data['retrans'].append(data['retrans'])
            else:
                res['message'] = 'No monitoring data is found, please check the disk number or time setting.'
                res['code'] = 0
        res.update({'post_data': post_data})
        logger.info(f'Time consuming to query is {time.time() - s_time}')
        # lines = get_lines(post_data)  # Calculate percentile, 75%, 90%, 95%, 99%
        # res.update(lines)
    except Exception as err:
        logger.error(traceback.format_exc())
        res['message'] = str(err)
        res['code'] = 0
    del connection, post_data
    return res
def get_sample_generator(filenames, batch_size, model_config):
    """Set data loader generator according to different tasks.

    Args:
        filenames(list): filenames of the input data.
        batch_size(int): size of the each batch.
        model_config(dict): the dictionary containing model configuration;
            must contain key ``task`` and may contain ``label_name``
            (defaults to 'labels' for supervised tasks).

    Raises:
        NameError: if key ``task`` in ``model_config`` is invalid.

    Returns:
        reader(func): data reader.
    """
    task = model_config['task']
    if task == 'pretrain':
        return pretrain_sample_reader(filenames, batch_size)
    # All supervised tasks share the same label-name lookup.
    label_name = model_config.get('label_name', 'labels')
    if task == 'seq_classification':
        return sequence_sample_reader(filenames, batch_size, label_name)
    if task in ('classification', 'regression'):
        return normal_sample_reader(filenames, batch_size, label_name)
    # Fixed typo in the user-facing message ("unsupport" -> "unsupported").
    raise NameError('Task %s is unsupported.' % task)
def execute_batch(table_type, bulk, count, topic_id, topic_name):
    """
    Execute a prepared bulk operation.

    Returns False when the operation completed successfully (nModified
    equals *count*); True when a count mismatch or BulkWriteError occurred.
    """
    try:
        result = bulk.execute()
    except BulkWriteError as ex:
        print(str(ex.details))
        return True
    if result['nModified'] == count:
        return False
    # Modified-count mismatch: report and flag the batch as errored.
    print(
        "bulk execute of {} data for {}:{}.\nnumber of op sent to "
        "bulk execute ({}) does not match nModified count".format(
            table_type, topic_id, topic_name, count))
    print ("bulk execute result {}".format(result))
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.