content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def remove_whitespace(s):
    """Collapse all runs of whitespace (including newlines) in *s* into
    single spaces and strip leading/trailing whitespace."""
    # str.split() with no argument splits on any whitespace run
    return ' '.join(s.split())
def get_first_group(match):
    """Return the text captured by the first capture group of *match*.

    :param match: a regex match object with at least one group
    """
    return match.group(1)
from typing import Dict
from typing import Tuple
from typing import Any
def plot_from_region_frames(
    frames: Dict[str, pd.DataFrame],
    variable: str,
    binning: Tuple[int, float, float],
    region_label: str,
    logy: bool = False,
    legend_kw: Dict[str, Any] = None,
) -> Tuple[plt.Figure, plt.Axes, plt.Axes]:
    """Create a histogram plot from dataframes and a desired variable.

    Parameters
    ----------
    frames : dict(str, pd.DataFrame)
        the dataframes for all samples
    variable : str
        the variable we want to histogram
    binning : tuple(int, float, float)
        the bin definition (nbins, start, stop)
    region_label : str
        the region label (will be part of out file name)
    logy : bool
        if true set the yscale to log
    legend_kw : dict(str, Any)
        keyword arguments passed to :py:func:`matplotlib.Axes.axes.legend`.

    Returns
    -------
    tuple(plt.Figure, plt.Axes, plt.Axes)
        the figure, main axis, and ratio axis; ``(None, None, None)`` when
        *variable* is not present in the "Data" dataframe.
    """
    if variable not in frames["Data"].columns.to_list():
        # lazy %-style args: formatting only happens if the record is emitted
        log.warning("%s not in dataframe; skipping", variable)
        return None, None, None
    nbins, start, stop = binning
    bin_edges = np.linspace(start, stop, nbins + 1)
    counts = {}
    errors = {}
    for samp in ALL_SAMPLES:
        x = frames[samp][variable].to_numpy()
        w = frames[samp]["weight_nominal"].to_numpy()
        count, err = pg.histogram(x, bins=nbins, range=(start, stop), weights=w, flow=True)
        counts[samp] = count
        errors[samp] = err
    fig, ax, axr = canvas_from_counts(counts, errors, bin_edges)
    draw_atlas_label(
        # raw string avoids the invalid "\s" escape warning
        ax, extra_lines=[r"$\sqrt{s} = 13$ TeV, $L = 139$ fb$^{-1}$", region_label]
    )
    tune_axes(ax, axr, variable, binning, logy=logy)
    # copy so the caller's dict is never mutated; ncol=2 is always enforced
    legend_kw = dict(legend_kw) if legend_kw is not None else {}
    legend_kw["ncol"] = 2
    # rotate the last handle/label to the front of the legend ordering
    handles, labels = ax.get_legend_handles_labels()
    handles.insert(0, handles.pop())
    labels.insert(0, labels.pop())
    ax.legend(handles, labels, loc="upper right", **legend_kw)
    fig.subplots_adjust(left=0.125, bottom=0.095, right=0.965, top=0.95)
    return fig, ax, axr
from unittest.mock import patch
async def init_integration(hass, co2_sensor=True) -> MockConfigEntry:
    """Set up the Nettigo Air Monitor integration in Home Assistant."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        title="10.10.2.3",
        unique_id="aa:bb:cc:dd:ee:ff",
        data={"host": "10.10.2.3"},
    )
    if not co2_sensor:
        # Remove conc_co2_ppm value
        nam_data["sensordatavalues"].pop(6)
    data_patch = patch(
        "homeassistant.components.nam.NettigoAirMonitor._async_get_data",
        return_value=nam_data,
    )
    with data_patch:
        config_entry.add_to_hass(hass)
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    return config_entry
def decode(ciphered_text: str) -> str:
    """
    Decode Atbash cipher.

    :param ciphered_text: Atbash cipher
    :return: decoded text
    """
    decoded = []
    for char in ciphered_text:
        # only letters and digits are decoded; everything else is dropped
        if char in LOWERCASE or char in DIGITS:
            decoded.append(replace_char(char))
    return ''.join(decoded)
def text(label, default=None):
    """
    @brief prompt and read a single line of input from a user
    @param label prompt label shown to the user
    @param default value substituted when the user enters nothing
    @retval string input from user (never empty)
    """
    display = label
    if default:
        display += " (default: " + str(default) + ")"
    # loop (instead of the original recursion) until a non-empty value
    # is obtained; empty input falls back to the default when one exists
    while True:
        response = input(display + ": ")
        if response == '' and default:
            response = default
        if response != '':
            return response
        print("%s is a required field" % label)
def update_reservation(reservation, updatedSpots, comments):
    """
    Update the reservation with the new spots needed.

    Returns the change in seats (new total minus previous total).
    """
    seat_delta = updatedSpots - reservation.seats_needed
    reservation.seats_needed = updatedSpots
    reservation.note = comments
    db.session.commit()
    flash("Reservation updated.")
    return seat_delta
def qc_freq(species, geom, natom, atom, mult, charge, index=-1, high_level = 0):
    """
    Creates a frequency input and runs it.

    index: >=0 for sampling, each job will get numbered with index
    high_level: if true, name the job '_fr_high' and request high-level settings
    """
    if index == -1:
        job = str(species.chemid) + '_fr'
    else:
        job = str(species.chemid) + '_fr_' + str(index).zfill(par.zf)
    if high_level:
        job = str(species.chemid) + '_fr_high'
    kwargs = get_qc_arguments(job, mult, high_level=high_level)
    if par.qc == 'gauss':
        kwargs['freq'] = 'freq'
        kwargs['ioplist'] = ['7/33=1']
    elif par.qc == 'nwchem':
        kwargs['task'] = 'frequencies'
    dummy = is_linear(geom, species.bond)
    if len(dummy) > 0:  # add a dummy atom for each close to linear angle
        for d in dummy:
            atom = np.append(atom, ['X'])
            geom = np.concatenate((geom, [d]), axis=0)
            natom += 1
        # switch on the symmetry of gaussian
        if 'NoSymm' in kwargs:
            del kwargs['NoSymm']
    dummy = [d.tolist() for d in dummy]
    # context managers ensure the template and output handles are closed
    # (the template handle was previously never closed)
    with open(par.tpldir + 'ase_{qc}_freq_well.py.tpl'.format(qc=par.qc), 'r') as tpl:
        template = tpl.read()
    template = template.format(label=job, kwargs=kwargs, atom=list(atom),
                               geom=list([list(gi) for gi in geom]),
                               ppn=par.ppn, dummy=dummy)
    with open('{}.py'.format(job), 'w') as f_out:
        f_out.write(template)
    submit_qc(job)
    return 0
from datetime import datetime
def sample_to_timed_points(model, size):
    """As :func:`sample` but return in :class:`open_cp.data.TimedPoints`.

    Every sampled point is stamped with the fixed date 2017-01-01.
    """
    # `datetime` is the class (from `from datetime import datetime`); the
    # original `datetime.datetime(...)` raised AttributeError.
    timestamps = [datetime(2017, 1, 1)] * size
    pts = sample(model, size)
    assert pts.shape == (size, 2)
    return open_cp.data.TimedPoints.from_coords(timestamps, *pts.T)
def _per_image_standardization(image):
"""
:param image: image numpy array
:return:
"""
num_compare = 1
for dim in image.shape:
num_compare = np.multiply(num_compare, dim)
_standardization = (image - np.mean(image)) / max(np.std(image), 1 / num_compare)
return _standardization | 18461f97a91c1cad2156606b67c7a5f732587b1f | 3,631,709 |
import torch
def unflatten_parameters(params, example, device):
    """Unflatten parameters.

    :args params: parameters as a single 1D np array
    :args example: generator of parameters (as returned by module.parameters()),
        used to reshape params
    :args device: where to store unflattened parameters
    :returns: unflattened parameters (list of tensors shaped like *example*)
    """
    params = torch.Tensor(params).to(device)
    idx = 0
    unflattened = []
    # leftover debug print removed
    for e_p in example:
        # carve out exactly e_p.numel() elements, reshaped like e_p
        unflattened.append(params[idx: idx + e_p.numel()].view(e_p.size()))
        idx += e_p.numel()
    return unflattened
def pattern_to_regex(pattern):
    """
    Convert the CODEOWNERS path pattern into a regular expression string.
    """
    original = pattern  # kept for the error message
    # Replicates the logic from normalize_pattern function in Gitlab
    # ee/lib/gitlab/code_owners/file.rb:
    if not pattern.startswith('/'):
        pattern = '/**/' + pattern
    if pattern.endswith('/'):
        pattern += '**/*'
    # Glob -> regex via intermediate placeholder tokens, so earlier
    # replacements cannot be re-matched by later ones.
    tokenized = (pattern
                 .replace('**/', ':REGLOB:')
                 .replace('**', ':INVALID:')
                 .replace('*', ':GLOB:')
                 .replace('.', ':DOT:')
                 .replace('?', ':ANY:'))
    if ':INVALID:' in tokenized:
        raise ValueError("Likely invalid pattern '{}': '**' should be followed by '/'".format(original))
    regex = (tokenized
             .replace(':REGLOB:', '(?:.*/)?')
             .replace(':GLOB:', '[^/]*')
             .replace(':DOT:', '[.]')
             .replace(':ANY:', '.')) + '$'
    if regex.startswith('/'):
        regex = '^' + regex
    return regex
def register_extensions(app):
    """Register Flask extensions."""
    # these extensions share the plain init_app(app) signature;
    # registration order matches the original call sequence
    for extension in (assets, bcrypt, cache, db, login_manager, debug_toolbar):
        extension.init_app(app)
    migrate.init_app(app, db)
    flask_mail.init_app(app)
    celery.conf.update(app.config)
    return None
def unit2uniform(x, vmin, vmax):
    """
    Map ``x`` from the unit hypercube onto a uniform distribution over
    ``[vmin, vmax]`` (linear rescaling).
    """
    span = vmax - vmin
    return vmin + span * x
def adaptive_minmax(data, x_data=None, poly_order=None, method='modpoly',
                    weights=None, constrained_fraction=0.01, constrained_weight=1e5,
                    estimation_poly_order=2, method_kwargs=None, **kwargs):
    """
    Fits polynomials of different orders and uses the maximum values as the baseline.

    Each polynomial order fit is done both unconstrained and constrained at the
    endpoints.

    Parameters
    ----------
    data : array-like, shape (N,)
        The y-values of the measured data, with N data points.
    x_data : array-like, shape (N,), optional
        The x-values of the measured data. Default is None, which will create an
        array from -1 to 1 with N points.
    poly_order : int, Sequence(int, int) or None, optional
        The two polynomial orders to use for fitting. If a single integer is given,
        then will use the input value and one plus the input value. Default is None,
        which will do a preliminary fit using a polynomial of order `estimation_poly_order`
        and then select the appropriate polynomial orders according to [3]_.
    method : {'modpoly', 'imodpoly'}, optional
        The method to use for fitting each polynomial. Default is 'modpoly'.
    weights : array-like, shape (N,), optional
        The weighting array. If None (default), then will be an array with
        size equal to N and all values set to 1.
    constrained_fraction : float or Sequence(float, float), optional
        The fraction of points at the left and right edges to use for the
        constrained fit. Default is 0.01. If `constrained_fraction` is a sequence,
        the first item is the fraction for the left edge and the second is the
        fraction for the right edge.
    constrained_weight : float or Sequence(float, float), optional
        The weighting to give to the endpoints. Higher values ensure that the
        end points are fit, but can cause large fluctuations in the other sections
        of the polynomial. Default is 1e5. If `constrained_weight` is a sequence,
        the first item is the weight for the left edge and the second is the
        weight for the right edge.
    estimation_poly_order : int, optional
        The polynomial order used for estimating the baseline-to-signal ratio
        to select the appropriate polynomial orders if `poly_order` is None.
        Default is 2.
    **method_kwargs
        Additional keyword arguments to pass to :func:`.modpoly` or
        :func:`.imodpoly`. These include `tol`, `max_iter`, `use_original`,
        `mask_initial_peaks`, and `num_std`.

    Returns
    -------
    numpy.ndarray, shape (N,)
        The calculated baseline.
    params : dict
        A dictionary with the following items:

        * 'weights': numpy.ndarray, shape (N,)
            The weight array used for fitting the data.
        * 'constrained_weights': numpy.ndarray, shape (N,)
            The weight array used for the endpoint-constrained fits.
        * 'poly_order': numpy.ndarray, shape (2,)
            An array of the two polynomial orders used for the fitting.

    References
    ----------
    .. [3] Cao, A., et al. A robust method for automated background subtraction
           of tissue fluorescence. Journal of Raman Spectroscopy, 2007, 38,
           1199-1205.
    """
    # Resolve the polynomial fit function for `method` and normalize inputs.
    y, fit_func, _, method_kws = _setup_optimizer(
        data, method, [polynomial], method_kwargs, False, **kwargs
    )
    _, x, weight_array, _ = _setup_polynomial(y, x_data, weights)
    if poly_order is None:
        # select the two candidate orders from a preliminary low-order fit
        poly_orders = _determine_polyorders(
            y, x, estimation_poly_order, weight_array, fit_func, **method_kws
        )
    else:
        poly_orders, scalar_poly_order = _check_scalar(poly_order, 2, True, dtype=int)
        if scalar_poly_order:
            poly_orders[1] += 1  # add 1 since they are initially equal if scalar input
    # use high weighting rather than Lagrange multipliers to constrain the points
    # to better work with noisy data
    weightings = _check_scalar(constrained_weight, 2, True)[0]
    constrained_fractions = _check_scalar(constrained_fraction, 2, True)[0]
    if np.any(constrained_fractions < 0) or np.any(constrained_fractions > 1):
        raise ValueError('constrained_fraction must be between 0 and 1')
    len_y = len(y)
    # boost the weights of the left and right edge fractions for the
    # endpoint-constrained fits
    constrained_weights = weight_array.copy()
    constrained_weights[:ceil(len_y * constrained_fractions[0])] = weightings[0]
    constrained_weights[len_y - ceil(len_y * constrained_fractions[1]):] = weightings[1]
    # TODO should make parameters available; a list with an item for each fit like collab_pls
    # Four fits: (low order, high order) x (unconstrained, endpoint-constrained).
    baselines = np.empty((4, y.shape[0]))
    baselines[0] = fit_func(y, x, poly_orders[0], weights=weight_array, **method_kws)[0]
    baselines[1] = fit_func(y, x, poly_orders[0], weights=constrained_weights, **method_kws)[0]
    baselines[2] = fit_func(y, x, poly_orders[1], weights=weight_array, **method_kws)[0]
    baselines[3] = fit_func(y, x, poly_orders[1], weights=constrained_weights, **method_kws)[0]
    # TODO should the coefficients also be made available? Would need to get them from
    # each of the fits
    params = {
        'weights': weight_array, 'constrained_weights': constrained_weights,
        'poly_order': poly_orders
    }
    # the final baseline is the pointwise maximum over the four candidates
    return np.maximum.reduce(baselines), params
def extractHachidoriTranslations(item):
    """
    Parser for 'Hachidori Translations'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # known series tags, checked in the original precedence order
    known_series = (
        'Charging Magic with a Smile',
        'Kokugensou wo Item Cheat',
        'Ochitekita Naga to Majo no Kuni',
        'Humans are the Strongest Race',
        'Seiun wo Kakeru',
    )
    for series in known_series:
        if series in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix)
    return False
import os
import tempfile
import ctypes
def create_build_tools_zip(lib):
    """
    Create the update package file.

    :param lib: loaded native library object exposing ``CreatePackage``
    :return: the NamedTemporaryFile holding the package, or False on error
    """
    opera_script_file_name_dict = OPTIONS_MANAGER.opera_script_file_name_dict
    tmp_dict = {}
    for each in SCRIPT_KEY_LIST:
        tmp_dict[each] = []
    # an all-empty script dict means no scripts were collected -> error out
    if opera_script_file_name_dict == tmp_dict:
        UPDATE_LOGGER.print_log(
            "Script dict is null!",
            log_type=UPDATE_LOGGER.ERROR_LOG)
        return False
    count = 0
    opera_script_dict = {}
    # map each script file name to its script identifier, counting entries
    for each_value in opera_script_file_name_dict.values():
        for each in each_value:
            opera_script_dict[each[1].name] = each[0]
            count += 1
    # other_file_count --> 1(updater_binary) + 1(loadScript.us)
    other_file_count = 2
    count += other_file_count
    if OPTIONS_MANAGER.register_script_file_obj is not None:
        count += 1
    head_list = get_tools_head_list(count)
    component_list, num = \
        get_tools_component_list(count, opera_script_dict)
    total_script_file_obj = OPTIONS_MANAGER.total_script_file_obj
    register_script_file_obj = OPTIONS_MANAGER.register_script_file_obj
    update_exe_path = os.path.join(OPTIONS_MANAGER.target_package_dir,
                                   UPDATE_EXE_FILE_NAME)
    if not os.path.exists(update_exe_path):
        UPDATE_LOGGER.print_log(
            "updater_binary file does not exist!path: %s" % update_exe_path,
            log_type=UPDATE_LOGGER.ERROR_LOG)
        return False
    file_obj = tempfile.NamedTemporaryFile(prefix="build_tools-")
    file_save_patch = file_obj.name.encode("utf-8")
    # fill in the fixed components after the `num` script components
    component_list[num].file_path = update_exe_path.encode("utf-8")
    component_list[num].component_addr = \
        UPDATE_EXE_FILE_NAME.encode("utf-8")
    component_list[num + 1].file_path = \
        total_script_file_obj.name.encode("utf-8")
    component_list[num + 1].component_addr = \
        TOTAL_SCRIPT_FILE_NAME.encode("utf-8")
    if OPTIONS_MANAGER.register_script_file_obj is not None:
        component_list[num + 2].file_path = \
            register_script_file_obj.name.encode("utf-8")
        component_list[num + 2].component_addr = \
            REGISTER_SCRIPT_FILE_NAME.encode("utf-8")
    # NOTE(review): in on-server mode a script path is passed instead of key
    # bytes -- presumably the native side treats it specially; confirm.
    if OPTIONS_MANAGER.private_key == ON_SERVER:
        private_key = "./update_package.py"
    else:
        private_key = OPTIONS_MANAGER.private_key.encode("utf-8")
    lib.CreatePackage(
        ctypes.pointer(head_list), component_list, file_save_patch,
        private_key)
    return file_obj
def _tvgp_qvgp_optim_setup():
    """Creates a VGP model and a matched tVGP model.

    Both models share the same binarised observations and Bernoulli
    likelihood; each is trained with 20 natural-gradient steps before
    the ``(tvgp, qvgp)`` pair is returned.
    """
    time_points, observations, kernel, noise_variance = _setup()
    input_data = (
        tf.constant(time_points),
        tf.constant((observations > 0.5).astype(float)),
    )
    likelihood = Bernoulli()
    tvgp = t_VGP(
        data=input_data,
        kernel=kernel,
        likelihood=likelihood,
    )
    qvgp = gpflow.models.VGP(
        data=input_data,
        kernel=kernel,
        mean_function=None,
        likelihood=likelihood,
    )
    natgrad_rep = 20
    # natgrad steps for tVGP (plain loops: the originals were list
    # comprehensions used only for their side effects)
    for _ in range(natgrad_rep):
        tvgp.update_variational_parameters(beta=1.0)
    # natgrad steps for VGP
    natgrad_opt = NaturalGradient(gamma=1.0)
    variational_params = [(qvgp.q_mu, qvgp.q_sqrt)]
    training_loss = qvgp.training_loss_closure()
    for _ in range(natgrad_rep):
        natgrad_opt.minimize(training_loss, var_list=variational_params)
    return tvgp, qvgp
def get_average_rate(**options):
    """
    gets average imdb rate of all movies.

    Thin delegate to ``movies_stat_services.get_average_rate``; note the
    ``**options`` kwargs are accepted but not forwarded to the service.

    :rtype: float
    """
    return movies_stat_services.get_average_rate()
def _uint_to_le(val, length):
"""Returns a byte array that represents an unsigned integer in little-endian format.
Args:
val: Unsigned integer to convert.
length: Number of bytes.
Returns:
A byte array of ``length`` bytes that represents ``val`` in little-endian format.
"""
return val.to_bytes(length=length, byteorder='little') | 54e765e7b3772c6e2e6dc4c7e6de48d034b9d4b5 | 3,631,719 |
def extract_preposition(arg_number, postags, morph, lemmas, syntax_dep_tree):
    """ Returns preposition for a word in the sentence """
    #TODO: fix duplication
    #TODO: there was a list of words for complex preposition, as we use the whole preposition as a feature
    # First pass: look among the word's direct children for a preposition.
    children = get_children(arg_number, syntax_dep_tree)
    for child_number in children:
        lemma_child, postag_child = lemmas[child_number], postags[child_number]  # lemma_child unused
        if postag_child in PREP_POSTAGS:
            complex_prep = in_complex_preposition(child_number, postags, morph,
                                                  lemmas, syntax_dep_tree)
            if complex_prep:
                return complex_prep
            else:
                return child_number
    # Second pass: check siblings (children of the word's syntactic parent).
    siblings = get_children(syntax_dep_tree[arg_number].parent, syntax_dep_tree)
    for child_number in siblings:
        lemma_child, postag_child = lemmas[child_number], postags[child_number]
        if postag_child in PREP_POSTAGS:
            # NOTE(review): this branch calls is_complex_preposition, with a
            # different name and signature than in_complex_preposition above --
            # confirm both helpers exist and the asymmetry is intentional.
            complex_prep = is_complex_preposition(child_number, morph, lemmas, syntax_dep_tree)
            if complex_prep:
                return complex_prep
            else:
                return child_number
    return None
import torch
def batch_to_patches(x, patch_size, patches_per_image):
    """Extract random square patches from a batch of RGB images.

    :param x: torch tensor with images in batch (batsize, numchannels, height, width)
    :param patch_size: size of patch (effective size is 2*(patch_size//2)+1)
    :param patches_per_image: number of random patches per image
    :return: tensor (patches_per_image*batsize, 3, patch, patch); axis 2 is the
             column offset and axis 3 the row offset (as in the original index math)
    """
    device = x.device
    assert x.dim() == 4
    batsize, nchannels, height, width = x.size()  # (minibatch, channel, height, width)
    assert nchannels == 3
    patch_count = patches_per_image * batsize
    hs = patch_size // 2
    # BUG FIX 1: the original reused the name `x` for the ogrid offsets,
    # clobbering the input images before indexing into them.
    # BUG FIX 2: the first ogrid axis must span patch_count (not batsize),
    # as required by `img_id = patch_id // patches_per_image` and the
    # broadcast against the (patch_count, 1, 1, 1) random centres.
    patch_id, chan, col_off, row_off = np.ogrid[0:patch_count, 0:3, -hs:hs + 1, -hs:hs + 1]
    patch_id, chan, col_off, row_off = [
        torch.tensor(_e, device=device) for _e in (patch_id, chan, col_off, row_off)
    ]
    img_id = patch_id // patches_per_image
    # random patch centres kept hs away from the image borders
    cols = col_off + torch.randint(hs, width - hs, size=(patch_count, 1, 1, 1), device=device, dtype=torch.int64)
    rows = row_off + torch.randint(hs, height - hs, size=(patch_count, 1, 1, 1), device=device, dtype=torch.int64)
    idx = ((img_id * nchannels + chan) * height + rows) * width + cols
    patches = x.reshape(-1)[idx]
    return patches  # (patch_count, nchannels, patch_size, patch_size)
import json
def parse_json(json_data, category):
    """
    Parses the <json_data> from intermediate value.

    Args:
        json_data (str): A string of data to process in JSON format.
        category (str): The category ('all' / 'spam' / 'ham') to extract data from.

    Returns:
        (status_code, data), where status_code = 0 if there is no error, 1 otherwise.
    """
    if not json_data:
        # nothing was provided: fall back to the static defaults
        return (0, CATEGORIES[category])
    parsed = json.loads(json_data)
    categories = parsed['categories']
    error = parsed['error']
    if error:
        return (1, error)
    return (0, categories[category])
def get_dset_size(shape_json, typesize):
    """ Return the size of the dataspace. For
    any unlimited dimensions, assume a value of 1.
    (so the return size will be the absolute minimum)
    """
    if shape_json is None or shape_json["class"] == 'H5S_NULL':
        return None
    if shape_json["class"] == 'H5S_SCALAR':
        return typesize  # a scalar dataspace holds exactly one element
    if typesize == 'H5T_VARIABLE':
        typesize = DEFAULT_TYPE_SIZE  # just take a guess at the item size
    dset_size = typesize
    for extent in shape_json["dims"]:
        # an extent of 0 marks an extendable dimension; count it as 1
        if extent != 0:
            dset_size *= extent
    return dset_size
from typing import List
def check_shapes(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Analog of :func:`check_agency` for ``feed.shapes``.
    """
    table = "shapes"
    problems = []
    # Preliminary checks
    if feed.shapes is None:
        return problems
    f = feed.shapes.copy()
    problems = check_for_required_columns(problems, table, f)
    if problems:
        return format_problems(problems, as_df=as_df)
    # sort so the monotonicity check below can scan rows sequentially
    f.sort_values(["shape_id", "shape_pt_sequence"], inplace=True)
    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)
    # Check shape_id
    problems = check_column(problems, table, f, "shape_id", valid_str)
    # Check shape_pt_lon and shape_pt_lat
    for column, bound in [("shape_pt_lon", 180), ("shape_pt_lat", 90)]:
        # valid iff not NaN and within [-bound, bound]
        v = lambda x: pd.notna(x) and -bound <= x <= bound
        cond = ~f[column].map(v)
        problems = check_table(
            problems,
            table,
            f,
            cond,
            f"{column} out of bounds {[-bound, bound]}",
        )
    # Check for duplicated (shape_id, shape_pt_sequence) pairs
    cond = f[["shape_id", "shape_pt_sequence"]].duplicated()
    problems = check_table(
        problems, table, f, cond, "Repeated pair (shape_id, shape_pt_sequence)"
    )
    # Check that shape_dist_traveled does not decrease along a trip
    if "shape_dist_traveled" in f.columns:
        g = f.dropna(subset=["shape_dist_traveled"])
        indices = []
        prev_sid = None
        prev_index = None
        prev_dist = -1
        cols = ["shape_id", "shape_dist_traveled"]
        # rows are already ordered by (shape_id, shape_pt_sequence), so a
        # drop in distance within the same shape_id is a violation
        for i, sid, dist in g[cols].itertuples():
            if sid == prev_sid and dist < prev_dist:
                indices.append(prev_index)
            prev_sid = sid
            prev_index = i
            prev_dist = dist
        if indices:
            problems.append(
                [
                    "error",
                    "shape_dist_traveled decreases on a trip",
                    table,
                    indices,
                ]
            )
    return format_problems(problems, as_df=as_df)
def get_help_response():
    """Build the help speechlet response for the OPM Status skill.

    Session attributes could be initialised here if the skill ever
    needed state carried between requests.
    """
    session_attributes = {}
    speech_output = "To begin, ask o. p. m. status an acceptable question. For example, " \
                    "Is the government is open today?, or " \
                    "Was the government open on 2017-03-14?"
    # If the user either does not reply to the welcome message or says
    # something that is not understood, they will be prompted again with
    # this (empty) text.
    reprompt_text = ""
    speechlet = build_speechlet_response(
        "OPM Status Help", speech_output, reprompt_text, False)
    return build_response(session_attributes, speechlet)
import random
def mm_clustering(sampler, state, float_names, fixed_df, total_df_names, fit_scale, runprops, obsdf,geo_obj_pos, best_llhoods, backend, pool, mm_likelihood, ndim, moveset, const = 50, lag = 10, max_prune_frac = 0.9):
    """
    Determines if walkers in the ensemble are lost and removes them, replacing
    them with random linear combinations of two random good walkers.

    Inputs:
        sampler: an emcee sampler object on which to run MultiMoon
        state: ndarray, a place from which to start the emcee run
        float_names-fit_scale: internal multimoon data types used in mm_likelihood
        runprops: internal runprops dictionary
        obsdf, geo_obj_pos: internal observational data types
        best_llhood-pool: more internal inputs
        ndim: number of dimensions of parameter vector
        moveset: the moveset you want emcee to use
        const: constant used in Hou 2010 clustering algorithm. Determines how tolerant the clustering is.
        lag: the number of steps to average the likelihood over.
        max_prune_frac: maximum fraction of walkers to remove.

    Outputs:
        emcee sampler object, state object
    """
    # Get inputs from runprops
    nwalkers = runprops.get("nwalkers")
    reburnin = runprops.get("clustering_burnin")
    if reburnin == 0:
        return sampler, state
    verbose = runprops.get("verbose")
    nthinning = runprops.get("nthinning")
    # Getting important values from the chain
    lastparams = sampler.get_chain()[-1,:,:]
    ngens = sampler.get_chain().shape[0]
    if ngens < lag:
        if verbose:
            print("Chain too short for clustering algorithm, clustering not performed")
        return sampler, state
    # per-walker likelihood averaged over the last `lag` generations
    avllhood = np.mean(sampler.get_log_prob()[-lag:,:], axis = 0)
    if verbose:
        print(np.sort(avllhood))
        print(sampler.acceptance_fraction)
    # Sorting the walkers by likelihood values (descending)
    llhoodparam = pd.DataFrame(columns = ['llhood'] + float_names)
    for i in range(nwalkers):
        llhoodparam.loc[i] = np.concatenate([np.array([avllhood[i]]),lastparams[i,:]])
    llhoodparam = llhoodparam.sort_values(by=['llhood'], ascending = False)
    llhoodparam = llhoodparam.values
    # Performing rejection tests (Hou 2010 gap criterion): once the
    # likelihood gap between consecutive walkers exceeds the scaled mean
    # gap, all lower-ranked walkers are rejected
    reject = np.zeros(nwalkers)
    for i in range(1,nwalkers-1):
        term1 = -llhoodparam[i+1,0] + llhoodparam[i,0]
        term2 = const*(-llhoodparam[i,0] + llhoodparam[0,0])/(i)
        print(term1, term2)
        if term1 > term2:
            reject[(i+1):] = 1
            break
    freject = reject.sum()/nwalkers
    if verbose:
        print(freject)
    # Pruning walkers based on the clusters found,
    # replacing them with random linear combinations of walkers within the cluster
    # Skipping if cluster is not big enough
    if freject < max_prune_frac:
        params = llhoodparam[:,1:]
        for i in range(len(reject)):
            if reject[i] == 1:
                # Picking random inputs (two distinct accepted walkers)
                p = random.random()
                c1 = random.randrange(i)
                c2 = random.randrange(i)
                while c1 == c2:
                    c2 = random.randrange(i)
                # Calculating random linear comb of two random walkers
                params[i,:] = (p*params[c1,:] + (1-p)*params[c2,:])
        # Create a new sampler
        sampler = emcee.EnsembleSampler(nwalkers, ndim, mm_likelihood.log_probability,
                                        backend=backend, pool = pool,
                                        args = (float_names, fixed_df, total_df_names, fit_scale, runprops, obsdf,geo_obj_pos, best_llhoods),
                                        moves = moveset)
        # Perform another burn in
        if runprops.get('thin_run'):
            state = sampler.run_mcmc(params, reburnin, progress = True, store = True, thin=nthinning)
        else:
            state = sampler.run_mcmc(params, reburnin, progress = True, store = True)
        return sampler, state
    else:
        print("Cluster not big enough, clustering not performed")
        return sampler, state
def off_diagonal_min(A):
    """Return the smallest off-diagonal element of a matrix.

    Args:
        A (jax.numpy.ndarray): A real 2D matrix

    Returns:
        (float): The smallest off-diagonal element in A
    """
    return jnp.min(off_diagonal_elements(A))
def cli_cosmosdb_sql_stored_procedure_create_update(client,
                                                    resource_group_name,
                                                    account_name,
                                                    database_name,
                                                    container_name,
                                                    stored_procedure_name,
                                                    stored_procedure_body):
    """Creates or Updates an Azure Cosmos DB SQL stored procedure

    :param client: SQL resources operations client
    :param resource_group_name: name of the resource group
    :param account_name: Cosmos DB account name
    :param database_name: SQL database name
    :param container_name: SQL container that holds the stored procedure
    :param stored_procedure_name: id of the stored procedure
    :param stored_procedure_body: JavaScript body of the stored procedure
    :return: the service's create/update result (long-running operation)
    """
    # the resource id doubles as the stored procedure name
    sql_stored_procedure_resource = SqlStoredProcedureResource(id=stored_procedure_name)
    sql_stored_procedure_resource.body = stored_procedure_body
    sql_stored_procedure_create_update_resource = SqlStoredProcedureCreateUpdateParameters(
        resource=sql_stored_procedure_resource,
        options={})
    return client.create_update_sql_stored_procedure(resource_group_name,
                                                     account_name,
                                                     database_name,
                                                     container_name,
                                                     stored_procedure_name,
                                                     sql_stored_procedure_create_update_resource)
def create(box=None,n=None,nr=None,nc=None,sr=1,sc=1,cr=None,cc=None,const=None) :
    """
    Creates a new HDU.

    Parameters:
        box : optional region object; its extent supplies size and corner
        n : square image size, used when nr/nc are not given
        nr, nc : number of rows / columns
        sr, sc : starting row / column (stored as CNPIX2 / CNPIX1), default 1
        cr, cc : center row / column; when both are given they override sr/sc
        const : optional constant added to the zeroed image data

    Returns:
        fits.PrimaryHDU with CNPIX1/CNPIX2 header set, or None on bad input.
    """
    if box is not None:
        nr = box.nrow()
        nc = box.ncol()
        sc = box.xmin
        sr = box.ymin
    elif nr is None and nc is None:
        # fall back to a square n x n image (n may itself be None; the
        # explicit size check below handles that case)
        nr = n
        nc = n
    # explicit validation replaces the old try/except around np.zeros
    if nr is None or nc is None:
        print('You must specify either box=, n=, or nr= and nc=')
        return
    if cr is not None and cc is not None:
        sr = cr - nr / 2
        # BUG FIX: the column corner previously used nr (rows) instead of nc
        sc = cc - nc / 2
    im = np.zeros([nr, nc])
    hd = fits.PrimaryHDU(im)
    hd.header['CNPIX1'] = sc
    hd.header['CNPIX2'] = sr
    if const is not None:
        hd.data += const
    return hd
def gcd(num1, num2):
    """Return the Greatest Common Divisor of two numbers.

    Uses the Euclidean algorithm, starting from the larger value.
    """
    a, b = max(num1, num2), min(num1, num2)
    while b:
        a, b = b, a % b
    return a
from typing import List
from typing import Dict
from typing import Any
def read_params_from_config(config: dict, _root_name: str = "") -> List[NamedPyParam]:
    """Reads params from the nested python dictionary.

    Args:
        config: a python dictionary with params definitions
        _root_name: used internally

    Returns:
        params a list of NamedPyParam extracted from the dictionary
    """
    collected = []
    for name, node in config.items():
        if "value" in node and "dtype" in node:
            # end node, i.e. a PyParam definition; the scope is the
            # accumulated path with the leading "/" stripped
            node["scope"] = "/".join(_root_name.split("/")[1:])
            collected.append(NamedPyParam.from_dict(name, node))
        else:
            # recurse into the nested subtree
            collected.extend(read_params_from_config(node, "/".join([_root_name, name])))
    return collected
def dataParser(path):
    """
    Parse all files in the directory specified by *path*.
    It is assumed that all said files are valid .wav files.

    Returns:
        waves: list of loaded signals
        digits: np.ndarray of digit labels parsed from the file names
        speakers: np.ndarray of speaker labels parsed from the file names
        rates: list of sampling rates
    """
    waves = []
    rates = []
    digits = []
    speakers = []
    files = [f for f in listdir(path) if isfile(join(path, f))]
    # iterate the filenames directly instead of range(len(files))
    for fname in files:
        # Keep both the signals themselves, and the sampling rates.
        sig, rate = librosa.load(join(path, fname), sr=None)
        waves.append(sig)
        rates.append(rate)
        speaker, digit = nameToFacts(fname)
        digits.append(digit)
        speakers.append(speaker)
    print('Parsing complete! ', len(waves), ' files in total.')
    return waves, np.array(digits), np.array(speakers), rates
def deserialize_function(serial, function_type):
    """Deserializes the Keras-serialized function.

    (De)serializing Python functions from/to bytecode is unsafe, so the
    function's type is carried alongside: 'lambda' for anonymous functions
    (restored from bytecode) and 'function' for named functions resolved
    through the Python environment. Both cases are brittle, and the
    deserialized functions do not perform lexical scoping -- any modules
    they require must be imported inside the function body. This mirrors
    the implementation in `tf.keras.layers.Lambda`.

    Args:
        serial: Serialized Keras object: typically a dict, string, or bytecode.
        function_type: Python string denoting 'function' or 'lambda'.

    Returns:
        function: Function the serialized Keras object represents.

    #### Examples

    ```python
    serial, function_type = serialize_function(lambda x: x)
    function = deserialize_function(serial, function_type)
    assert function(2.3) == 2.3  # function is identity
    ```
    """
    if function_type == 'function':
        # Named function: simple lookup in the Keras custom-object registry.
        return tf.keras.utils.deserialize_keras_object(serial)
    if function_type == 'lambda':
        # Anonymous function: unsafe deserialization from bytecode.
        return generic_utils.func_load(serial)
    raise TypeError('Unknown function type:', function_type)
from src.datasets import VOC2007
from src.datasets import VOC2012
import torch
def _dataset(
    dataset_type: str,
    train_val_test: str = 'train'
) -> torch.utils.data.Dataset:
    """
    Dataset:
        voc2007
        voc2012
    """
    # NOTE: the docstring above is embedded in the error message below via
    # ``_dataset.__doc__``; keep it in sync with the supported datasets.
    # Both VOC datasets share the same split remapping, so apply it once
    # instead of duplicating the if/elif chain per dataset:
    #   'val' -> 'trainval', 'test' -> 'val'; anything else passes through.
    split = {"val": "trainval", "test": "val"}.get(train_val_test, train_val_test)
    if dataset_type == "voc2007":
        dataset = VOC2007(root=ROOT, image_set=split, download=DOWNLOAD)
    elif dataset_type == "voc2012":
        dataset = VOC2012(root=ROOT, image_set=split, download=DOWNLOAD)
    else:
        raise DatasetNotIncludeError("Dataset {0} is not included." \
                "Refer to the following: {1}".format(dataset_type, _dataset.__doc__))
    return dataset
def disaggregate_forecast(history,
                          aggregated_history,
                          aggregated_forecast,
                          dt_units='D',
                          period_agg=7,
                          period_disagg=1,
                          x_reg=None,
                          x_future=None,
                          x_cols_seasonal_interactions=[],
                          pred_level=0.8,
                          agg_fun=['sum']):
    """Disaggregate an aggregated forecast using a logit-transformed
    ARIMA(0, 0, 2) model of the within-period proportions.

    Parameters
    ----------
    history: pandas dataframe containing unaggregated dt and actual
    aggregated_history: pandas dataframe containing the aggregated history
    aggregated_forecast: pandas dataframe with the aggregated forecast to
        split (columns prefixed 'aggregated_' are consumed below)
    dt_units: pandas offset alias for the disaggregated time step (e.g. 'D')
    period_agg: period that aggregated_history is aggregated over
    period_disagg: length of one disaggregated step, in dt_units
    x_reg: optional matrix of regressors covering the history period
    x_future: optional matrix of regressors covering the forecast period
    x_cols_seasonal_interactions: columns of x_reg to interact with
        seasonality
    pred_level: coverage of the returned prediction interval (default 0.8)
    agg_fun: aggregation function(s) used when re-normalising proportions

    Returns
    -------
    dict with keys 'summary', 'residuals', 'coefficients' (model
    diagnostics) and 'disaggregated' (pd data frame containing the
    disaggregated forecast and prediction intervals)
    """
    # NOTE(review): x_reg/x_future default to None but reset_index() is
    # called on them unconditionally here and below — confirm callers
    # always pass both when using this function.
    hist = prepare_history_to_disaggregate(history, aggregated_history,
                                           period_agg,
                                           x_reg.reset_index(),
                                           x_cols_seasonal_interactions)
    # Business-day range starting one disaggregated step after the last
    # observed dt, ending at the last aggregated-forecast dt.
    forecast_dates_disaggregated = pd.to_datetime(pd.bdate_range(
        str(max(history.dt) + _datetime_delta(period_disagg, dt_units)),
        max(aggregated_forecast.dt),
        freq=str(period_disagg) + dt_units))
    fcst = prepare_forecast_to_disaggregate(forecast_dates_disaggregated,
                                            aggregated_forecast,
                                            period_agg,
                                            x_future.reset_index(),
                                            x_cols_seasonal_interactions)
    # Seasonal position dummies p_1..p_{period_agg-1} and their interactions
    # with the selected regressor columns.
    per_dummies = ['p_' + str(p) for p in range(1, period_agg)]
    per_interactions = [x_col + '_x_p_' + str(p)
                        for x_col in x_cols_seasonal_interactions
                        for p in list(range(1, period_agg))]
    # NOTE(review): leftover debug print?
    print(per_interactions)
    if x_reg is not None and x_future is not None:
        x_cols = list(x_reg.columns)
    else:
        x_cols = []
    # Model the logit of the within-period proportion with an MA(2) plus the
    # exogenous regressors built above.
    arima002_disagg_model = ARIMA(
        hist.set_index('dt').logit_proportion_aggregated, order=(0, 0, 2),
        exog=hist.set_index('dt')[x_cols + per_dummies + per_interactions],
        freq=str(period_disagg) + dt_units)
    arima002_disagg_fit = arima002_disagg_model.fit()
    disagg_model_summary = arima002_disagg_fit.summary()
    disagg_model_residuals = pd.DataFrame({
        'residual': arima002_disagg_fit.resid})
    # Coefficient table is recovered from the rendered summary HTML.
    disagg_model_coefficients = pd.read_html(
        disagg_model_summary.tables[1].as_html(), header=0, index_col=0)[0]
    disagg_model_coefficients['regressor'] = disagg_model_coefficients.index
    disagg_fcst = arima002_disagg_fit.get_forecast(
        len(forecast_dates_disaggregated),
        exog=fcst.set_index('dt')[x_cols + per_dummies + per_interactions])
    disagg_fcst_df = pd.DataFrame({'forecast':disagg_fcst.predicted_mean})
    disagg_pred_int_df = disagg_fcst.conf_int(alpha=(1-pred_level)).rename(
        columns={'lower logit_proportion_aggregated':'forecast_lower',
                 'upper logit_proportion_aggregated':'forecast_upper'})
    disagg_fcst_df = pd.concat([disagg_fcst_df, disagg_pred_int_df], axis=1)
    disagg_fcst_df.reset_index(inplace=True)
    disagg_fcst_df = disagg_fcst_df.rename(columns={'index':'dt'})
    fcst_cols = ['forecast', 'forecast_lower', 'forecast_upper']
    # Inverse-logit back to proportions in (0, 1).
    for col in fcst_cols:
        disagg_fcst_df[col] = np.exp(disagg_fcst_df[col]) / (
            1 + np.exp(disagg_fcst_df[col]))
    agg_fcst_df = aggregate_to_longest(disagg_fcst_df, [period_agg],
                                       agg_fun, cols_agg=fcst_cols)
    agg_fcst_df = agg_fcst_df['period' + str(period_agg)]
    # Normalise proportions so each aggregation period sums to 1, then scale
    # by the aggregated forecast to obtain disaggregated levels.
    for col in fcst_cols:
        disagg_fcst_df[col] = disagg_fcst_df[col] / np.repeat(list(
            agg_fcst_df[col]), period_agg)
        disagg_fcst_df[col] = disagg_fcst_df[col] * fcst['aggregated_' + col]
    return {'summary':disagg_model_summary,
            'residuals':disagg_model_residuals,
            'coefficients':disagg_model_coefficients,
            'disaggregated':disagg_fcst_df}
def get_path_cost(latency_dict, path):
    """
    Total cost of all links along ``path``.

    :param latency_dict: mapping of link latencies consulted by
        ``get_link_cost``
    :param path: sequence of nodes; consecutive pairs form the links
    :return: sum of the individual link costs (0 for paths shorter than 2)
    """
    return sum(
        get_link_cost(latency_dict, src, dst)
        for src, dst in zip(path, path[1:])
    )
import os
import json
def main(compilation_db_path, source_files, verbose, formatter, iwyu_args):
    """ Entry point. """
    # Canonicalize the compilation database path: a directory means the
    # conventional compile_commands.json inside it.
    db_path = compilation_db_path
    if os.path.isdir(db_path):
        db_path = os.path.join(db_path, 'compile_commands.json')
    db_path = os.path.realpath(db_path)
    if not os.path.isfile(db_path):
        print('ERROR: No such file or directory: \'%s\'' % db_path)
        return 1
    # Load the compilation database and expand symlinks in its file entries.
    with open(db_path, 'r') as fileobj:
        compilation_db = json.load(fileobj)
    for entry in compilation_db:
        entry['file'] = os.path.realpath(entry['file'])
    # Cross-reference the requested source files with the database.
    requested = [os.path.realpath(s) for s in source_files]
    if not requested:
        # No source files specified, analyze entire compilation database.
        entries = compilation_db
    else:
        # Analyze the requested files that appear in the database; warn
        # about the rest.
        entries = []
        for source in requested:
            matches = [e for e in compilation_db if e['file'] == source]
            if matches:
                entries.extend(matches)
            else:
                print('WARNING: \'%s\' not found in compilation database.' %
                      source)
    # Run the analysis.
    try:
        for entry in entries:
            run_iwyu(entry['directory'], entry['command'], iwyu_args,
                     verbose, formatter)
    except OSError as why:
        print('ERROR: Failed to launch include-what-you-use: %s' % why)
        return 1
    return 0
def qderiv(array): # TAKE THE ABSOLUTE DERIVATIVE OF A NUMARRY OBJECT
    """Take the absolute derivative of an image in memory.

    Accumulates, over the four one-pixel shifts (+/-1 in X and +/-1 in Y),
    the absolute difference between the input image and its shifted copy
    (computed by ``_absoluteSubtract``), producing an absolute-gradient map.

    :param array: 2-D numpy array holding the image data
    :return: float32 numpy array of the same shape with the accumulated
        absolute differences
    """
    #Create 2 empty arrays in memory of the same dimensions as 'array'
    tmpArray = np.zeros(array.shape,dtype=np.float64)
    outArray = np.zeros(array.shape, dtype=np.float64)
    # Get the length of an array side
    (naxis1,naxis2) = array.shape
    #print "The input image size is (",naxis1,",",naxis2,")."
    # NOTE(review): every slice below stops at naxis1-1 / naxis2-1, so the
    # last row and column of tmpArray are never written by the shifts.
    #Main derivate loop:
    #Shift images +/- 1 in Y.
    for y in range(-1,2,2):
        if y == -1:
            #shift input image 1 pixel right
            tmpArray[0:(naxis1-1),1:(naxis2-1)] = array[0:(naxis1-1),0:(naxis2-2)]
            #print "Y shift = 1"
        else:
            #shift input image 1 pixel left
            tmpArray[0:(naxis1-1),0:(naxis2-2)] = array[0:(naxis1-1),1:(naxis2-1)]
            #print "Y shift = -1"
        #print "call _absoluteSubtract()"
        # Accumulate |array - shifted| into outArray.
        (tmpArray,outArray) = _absoluteSubtract(array,tmpArray,outArray)
    #Shift images +/- 1 in X.
    for x in range(-1,2,2):
        if x == -1:
            #shift input image 1 pixel right
            tmpArray[1:(naxis1-1),0:(naxis2-1)] = array[0:(naxis1-2),0:(naxis2-1)]
            #print "X shift = 1"
        else:
            #shift input image 1 pixel left
            tmpArray[0:(naxis1-2),0:(naxis2-1)] = array[1:(naxis1-1),0:(naxis2-1)]
            #print "X shift = -1"
        #print "call _absoluteSubtract()"
        (tmpArray,outArray) = _absoluteSubtract(array,tmpArray,outArray)
    return outArray.astype(np.float32)
def byte(number):
    """Format a raw byte count as a colorized, human friendly KB, MB, GB,
    or TB string (two decimal places)."""
    B = float(number)
    KB = float(1024)     # 1 KB = 1024 bytes
    MB = float(KB ** 2)  # 1 MB = 1,048,576 bytes
    GB = float(KB ** 3)  # 1 GB = 1,073,741,824 bytes
    TB = float(KB ** 4)  # 1 TB = 1024 GB
    c = color_yz.color()
    # Pick the largest unit the value reaches; fall through smallest first.
    if B < KB:
        return c.okgreen(f"{round(B, ndigits=2)} Bytes")
    if KB <= B < MB:
        return c.okgreen(f"{round(B / KB, ndigits=2)} KB")
    if MB <= B < GB:
        return c.okgreen(f"{round(B / MB, ndigits=2)} MB")
    if GB <= B < TB:
        return c.okgreen(f"{round(B / GB, ndigits=2)} GB")
    if TB <= B:
        return c.okgreen(f"{round(B / TB, ndigits=2)} TB")
def compute_optimal_warping_path_subsequence_dtw(D, m=-1):
    """Backtrack a subsequence-DTW warping path through an accumulated cost
    matrix, using step sizes {(1, 0), (0, 1), (1, 1)}.

    Notebook: C7/C7S2_SubsequenceDTW.ipynb

    Args:
        D (np.ndarray): Accumulated cost matrix
        m (int): Column to start backtracking from; -1 selects the cheapest
            end column in the last row (Default value = -1)

    Returns:
        P (np.ndarray): Optimal warping path (array of index pairs)
    """
    n = D.shape[0] - 1
    if m < 0:
        # Optimal end point: cheapest column of the final row.
        m = D[n, :].argmin()
    path = [(n, m)]
    while n > 0:
        if m == 0:
            # Only a vertical step remains in the first column.
            n, m = n - 1, 0
        else:
            candidates = [(n - 1, m - 1), (n - 1, m), (n, m - 1)]
            costs = [D[i, j] for i, j in candidates]
            # First candidate reaching the minimum preserves the original
            # tie-break order: diagonal, then vertical, then horizontal.
            n, m = candidates[costs.index(min(costs))]
        path.append((n, m))
    path.reverse()
    return np.array(path)
from typing import Dict
from typing import Any
from typing import Union
def _start_tracker(n_workers: int) -> Dict[str, Any]:
    """Launch a Rabit tracker and return the environment workers need."""
    # Base environment every worker must receive.
    worker_env: Dict[str, Union[int, str]] = {'DMLC_NUM_WORKER': n_workers}
    tracker = RabitTracker(hostIP=get_host_ip('auto'), n_workers=n_workers,
                           use_logger=False)
    worker_env.update(tracker.worker_envs())
    tracker.start(n_workers)
    # Join the tracker on a daemon thread so the caller is not blocked.
    joiner = Thread(target=tracker.join)
    joiner.daemon = True
    joiner.start()
    return worker_env
def get_type_path(type, type_hierarchy):
    """Return the type's path in the hierarchy, excluding the root type
    (e.g. owl:Thing).

    The path is computed once per type and memoized under the 'path' key of
    its entry in ``type_hierarchy``, so repeated lookups are free.
    """
    node = type_hierarchy[type]
    if 'path' not in node:
        # Walk parent links until we leave the hierarchy (the root).
        path = []
        current = type
        while current in type_hierarchy:
            path.append(current)
            current = type_hierarchy[current]['parent']
        node['path'] = path
    return node['path']
def get_required_webots_version():
    """Return the Webots release this version of the package requires."""
    compatible_release = 'R2020b revision 1'
    return compatible_release
def ScoreStatistics(scores, percentile):
    """
    Interpolated percentile of a gencall score distribution.

    Used e.g. for gc_10 (10th percentile) and gc_50 (50th percentile).

    Args:
        scores (list(float)): Sorted gencall scores
        percentile (int): Percentile to calculate

    Returns:
        float: Interpolated percentile value rounded to 4 decimals
            (nan for an empty list)
    """
    count = len(scores)
    if count == 0:
        return nan
    idx = int(count * percentile / 100)
    # Step back one position when the percentile falls in the lower half
    # of the bin.
    if count * percentile / 100.0 - idx < 0.5:
        idx -= 1
    # Clamp to the distribution's extremes.
    if idx < 0:
        return scores[0]
    if idx >= count - 1:
        return scores[-1]
    # Linearly interpolate between the mid-point percentiles of the two
    # neighbouring positions.
    x1 = 100 * (idx + 0.5) / float(count)
    x2 = 100 * (idx + 1.5) / float(count)
    y1 = float(scores[idx])
    y2 = float(scores[idx + 1])
    return round(y1 + (y2 - y1) / (x2 - x1) * (percentile - x1), 4)
def download_drill(load=True):  # pragma: no cover
    """Download scan of a power drill.

    Originally obtained from Laser Design.

    Parameters
    ----------
    load : bool, optional
        When ``True`` (default) the downloaded mesh is read and returned;
        when ``False`` only the local filename is returned.

    Returns
    -------
    pyvista.PolyData or str
        DataSet or filename depending on ``load``.

    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_drill()
    >>> dataset.plot()
    """
    asset_name = 'drill.obj'
    return _download_and_read(asset_name, load=load)
def schlieren_colormap(color=[0, 0, 0]):
    """
    Build and return a colormap suitable for schlieren plots.
    """
    # Accept single-letter color shorthands alongside RGB triples.
    shorthand = {'k': [0, 0, 0], 'r': [1, 0, 0], 'b': [0, 0, 1],
                 'g': [0, 0.5, 0], 'y': [1, 1, 0]}
    if color in ('k', 'r', 'b', 'g', 'y'):
        color = shorthand[color]
    # Work with the complement so the ramp fades from white to the color.
    color = np.array([1, 1, 1]) - np.array(color)
    positions = np.linspace(0, 1, 20)
    # Steep x**10 ramp keeps most of the range near white.
    colors = {pos: np.array([1, 1, 1]) - pos ** 10 * color
              for pos in positions}
    return make_colormap(colors)
def evaluate_binned_cut(values, bin_values, cut_table, op):
    """
    Apply a binned cut (as defined in ``cut_table``) to the given events.

    Parameters
    ----------
    values: ``~numpy.ndarray`` or ``~astropy.units.Quantity``
        The values on which the cut should be evaluated
    bin_values: ``~numpy.ndarray`` or ``~astropy.units.Quantity``
        The values used to sort the ``values`` into bins
    cut_table: ``~astropy.table.Table``
        A table describing the binned cuts, e.g. as created by
        ``~pyirf.cuts.calculate_percentile_cut``.
        Required columns:
        `low`: lower edges of the bins
        `high`: upper edges of the bins,
        `cut`: cut value
    op: binary operator function
        A function taking two arguments, comparing element-wise and
        returning an array of booleans.
    """
    lower_edges = cut_table["low"].quantity
    upper_edges = cut_table["high"].quantity
    # Bin edges: every lower edge plus the final upper edge.
    edges = np.append(lower_edges, upper_edges[-1])
    indices = calculate_bin_indices(bin_values, edges)
    return op(values, cut_table["cut"][indices].quantity)
import tempfile
def add_EVM(final_update, wd, consensus_mapped_gff3):
    """Merge EVM gene models with GMAP consensus models into one GFF3 file.

    Writes the mRNAs that appear only in ``final_update`` (EVM output) plus
    all genes from ``consensus_mapped_gff3`` (GMAP output) to a temporary
    GFF3 file under ``wd`` and returns its path.

    :param final_update: path to the EVM GFF3 file
    :param wd: working directory used for the temporary output file
    :param consensus_mapped_gff3: path to the GMAP consensus GFF3 file
    :return: path of the combined GFF3 file
    """
    db_evm = gffutils.create_db(final_update, ':memory:', merge_strategy='create_unique', keep_order=True)
    ids_evm = [gene.attributes["ID"][0] for gene in db_evm.features_of_type("mRNA")]
    db_gmap = gffutils.create_db(consensus_mapped_gff3, ':memory:', merge_strategy='create_unique', keep_order=True)
    ids_gmap_full = [gene.attributes["ID"][0] for gene in db_gmap.features_of_type("gene")]
    # GMAP ids are compared after stripping the '_' suffix — presumably a
    # mapping-copy counter; verify against the id naming convention.
    ids_gmap = [gene.attributes["ID"][0].split("_")[0] for gene in db_gmap.features_of_type("gene")]
    # mRNAs present only in the EVM annotation.
    uniq_evm = [evm for evm in ids_evm if not evm in ids_gmap]
    mRNA = []
    for evm in uniq_evm:
        for line in db_evm.parents(evm, order_by='start'):
            mRNA.append(line.attributes["ID"][0])
    mRNA_uniq = list(set(mRNA))
    outfile = tempfile.NamedTemporaryFile(delete=False, prefix="additional.1.", suffix=".gff3", dir=wd)
    gff_out_s = gffwriter.GFFWriter(outfile.name)
    # Write the EVM-only genes (children first, then the gene record)...
    for name in mRNA_uniq:
        for i in db_evm.children(name, order_by='start'):
            gff_out_s.write_rec(i)
        gff_out_s.write_rec(db_evm[name])
    # ...followed by every GMAP gene.
    for name in ids_gmap_full:
        for i in db_gmap.children(name, order_by='start'):
            gff_out_s.write_rec(i)
        gff_out_s.write_rec(db_gmap[name])
    gff_out_s.close()
    return outfile.name
    #return outfile_out.name
def smartCheck(policy_del, policy_add,
               list_rules_match_add=None,
               matched_rules_extended=None,
               subpolicy=False,
               print_add_matches=False,
               print_progress=False,
               DEBUG=False):
    """
    smartCheck will compare two policies trying to find if we can remove from the first policy rules that they are already included in the second one.
    :param policy_del: Rules that usually we want to check if we can delete
    :param policy_add: Rules that usually we want to check if they "cover" rules to be deleted
    :param list_rules_match_add: (OUTPUT Parameter) list of all rules matching the "del" rule. Usually this parameter is internal and it is only needed for non-contiguous wildcard and split networks
    :param matched_rules_extended: (OUTPUT Parameter) dictionary with rules can be removed and their matches
    :param subpolicy: True when checking a subpolicy created from a parent one. For example when a big network needs to be matched by split networks
    :param print_add_matches: Switch to see which rules match the one to be removed
    :param print_progress: Swith for a very verbose mode
    :param DEBUG: debug flag
    :return: List of rules matched
    """
    # True when 'ip' is the ANY address in the given notation
    # (wildcard form vs. netmask form).
    def _is_any(wild, ip):
        if wild:
            return ip == '0.0.0.0/255.255.255.255'
        else:
            return ip == '0.0.0.0/0.0.0.0'
    # Equality of two network strings, tolerating different spellings of the
    # same IPv4 network; falls back to plain string compare on parse errors.
    def _is_ip_equal(ip1, ip2):
        if ip1 == ip2:
            return True
        try:
            ipa1 = ipaddr.IPv4Network(ip1)
            ipa2 = ipaddr.IPv4Network(ip2)
        except:
            return False
        return ipa1 == ipa2
    tools.DEBUG(DEBUG, 'smartCheck', 'Entering smartCheck. subpolicy:', subpolicy)
    # Sentinel values
    if list_rules_match_add is None:
        list_rules_match_add = []
    if matched_rules_extended is None:
        matched_rules_extended = {}
    # It reduces complexity if policy_add is always split for non-cont
    if not subpolicy:
        policy_add.split_non_contiguous_wild()
    rules_to_remove = []
    rules_not_matched = []
    a = policy_del.get_rules_number()
    # Walk every rule of policy_del; a rule is removable only when policy_add
    # fully covers its networks, ports, protocol and action.
    for x in range(1, a+1):
        if print_progress and not subpolicy:
            print 'Processing rule:', x, 'out of', a
        rule_found = False
        non_contigouos_found = False
        fake_any = False
        is_0_any = True
        ruled = policy_del.get_rule(x)
        tools.DEBUG(DEBUG, 'smartCheck', 'get_rule:', ruled)
        if ruled[0] in rules_not_matched:
            continue
        # Usually the last rule it's a DENY IP ANY ANY LOG, if this is the case, we skip it
        if x == a:
            if _is_any(ruled[7], ruled[1]) and _is_any(ruled[7], ruled[2]) and not ruled[6]:
                continue
        if ruled[0].startswith('^'): # Especial rule, usually empty or inactive
            list_rules_match_add.append(ruled[0])
            rule_found = True
        # if DSMO rule was split and one of the networks didn't match. NOT continue
        if subpolicy:
            if ruled[0].startswith('split dsmo rule'):
                if ruled[0].split('}')[1] in rules_not_matched:
                    continue
        wildcard = ruled[7]
        # The wildcard 0.0.0.0 (host wildcard) is not working with ipaddress library as wildcard, only as netmask
        # the same happens with 255.255.255.255 so:
        # 0.0.0.0 wildcard -> 255.255.255.255 netmask
        # 255.255.255.255 wilcard -> 0.0.0.0 netmask
        if wildcard and (ruled[1] == '0.0.0.0/0.0.0.0' or ruled[2] == '0.0.0.0/0.0.0.0'):
            # Link use 0.0.0.0 usually as ANY. This is the specific case when the host 0.0.0.0/32 is being request to be checked
            is_0_any = False
        if not rule_found and (ruled[1] == '' or ruled[2] == ''): # Empty rule
            continue
        sIP = ruled[1]
        if wildcard and '/0.0.0.0' in sIP:
            sIP = sIP.split('/')[0] + '/255.255.255.255'
        elif wildcard and sIP == '0.0.0.0/255.255.255.255':
            sIP = sIP.split('/')[0] + '/0'
        else:
            if wildcard:
                non_contigouos_found = not tools.wild_is_contiguous(sIP.split('/')[1])
        dIP = ruled[2]
        if wildcard == True and '/0.0.0.0' in dIP:
            dIP = dIP.split('/')[0] + '/255.255.255.255'
        elif wildcard == True and dIP == '0.0.0.0/255.255.255.255':
            dIP = dIP.split('/')[0] + '/0'
        else:
            if wildcard:
                non_contigouos_found = non_contigouos_found or not tools.wild_is_contiguous(dIP.split('/')[1])
        dPort = ruled[3]
        sPort = ruled[4]
        proto = ruled[5]
        action = ruled[6]
        # If the rule that we are checking has non-cont wild, we need to split it and check again
        if non_contigouos_found:
            # If this is a subpolicy (policy_temp) and we find a non-cont, we can't continue checking
            # in subpolicies are not allowed non-cont
            if subpolicy:
                while len(list_rules_match_add) > 0:
                    list_rules_match_add.pop()
                return []
            policy_del_temp = FWPolicy('del-temp', 'temp', DEBUG)
            policy_del_temp.new_rule(ruled[1], ruled[2], ruled[3], ruled[4], ruled[5], ruled[6], ruled[7])
            policy_del_temp.split_non_contiguous_wild()
            tools.DEBUG(DEBUG, 'smartCheck', 'Non contiguous found. Splitting policy.', ruled)
            if policy_del_temp.check_if_any_non_contiguous():
                tools.DEBUG(DEBUG, 'smartCheck', 'Non contiguous found after policy splitting. Can not continue.', ruled)
                if subpolicy:
                    while len(list_rules_match_add) > 0:
                        list_rules_match_add.pop()
                    return []
                else:
                    if ruled[0] in rules_to_remove:
                        rules_to_remove.remove(ruled[0])
                    rules_not_matched.append(ruled[0])
                    while len(list_rules_match_add) > 0:
                        list_rules_match_add.pop()
            else:
                # When a rule is split, all the new rules will have the same name of the parent rule
                # While this is the expected behaviour, it creates a problem, because we need to match
                # the whole new temp policy, to know that the parent rule is fully covered
                # To do that, we need to rename the temp-policy with different names
                policy_temp_len = policy_del_temp.get_rules_number()
                for icont in range(1, policy_temp_len + 1):
                    policy_del_temp.set_rule_name(icont, 'split dsmo rule - ' + str(icont) + ' }' + ruled[0])
                tempPolicy_rules_matched = smartCheck(policy_del_temp, policy_add, list_rules_match_add, matched_rules_extended, subpolicy=True, print_add_matches=False, DEBUG=DEBUG)
                if len(tempPolicy_rules_matched) == policy_temp_len:
                    rule_found = True
                else:
                    rules_not_matched.append(ruled[0])
                    if ruled[0] in rules_to_remove:
                        rules_to_remove.remove(ruled[0])
        elif not rule_found:
            if dPort != '0' and sPort != '0':
                tools.DEBUG(DEBUG, 'smartCheck', 'request check flow:', sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto)
                check1 = policy_add.link(sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto, show_deny=True, hide_allow_all=False, strict_search=True, is_0_any=is_0_any)
                tools.DEBUG(DEBUG, 'smartCheck', 'requested check flow answer', check1)
            else:
                '''
                When dport or sport are 0, we want to check ALL ports, so ALL ports should be allowed. When LINK sees a 0
                in port (destination or source) is going to match with ANY rule that matches source/destination IPs, because 0 in port means ANY rule.
                In this case, we need something different, 0 it doesn't mean any, it means ALL. So, we have to perform a STRICT SEARCH:
                - Check the first rule matched
                - If we hit a DENY, clearly ALL ports are NOT allowed.
                - If we hit any other rule, we need to verify if would need to catch any DENY that could hit (NO STRICT SEARCH)
                '''
                tools.DEBUG(DEBUG, 'smartCheck', 'dport or sport is 0. Checking ALL')
                tools.DEBUG(DEBUG, 'smartCheck', 'request check flow:', sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto)
                check1 = policy_add.link(sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto, show_deny=True, hide_allow_all=False, strict_search=True, is_0_any=is_0_any)
                tools.DEBUG(DEBUG, 'smartCheck', 'dport/sport != 0 first requested check flow answer', check1)
                if len(check1) > 0:
                    fake_any = False
                    '''
                    There is a candidate rule for ALL ports, now it's time to check if there is ANY DENY above that could affect
                    DENY HUNT Example:
                    ACL1:
                    permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127
                    ACL2:
                    deny tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127 eq 22
                    permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127
                    The line1 in ACL1 will match with line2 in ACL2. After that, we need to check if a DENY rule is matching also before the permit
                    '''
                    rule_matched = policy_add.get_rule(check1[0])
                    # NOTE(review): the 'or True' below makes this branch
                    # unconditional, so the DENY hunt always runs — confirm
                    # whether the permit-only guard was meant to stay.
                    if rule_matched[6] or True: # Permit = TRUE
                        tools.DEBUG(DEBUG, 'smartCheck', 'request check flow (DENY HUNT):', sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto)
                        check2 = policy_add.link(sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto, show_deny=True, hide_allow_all=False, strict_search=False, is_0_any=is_0_any)
                        tools.DEBUG(DEBUG, 'smartCheck', 'dport/sport != 0 second requested check flow answer (DENY HUNT)', check2)
                        if len(check2) > 0:
                            for i in check2:
                                rule_matched2 = policy_add.get_rule(i)
                                # If there was a match in the 'DENY HUNT' we need to be sure that:
                                # - if the original rule is an ACCEPT, the HUNT is for a DENY
                                # - if the original rule is a DENY, the HUNT is for an ACCEPT
                                if ((rule_matched[6] and not rule_matched2[6]) or
                                        (not rule_matched[6] and rule_matched2[6])):
                                    tools.DEBUG(DEBUG, 'smartCheck', 'Matched DENY. FAKE ANY')
                                    # We found a rule matching with a DENY ABOVE the ANY, so, the ANY is "fake"
                                    fake_any = True
                                    break
            matching_action = False
            if len(check1) > 0 and not fake_any:
                rule_matched = policy_add.get_rule(check1[0])
                matching_action = action == rule_matched[6]
            if matching_action:
                # Action
                if action != rule_matched[6]:
                    continue
                # Adding every matching
                t = rule_matched[0]
                if t not in list_rules_match_add:
                    list_rules_match_add.append(t)
                tools.DEBUG(DEBUG, 'smartCheck', 'Matching rule', check1, 'subpolicy', subpolicy, 'list_rules_match_add:', list_rules_match_add)
                # Check is the src/dst that we are looking for is exactly the same we found
                if not (_is_ip_equal(ruled[1], rule_matched[1]) and _is_ip_equal(ruled[2], rule_matched[2])):
                    # When smartCheck is called, all non-contiguous wildcards are split, so if at this point
                    # we found a match, either in the del_policy or in the add_policy, the rule can't be marked as
                    # rule match.
                    if wildcard:
                        if not _is_any(rule_matched[7], rule_matched[1]):
                            non_contigouos_found = not tools.wild_is_contiguous(rule_matched[1].split('/')[1])
                        if not _is_any(rule_matched[7], rule_matched[2]):
                            non_contigouos_found = non_contigouos_found or not tools.wild_is_contiguous(rule_matched[2].split('/')[1])
                    # If there is a non-cont wild in the matched rule, we need to go to the next rule, we can't do more
                    if non_contigouos_found:
                        t = rule_matched[0]
                        if t in list_rules_match_add:
                            list_rules_match_add.remove(t)
                        continue
                    tools.DEBUG(DEBUG, 'smartCheck', 'working with IPs', sIP, dIP, rule_matched[1], rule_matched[2])
                    n1s = ipaddr.IPv4Network(sIP)
                    n1d = ipaddr.IPv4Network(dIP)
                    if _is_any(rule_matched[7], rule_matched[1]):
                        n2s = ipaddr.IPv4Network('0.0.0.0/0')
                    else:
                        if '/0.0.0.0' in rule_matched[1]: # This will happen only with wildcard = True
                            # The wildcard 0.0.0.0 (host wildcard) is not working with ipaddress library
                            n2s = ipaddr.IPv4Network(rule_matched[1].split('/')[0] + '/255.255.255.255')
                        else:
                            n2s = ipaddr.IPv4Network(rule_matched[1])
                    if _is_any(rule_matched[7], rule_matched[2]):
                        n2d = ipaddr.IPv4Network('0.0.0.0/0')
                    else:
                        if '/0.0.0.0' in rule_matched[2]:
                            # The wildcard 0.0.0.0 (host wildcard) is not working with ipaddress library
                            n2d = ipaddr.IPv4Network(rule_matched[2].split('/')[0] + '/255.255.255.255')
                        else:
                            n2d = ipaddr.IPv4Network(rule_matched[2])
                    if n1s.compare_networks(n2s) < 0 and n1s != ipaddr.IPv4Network('0.0.0.0/0'):
                        new_sources = list(n1s.address_exclude(n2s))
                    else:
                        new_sources = [n1s]
                    if n1d.compare_networks(n2d) < 0 and n1d != ipaddr.IPv4Network('0.0.0.0/0'):
                        new_dest = list(n1d.address_exclude(n2d))
                    else:
                        new_dest = [n1d]
                    tools.DEBUG(DEBUG, 'smartCheck', 'working with IPs (2)', sIP, dIP, rule_matched[1], rule_matched[2], new_sources, new_dest)
                    if new_sources[0] != n1s or new_dest[0] != n1d:
                        tools.DEBUG(DEBUG, 'smartCheck', 'Creating new policy with smaller network')
                        tools.DEBUG(DEBUG, 'smartCheck', 'Sources:', new_sources, 'A:', new_sources[0], type(new_sources[0]), 'B:', n1s, type(n1s))
                        tools.DEBUG(DEBUG, 'smartCheck', 'Dest', new_dest)
                        policy_del_temp = FWPolicy('del-temp', 'temp', DEBUG)
                        for new_s in new_sources:
                            for new_d in new_dest:
                                irule_number = policy_del_temp.new_rule(str(new_s.with_hostmask) if wildcard else str(new_s.with_netmask),
                                                                        str(new_d.with_hostmask) if wildcard else str(new_d.with_netmask),
                                                                        ruled[3], ruled[4], ruled[5], ruled[6], ruled[7])
                                # Check comment for non-contiguous rule to understand the naming of the rules
                                policy_del_temp.set_rule_name(irule_number, 'split rule -' + str(irule_number))
                        # We are going to check a subpolicy, so from a rule partially matched, we create a subpolicy
                        # with all networks not mached. And start again, if in this case all of them are matched
                        # then the original rule can be removed
                        tempPolicy_rules_matched = smartCheck(policy_del_temp, policy_add, list_rules_match_add, matched_rules_extended, subpolicy=True, print_add_matches=False, DEBUG=DEBUG)
                        if len(tempPolicy_rules_matched) == policy_del_temp.get_rules_number():
                            rule_found = True
                        else:
                            tools.DEBUG(DEBUG, 'smartCheck', tempPolicy_rules_matched, '!=', policy_del_temp.get_rules_number(), 'Not all policy temp matched. Parent rule not mached.')
                            while len(list_rules_match_add) > 0:
                                list_rules_match_add.pop()
                            if ruled[0].startswith('split dsmo rule'):
                                rules_not_matched.append(ruled[0].split('}')[1])
                            else:
                                rules_not_matched.append(ruled[0])
                            if ruled[0] in rules_to_remove:
                                rules_to_remove.remove(ruled[0])
                    else:
                        # If there isn't any smaller network to check the whole rule is a match
                        rule_found = True
                else:
                    rule_found = True
            else: # if not matching_action
                tools.DEBUG(DEBUG, 'smartCheck', 'Not matching ACTION')
                if subpolicy:
                    tools.DEBUG(DEBUG, 'smartCheck', 'One rule not matched in subpolicy')
                    # list_rules_match_add can't be cleared with assigning an empty value because
                    # is a mutable object, so assigning an empty value will create a new object instead
                    # of removin the old one
                    while len(list_rules_match_add) > 0:
                        list_rules_match_add.pop()
                    return []
                # If we find a line that is not matched we need to be sure that line is not part of a bigger rule
                # for example a DSMO line
                while len(list_rules_match_add) > 0:
                    list_rules_match_add.pop()
                rules_not_matched.append(ruled[0])
                if ruled[0] in rules_to_remove:
                    rules_to_remove.remove(ruled[0])
        # A rule is removable only when it was matched AND at least one
        # covering rule was recorded for it.
        if rule_found and len(list_rules_match_add) > 0:
            if ruled[0] not in rules_to_remove:
                rules_to_remove.append(ruled[0])
            if not subpolicy:
                if print_add_matches:
                    print 'RULE MATCHED!'
                    print 'Rule to be removed:', ruled[0]
                    print 'Rules to be added: ', '\n\t\t   '.join(list_rules_match_add)
                    print '-------------------------------'
                matched_rules_extended[ruled[0]] = '\n'.join(list_rules_match_add)
                while len(list_rules_match_add) > 0:
                    list_rules_match_add.pop()
    return rules_to_remove
import logging
def create_volume(cinder, size, name=None, image=None):
    """Create a cinder volume and wait until it becomes available.

    :param cinder: Authenticated cinderclient
    :type cinder: cinder.Client
    :param size: Size of the volume
    :type size: int
    :param name: display name for new volume
    :type name: Option[str, None]
    :param image: Image to download to volume.
    :type image: Option[str, None]
    :returns: cinder volume pointer
    :rtype: cinderclient.common.utils.RequestIdProxy
    """
    logging.debug('Creating volume')
    vol = cinder.volumes.create(size=size, name=name, imageRef=image)
    # Block until cinder reports the volume as usable.
    resource_reaches_status(
        cinder.volumes,
        vol.id,
        expected_status='available',
        msg='Volume status wait')
    return vol
def function_linenumber(function_index=1, function_name=None, width=5):
    """
    Return the current line number of a frame on the call stack.

    :param function_index: int of how many frames back the program should look (2 will give the parent of the caller)
    :param function_name: str of what function to look for (should not be used with function_index)
    :param width: int column width the result is left-justified to, or None
        to return the raw line number
    :return: str of the current linenumber (int when width is None)
    """
    frm = func_frame(function_index + 1, function_name)
    if width is None:
        # Bug fix: frame objects expose ``f_lineno`` (no leading underscore);
        # the previous ``frm._f_lineno`` raised AttributeError here.
        return frm.f_lineno
    return str(frm.f_lineno).ljust(width)
from typing import Tuple
def get_event_times(
    data: np.ndarray,
    kernel_size: int = 71,
    skip_first: int = 20 * 60,
    th: float = 0.1,
    abs_val: bool = False,
    shift: int = 0,
    debug: bool = False,
) -> Tuple[list, list]:
    """
    Given a 1D time series, return the times of every 'stimulus' event
    (signal rising above the threshold).

    Arguments:
        data: 1D signal to scan.
        kernel_size: median-filter window (odd); None skips smoothing.
        skip_first: onsets earlier than this (in samples) are discarded.
        th: threshold the (filtered) signal must exceed.
        abs_val: rectify the signal before thresholding.
        shift: offset subtracted from every onset/offset returned.
        debug: log the number of events and plot signal + events.

    Returns:
        (onsets, offsets) lists of event start/end times.

    Raises:
        ValueError: if onsets/offsets end up mismatched or out of order.
    """
    original = data.copy()
    if abs_val:
        data = np.abs(data)
    if kernel_size is not None:
        # Median filter suppresses brief spikes before thresholding.
        data = medfilt(data, kernel_size=kernel_size)
    onsets, offsets = get_onset_offset(data, th=th)
    # Drop onsets before the first offset so pairs line up.
    onsets = [on for on in onsets if on > offsets[0]]
    # skip onsets that occurred soon after the session start
    onsets = [on - shift for on in onsets if on > skip_first]
    # NOTE(review): offsets are compared against the already-shifted first
    # onset while 'off' is unshifted — confirm intended when shift != 0.
    offsets = [off - shift for off in offsets if off > onsets[0]]
    # check
    if len(onsets) != len(offsets):
        raise ValueError("Error while getting event times")
    for on, off in zip(onsets, offsets):
        if on > off:
            raise ValueError("Onsets cant be after offset")
    if debug:
        logger.debug(f"Found {len(onsets)} event times")
        # visualize event times
        plot_signal_and_events(
            data, onsets, offsets, second_signal=original, show=True
        )
    return onsets, offsets
def init_base_item(mocker):
    """Return a factory that builds a stub BaseItem for testing."""
    # Neutralize the abstract interface and the real constructor so the
    # abstract base class can be instantiated directly.
    mocker.patch.multiple(
        houdini_package_runner.items.base.BaseItem,
        __abstractmethods__=set(),
        __init__=lambda x, y: None,
    )
    def _build_item():
        return houdini_package_runner.items.base.BaseItem(None)
    return _build_item
def create_sftp_client2(host, port, username, password, keyfilepath, keyfiletype):
    """
    create_sftp_client(host, port, username, password, keyfilepath, keyfiletype) -> SFTPClient

    Creates a SFTP client connected to the supplied host on the supplied port authenticating as the user with
    supplied username and supplied password or with the private key in a file with the supplied path.
    If a private key is used for authentication, the type of the keyfile needs to be specified as DSA or RSA.

    :param host: hostname or IP address of the SFTP server.
    :param port: TCP port to connect to.
    :param username: user to authenticate as.
    :param password: password (may be None when key authentication is used).
    :param keyfilepath: filesystem path to a private key file, or None.
    :param keyfiletype: 'DSA' or 'RSA' — type of the private key file.
    :rtype: SFTPClient object, or None when the connection could not be created.
    """
    ssh = None
    sftp = None
    key = None
    try:
        if keyfilepath is not None:
            # Get private key used to authenticate user.
            if keyfiletype == 'DSA':
                # The private key is a DSA type key.
                key = paramiko.DSSKey.from_private_key_file(keyfilepath)
            else:
                # The private key is a RSA type key.
                # BUG FIX: from_private_key() expects a file-like object;
                # from_private_key_file() is the variant that takes a path.
                key = paramiko.RSAKey.from_private_key_file(keyfilepath)
        # Connect SSH client accepting all host keys.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, port, username, password, pkey=key)
        # Using the SSH client, create a SFTP client.
        sftp = ssh.open_sftp()
        # Keep a reference to the SSH client in the SFTP client as to prevent the former from
        # being garbage collected and the connection from being closed.
        sftp.sshclient = ssh
        return sftp
    except Exception as e:
        print('An error occurred creating SFTP client: %s: %s' % (e.__class__, e))
        # Best-effort cleanup; deliberately returns None on failure rather than raising.
        if sftp is not None:
            sftp.close()
        if ssh is not None:
            ssh.close()
        return None
def get_vectorize_layer(max_features=10000, sequence_length=250) \
        -> tf.keras.layers.experimental.preprocessing.TextVectorization:
    """Create a TextVectorization layer turning batches of strings into padded token-id sequences.

    Parameters
    ----------
    max_features : int
        The maximum size of the vocabulary for this layer.
    sequence_length : int
        Dimension padded or truncated to exactly sequence_length values.

    Returns
    -------
    tf.keras.layers.experimental.preprocessing.TextVectorization
        Text vectorization layer.
    """
    return tf.keras.layers.experimental.preprocessing.TextVectorization(
        standardize=custom_standardization,
        max_tokens=max_features,
        output_mode='int',
        output_sequence_length=sequence_length,
    )
def test_login_success(self):
    """
    In this case both are false, meaning the if statements doesn't get executed
    """
    logged_in_user = login_user(test_user.email, test_user.password)
    return logged_in_user == test_user
async def tally():
    """
    Get the results of all election tallies.

    Returns:
        Tally results for each contest in the election
    """
    election_tally = election.get_election_tally()
    contests = []
    for contest, contest_details in election_tally.contests.items():
        selections = []
        for selection, selection_details in contest_details.selections.items():
            selections.append({
                "selection": selection,
                "tally": selection_details.tally
            })
        contests.append({"contest": contest, "selections": selections})
    return {"contests": contests}
def expand_parameters_from_remanence_array(magnet_parameters, params, prefix):
    """
    Return a new parameters dict with the magnet parameters in the form
    '<prefix>_<magnet>_<segment>', with the values from 'magnet_parameters'
    and other parameters from 'params'.

    The length of the array 'magnet_parameters' must be equal to the sum of
    the number of segments in both cylinders: the first n_II elements refer
    to the inner magnet (II), the remaining n_IV elements to the outer
    magnet (IV). The input 'params' dict is not modified.
    """
    expanded = dict(params)
    n_inner = params["n_II"]
    n_outer = params["n_IV"]
    for seg in range(n_inner):
        expanded["%s_II_%d" % (prefix, seg + 1,)] = magnet_parameters[seg]
    for seg in range(n_outer):
        # Outer-magnet values follow the n_II inner-magnet values.
        expanded["%s_IV_%d" % (prefix, seg + 1,)] = magnet_parameters[n_inner + seg]
    return expanded
import math
def addToOrStartNewRange(oldRange, tissueRangeScores, newPosition, vals, tissues, tissueFhs):
    """For all the tissues in vals, figure out if we are still in the same exon
    and adding to the previous range/score combo, or if we are in the same
    exon but with a different score, or a new exon altogether.
    Return: The new range (either an expanded oldRange or brand new range if a new exon)

    Side effects: mutates tissueRangeScores (per-tissue dict of gene -> BED-like
    record) and may flush finished records through writePreviousRangeScores to
    the per-tissue file handles in tissueFhs.

    Assumes newPosition/oldRange are namedtuple-like records with fields
    chrom, start, end, gene, score (uses _replace) — TODO confirm against caller.
    vals is assumed to be parallel to tissues (one score string per tissue).
    """
    newRange = oldRange
    if not oldRange:
        # Very first record: seed every tissue with this position and its score.
        newRange = newPosition
        for t,tissue in enumerate(tissueRangeScores):
            tissueRangeScores[tissue][newPosition.gene] = newPosition._replace(score=float(vals[t]))
    else:
        if newPosition.chrom != oldRange.chrom:
            #brand new exon/gene (chromosome changed): flush everything pending
            writePreviousRangeScores(tissueRangeScores, tissues, None, -1, tissueFhs)
            for t,tissue in enumerate(tissues):
                newBed = newPosition._replace(score=float(vals[t]))
                tissueRangeScores[tissue].clear()
                tissueRangeScores[tissue][newPosition.gene] = newBed
            newRange = newPosition
        else:
            if newPosition.start > oldRange.end:
                # new exon (gap on the same chromosome): flush and restart
                writePreviousRangeScores(tissueRangeScores,tissues, None, -1, tissueFhs)
                for t,tissue in enumerate(tissues):
                    newPosition = newPosition._replace(score=float(vals[t]))
                    tissueRangeScores[tissue].clear()
                    tissueRangeScores[tissue][newPosition.gene] = newPosition
                newRange = newPosition
            else:
                # figure out whether to extend old record or maybe add a new one
                newRange = oldRange._replace(end=newPosition.end)
                if newPosition.gene not in oldRange.gene:
                    # new gene that overlaps exons, disallow: track both genes on
                    # the range but drop their pending per-tissue records
                    newRange = oldRange._replace(gene=oldRange.gene+[newPosition.gene])
                    for t,tissue in enumerate(tissues):
                        for gene in oldRange.gene:
                            if gene in tissueRangeScores[tissue]:
                                del tissueRangeScores[tissue][gene]
                elif len(oldRange.gene) == 1:
                    # Same single gene: per tissue, either extend the pending
                    # record (same score, NaN == NaN treated as equal) or flush
                    # it and start a fresh record with the new score.
                    for t,tissue in enumerate(tissues):
                        oldRange = tissueRangeScores[tissue][newPosition.gene]
                        newScore = float(vals[t])
                        if oldRange.score == newScore or (math.isnan(newScore) and math.isnan(oldRange.score)):
                            temp = oldRange._replace(end=newPosition.end)
                            tissueRangeScores[tissue][newPosition.gene] = temp
                        else:
                            # write and clear old for this gene, then add new
                            writePreviousRangeScores(tissueRangeScores, [tissue], [newPosition.gene], -1, tissueFhs)
                            tissueRangeScores[tissue][newPosition.gene] = newPosition._replace(score=newScore)
    # Normalize: the returned range always carries its gene(s) as a list.
    if type(newRange.gene) is not list:
        newRange = newRange._replace(gene=[newRange.gene])
    return newRange
def transformNode(doc, newTag, node=None, **attrDict):
    """Transform a DOM node into new node and copy selected attributes.

    Creates a new DOM node with tag name 'newTag' for document 'doc'.
    For each keyword argument newAttr=value: when 'node' is None, the value
    (stringified) is set directly; otherwise the value names an attribute of
    'node' whose content is copied (falling back to the stringified value
    itself when 'node' has no such attribute).

    E.g.
        n = transformNode(doc, "node1", x="0", y="1")
            -> DOM node for <node1 x="0" y="1"/>
        n = transformNode(doc, "node1", x=0, y=1+1)
            -> DOM node for <node1 x="0" y="2"/>
        n = transformNode(doc, "node1", node0, x="x0", y="x0", zoo=bar())
            -> DOM node for <node1 x="[node0.x0]" y="[node0.y0]" zoo="[bar()]"/>
    """
    result = doc.createElement(newTag)
    for targetAttr, source in attrDict.items():
        sourceStr = str(source)
        if node:
            copied = node.getAttribute(sourceStr)
            # Empty lookup falls back to the literal value.
            result.setAttribute(targetAttr, copied or sourceStr)
        else:
            result.setAttribute(targetAttr, sourceStr)
    return result
def jsonify(status=200, indent=2, sort_keys=True, **kwargs):
    """ Creates a jsonified response. Necessary because the default
    flask.jsonify doesn't correctly handle sets, dates, or iterators

    Args:
        status (int): The status code (default: 200).
        indent (int): Number of spaces to indent (default: 2).
        sort_keys (bool): Sort response dict by keys (default: True).
        kwargs (dict): The response to jsonify.

    Returns:
        (obj): Flask response
    """
    body = dumps(
        kwargs, cls=CustomEncoder,
        indent=indent, sort_keys=sort_keys, ensure_ascii=False)
    response = make_response(body)
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    response.headers['mimetype'] = 'application/json'
    response.status_code = status
    return response
from pathlib import Path
from typing import Optional
from typing import List
import tempfile
import os
import tarfile
import requests
from typing import Dict
def detect_symbols(
    sources_dir: Path,
    host: str = "http://127.0.0.1",
    port: int = 8001,
    try_expand_macros: Optional[bool] = None,
    require_blank_border: Optional[bool] = None,
    insert_function_elements: Optional[bool] = None,
    merge_adjacent_elements: Optional[bool] = None,
) -> List[Symbol]:
    """
    Detect positions of symbols in LaTeX paper. Documentation and defaults for options (e.g.,
    'require_blank_border') appears in the server code for the 'extract_symbols' endpoint.

    The TeX sources in *sources_dir* are packed into a gzipped tarball and
    POSTed to the symbol-extraction service at ``{host}:{port}/``. The JSON
    response is decoded into ``Symbol`` objects, resolving parent links in a
    second pass.

    Raises ServerConnectionException when the HTTP request itself fails.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # Prepare a gzipped tarball file containing the sources.
        archive_filename = os.path.join(temp_dir, "archive.tgz")
        with tarfile.open(archive_filename, "w:gz") as archive:
            # arcname of os.path.sep places the directory contents at the
            # root of the archive rather than under the directory's name.
            archive.add(sources_dir, arcname=os.path.sep)

        # Prepare query parameters.
        with open(archive_filename, "rb") as archive_file:
            files = {"sources": ("archive.tgz", archive_file, "multipart/form-data")}

            # Make request to service.
            endpoint = f"{host}:{port}/"
            try:
                response = requests.post(
                    endpoint,
                    files=files,
                    params={
                        "try_expand_macros": try_expand_macros,
                        "require_blank_border": require_blank_border,
                        "insert_function_elements": insert_function_elements,
                        "merge_adjacent_elements": merge_adjacent_elements,
                    },
                )
            except requests.exceptions.RequestException as e:
                raise ServerConnectionException(
                    f"Request to server {endpoint} failed.", e
                )

    # Get result
    # NOTE(review): no status-code check here; a non-2xx response would
    # surface as a JSON decode error below — confirm this is intended.
    data = response.json()

    # Create symbols from JSON.
    symbols: Dict[SymbolId, Symbol] = {}
    parents: Dict[SymbolId, SymbolId] = {}
    for item in data:
        symbol = Symbol(
            id_=item["id"],
            type_=item["type"],
            mathml=item["mathml"],
            tex=item["tex"],
            location=Location(
                item["location"]["left"],
                item["location"]["top"],
                item["location"]["width"],
                item["location"]["height"],
                item["location"]["page"],
            ),
            parent=None,
        )
        symbols[symbol.id_] = symbol
        parents[symbol.id_] = item["parent"]

    # Resolve parents of symbols (second pass, once every symbol exists).
    for id_, symbol in symbols.items():
        if parents[id_]:
            symbol.parent = symbols[parents[id_]]

    return [s for s in symbols.values()]
import datasets
def browse(dataset_id=None, endpoint_id=None, endpoint_path=None):
    """
    - Get list of files for the selected dataset or endpoint ID/path
    - Return a list of files to a browse view

    The target template (browse.jinja2) expects an `endpoint_uri` (if
    available for the endpoint), `target` (either `"dataset"`
    or `"endpoint"`), and 'file_list' (list of dictionaries) containing
    the following information about each file in the result:

    {'name': 'file name', 'size': 'file size', 'id': 'file uri/path'}

    If you want to display additional information about each file, you
    must add those keys to the dictionary and modify the browse.jinja2
    template accordingly.
    """
    # Exactly one of dataset_id or (endpoint_id AND endpoint_path) must be set.
    assert bool(dataset_id) != bool(endpoint_id and endpoint_path)
    if dataset_id:
        # Resolve the dataset to the configured endpoint and base path.
        try:
            dataset = next(ds for ds in datasets if ds['id'] == dataset_id)
        except StopIteration:
            abort(404)

        endpoint_id = app.config['DATASET_ENDPOINT_ID']
        endpoint_path = app.config['DATASET_ENDPOINT_BASE'] + dataset['path']
    else:
        endpoint_path = '/' + endpoint_path

    # Build a TransferClient from the transfer tokens stored in the session.
    transfer_tokens = session['tokens']['transfer.api.globus.org']

    authorizer = RefreshTokenAuthorizer(
        transfer_tokens['refresh_token'],
        load_portal_client(),
        access_token=transfer_tokens['access_token'],
        expires_at=transfer_tokens['expires_at_seconds'])

    transfer = TransferClient(authorizer=authorizer)
    try:
        transfer.endpoint_autoactivate(endpoint_id)
        listing = transfer.operation_ls(endpoint_id, path=endpoint_path)
    except TransferAPIError as err:
        # Surface the API error to the user and fall back to the transfer page.
        flash('Error [{}]: {}'.format(err.code, err.message))
        return redirect(url_for('transfer'))

    # Directories are dropped; only plain files are shown in the view.
    file_list = [e for e in listing if e['type'] == 'file']

    ep = transfer.get_endpoint(endpoint_id)
    https_server = ep['https_server']
    # Direct-download URI is only available when the endpoint has an HTTPS server.
    endpoint_uri = https_server + endpoint_path if https_server else None

    webapp_xfer = 'https://app.globus.org/file-manager?' + \
        urlencode(dict(origin_id=endpoint_id, origin_path=endpoint_path))

    return render_template('browse.jinja2', endpoint_uri=endpoint_uri,
                           target="dataset" if dataset_id else "endpoint",
                           description=(dataset['name'] if dataset_id
                                        else ep['display_name']),
                           file_list=file_list, webapp_xfer=webapp_xfer)
def tag_group(tag_group, tag):
    """Select a tag group and a tag."""
    # Package the selection as the payload dict expected by the caller.
    return {"group": tag_group, "tag": tag}
from typing import Iterable
def rollup(step: Step, store: TableStore):
    """Rollup a table to produce an aggregation summary.

    :param step:
        Parameters to execute the operation.
        See :py:class:`~data_wrangling_components.engine.verbs.rollup.RollupArgs`.
    :type step: Step
    :param store:
        Table store that contains the inputs to be used in the execution.
    :type store: TableStore

    :return: new table with the result of the operation.
    """
    rollup_args = RollupArgs(
        column=step.args["column"],
        to=step.args["to"],
        operation=FieldAggregateOperation(step.args["operation"]),
    )
    source_table = store.get(step.input)

    aggregated = source_table.agg(rollup_args.operation.value)[rollup_args.column]
    # Scalar results are wrapped so the DataFrame constructor gets a sequence.
    if not isinstance(aggregated, Iterable):
        aggregated = [aggregated]
    if isinstance(aggregated, pd.Series):
        aggregated = aggregated.reset_index(drop=True)

    return pd.DataFrame({rollup_args.to: aggregated})
from optimade.server.config import CONFIG
def prefix_provider(string: str) -> str:
    """Prefix string with `_{provider}_`"""
    provider_fields = CONFIG.provider_fields.get("structures", [])
    # Only provider-specific fields are prefixed; anything else passes through.
    if string not in provider_fields:
        return string
    return f"_{CONFIG.provider.prefix}_{string}"
def combine_sequences(vsequences, jsequences):
    """
    Do a pairwise combination of the v and j sequences to get putative germline sequences for the species.

    Keys of *vsequences* are (species, v-allele) pairs; keys of *jsequences*
    are pairs whose second element is the j-allele. The output maps
    '<species>_<v-allele>_<j-allele>' (spaces replaced by underscores) to the
    concatenated sequences.
    """
    combined = {}
    for (vspecies, vallele), vseq in vsequences.items():
        for (_, jallele), jseq in jsequences.items():
            key = ("%s_%s_%s" % (vspecies, vallele, jallele)).replace(" ", "_")
            combined[key] = vseq + jseq
    return combined
def read_seq_file(filename):
    """Reads data from sequence alignment test file.

    Args:
        filename (str): The file containing the edge list.

    Returns:
        str: The first sequence of characters.
        str: The second sequence of characters.
        int: The cost per gap in a sequence.
        int: The cost per mismatch in a sequence.
    """
    with open(filename, 'r') as handle:
        next(handle)  # first line is a header and is ignored
        gap_str, mismatch_str = next(handle).strip().split()
        cost_gap = int(gap_str)
        cost_mismatch = int(mismatch_str)
        seq_x = next(handle).strip()
        seq_y = next(handle).strip()
    return seq_x, seq_y, cost_gap, cost_mismatch
import scipy
import numpy
def OrthogonalInit(rng, sizeX, sizeY, sparsity=-1, scale=1):
"""
Orthogonal Initialization
"""
sizeX = int(sizeX)
sizeY = int(sizeY)
assert sizeX == sizeY, 'for orthogonal init, sizeX == sizeY'
if sparsity < 0:
sparsity = sizeY
else:
sparsity = numpy.minimum(sizeY, sparsity)
values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
for dx in range(sizeX):
perm = rng.permutation(sizeY)
new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
values[dx, perm[:sparsity]] = new_vals
# Use SciPy:
if sizeX*sizeY > 5000000:
u,s,v = scipy.linalg.svd(values)
else:
u,s,v = numpy.linalg.svd(values)
values = u * scale
return values.astype(theano.config.floatX) | 98b53e11d6c3a641d6e8fede0d28651c48aa5407 | 3,631,769 |
def build_complement(dna):
    """
    :param dna: str, the DNA strand that user gives(all letters are upper case)
    :return: str, the complement of dna

    Characters other than A/T/G/C are silently dropped, matching the
    original behavior.
    """
    pairing = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(pairing.get(base, '') for base in dna)
def _seasonal_prediction_with_confidence(arima_res, start, end, exog, alpha,
                                         **kwargs):
    """Compute the prediction for a SARIMAX and get a conf interval

    Unfortunately, SARIMAX does not really provide a nice way to get the
    confidence intervals out of the box, so we have to perform the
    ``get_prediction`` code here and unpack the confidence intervals manually.

    Notes
    -----
    For internal use only.
    """
    prediction = arima_res.get_prediction(start=start, end=end, exog=exog,
                                          **kwargs)
    mean_forecast = prediction.predicted_mean
    interval = prediction.conf_int(alpha=alpha)
    return mean_forecast, interval
def attack(X_train, y_train, X_test, y_test, unmon_label, args, VERBOSE=1):
    """
    Perform WF training and testing

    Trains a ConvNet on the training traces, holding out half of the given
    test set as validation, then reports overall accuracy plus separate
    accuracies on monitored (label != unmon_label) and unmonitored traces.

    Returns:
        (all_acc, mon_acc, umon_acc): test accuracies on the full test split,
        the monitored-only split, and the unmonitored-only split.
    """
    classes = len(set(list(y_train)))
    print(classes)
    # shuffle and split for val
    s = np.arange(X_test.shape[0])
    np.random.shuffle(s)
    sp = X_test.shape[0]//2
    X_va = X_test[s][sp:]
    y_va = y_test[s][sp:]
    X_test = X_test[s][:sp]
    y_test = y_test[s][:sp]
    # convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, classes)
    y_va = np_utils.to_categorical(y_va, classes)
    # Build and compile model
    print("Compiling model...")
    model = ConvNet.build(classes=classes, input_shape=(5000, 1))
    # Train the model
    filepath = args.output
    # NOTE(review): mode='max' with monitor='val_loss' keeps the checkpoint
    # with the HIGHEST validation loss — likely should be 'min'/'auto'; confirm.
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True, mode='max')
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, mode='auto', restore_best_weights=True)
    callbacks_list = [checkpoint, early_stopping]
    history = model.fit(X_train, y_train,
                        epochs=30,
                        verbose=VERBOSE,
                        validation_data=(X_va, y_va),
                        callbacks=callbacks_list)
    # Save & reload model
    model.save(filepath)
    del model
    model = load_model(filepath)
    # Split the held-out test data into monitored vs unmonitored traces.
    X_test_mon = X_test[y_test != unmon_label]
    y_test_mon = y_test[y_test != unmon_label]
    X_test_unmon = X_test[y_test == unmon_label]
    y_test_unmon = y_test[y_test == unmon_label]
    # Test the model
    y_test = np_utils.to_categorical(y_test, classes)
    score = model.evaluate(X_test, y_test,
                           verbose=VERBOSE)
    score_train = model.evaluate(X_train, y_train,
                                 verbose=VERBOSE)
    y_test_mon = np_utils.to_categorical(y_test_mon, classes)
    y_test_unmon = np_utils.to_categorical(y_test_unmon, classes)
    all_acc = score[1]
    print("\n=> Train score:", score_train[0])
    print("=> Train accuracy:", score_train[1])
    print("\n=> Test score:", score[0])
    print("=> Test accuracy:", score[1])
    score = model.evaluate(X_test_mon, y_test_mon,
                           verbose=VERBOSE)
    print("\n=> Test_mon score:", score[0])
    print("=> Test_mon accuracy:", score[1])
    mon_acc = score[1]
    score = model.evaluate(X_test_unmon, y_test_unmon,
                           verbose=VERBOSE)
    print("\n=> Test_unmon score:", score[0])
    print("=> Test_unmon accuracy:", score[1])
    umon_acc = score[1]
    return all_acc, mon_acc, umon_acc
from typing import Counter
def create_lexicon(pos, neg):
    """Create Lexicon.

    Tokenizes up to ``hm_lines`` lines from each of the two corpora,
    lemmatizes every token, and keeps only tokens whose frequency lies
    strictly between 50 and 1000.
    """
    all_tokens = []
    for corpus_path in [pos, neg]:
        with open(corpus_path, 'r') as handle:
            for line in handle.readlines()[:hm_lines]:
                all_tokens.extend(word_tokenize(line.lower()))

    lemmas = [lemmatizer.lemmatize(tok) for tok in all_tokens]
    counts = Counter(lemmas)
    # No super common words or too rare
    return [tok for tok in counts if 1000 > counts[tok] > 50]
def trailing_zeros(x):
    """Number of trailing zeros in a number.

    Returns 0 for zero and for non-integer values. Negative integers are
    handled via their absolute value (the original crashed on them).
    """
    # BUG FIX: the original ``x % 1 != 0 | x == 0`` parsed as a chained
    # comparison against ``0 | x`` (``|`` binds tighter than ``!=``/``==``),
    # so the guard never fired and x == 0 crashed in log10.
    if x % 1 != 0 or x == 0:
        return 0
    magn = floor(log10(abs(x)))
    trailing = 0
    for i in range(1, magn + 1):
        if x % (10 ** i) == 0:
            trailing = i
        else:
            break
    return trailing
import xml
from typing import List
from typing import Optional
import sys
def _check_dependency(dependency: xml.etree.ElementTree.Element,
include: List[str],
exclude: Optional[List[str]] = None) -> bool:
"""Check a dependency for a component.
Verifies that the given {dependency} is satisfied by components listed in
`include` or `exclude`.
Args:
dependency: XML Element of dependency.
include: list of component ids included in the project.
exclude: list of component ids explicitly excluded from the project.
Returns:
True if dependencies are satisfied, False if not.
"""
if dependency.tag == 'component_dependency':
component_id = dependency.attrib['value']
return component_id in include or (exclude is not None
and component_id in exclude)
if dependency.tag == 'all':
for subdependency in dependency:
if not _check_dependency(subdependency, include, exclude=exclude):
return False
return True
if dependency.tag == 'any_of':
for subdependency in dependency:
if _check_dependency(subdependency, include, exclude=exclude):
return True
tree = xml.etree.ElementTree.tostring(dependency).decode('utf-8')
print(f'Unsatisfied dependency from: {tree}', file=sys.stderr)
return False
# Unknown dependency tag type.
return True | f6cfeda5f6f9ad7fd03695e58f3c27d3f299de4d | 3,631,775 |
def top_height(sz):
    """Returns the height of the top part of size `sz' AS-Waksman network."""
    # The top half gets the floor of sz / 2 rows.
    half, _ = divmod(sz, 2)
    return half
def insert_sequence_read_set(db: DatabaseSession, sample_id: int, urls: list):
    """
    Insert sequencing read set directly into warehouse.sequence_read_set,
    with the *sample_id* and *urls*.
    """
    LOG.debug(f"Inserting sequence read set for sample {sample_id}")

    row_values = {"sample_id": sample_id, "urls": urls}
    sequence_read_set = db.fetch_row("""
        insert into warehouse.sequence_read_set (sample_id, urls)
        values (%(sample_id)s, %(urls)s)
        returning sequence_read_set_id as id
        """, row_values)

    # A missing id means the insert did not return a row.
    assert sequence_read_set.id, "Insert failed"
    return sequence_read_set
def constructAuxGraph(path):
    """
    This function constructs the auxiliary graph given a python dictionary as argument wich
    consists of the # of the path as the key of the dictionary and the path (source, intermediate
    nodes, destination) that a predifined route has to traverse in order to go from the source
    node to the destination node. The dictionary should look something like this:
    {#ofpath:(source_node, interm_node, ...,iterm_node,destination_node),...}
    The document that comes with this python script explains what is the auxiliary graph and
    how it is constructed. Please refer to that document for more details.
    """
    # BUG FIX: dict.iterkeys()/iteritems() are Python 2 only; use keys()/items().
    auxGraph = nx.Graph()  # empty graph used as the auxiliary graph
    auxGraph.add_nodes_from(path.keys())  # one node per predefined route
    # Examine each unordered pair of routes exactly once.
    keys = list(path)
    for i, key in enumerate(keys):
        for key2 in keys[i + 1:]:
            if haveCommonEdge(list(path[key]), list(path[key2])) == True:
                auxGraph.add_edge(key, key2)  # connect routes sharing a common edge
    return auxGraph
from typing import Callable
import functools
import logging
import time
def eval_time(function: Callable):
    """decorator to log the duration of the decorated method"""
    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original wrapper clobbered.
    @functools.wraps(function)
    def timed(*args, **kwargs):
        log = logging.getLogger(__name__)
        time_start = time.time()
        result = function(*args, **kwargs)
        time_elapsed = round(time.time() - time_start, 2)
        log.info(f"Processing time of {function.__name__}: {time_elapsed}s")
        return result
    return timed
from typing import Union
from typing import Optional
from typing import Iterable
from typing import Tuple
def parse_data(
    data: Union[AnnData, DataFrame, np.ndarray],
    gene_names: Optional[Iterable[str]] = None,
    sample_names: Optional[Iterable[str]] = None
) -> Tuple[np.ndarray, list, list]:
    """Reduces :class:`~anndata.AnnData` and :class:`~pandas.DataFrame` to a :class:`~numpy.dnarray` and extracts
    `gene_names` and `sample_names` from index and column names.

    Explicitly passed ``gene_names``/``sample_names`` take precedence over
    names derived from the input object; for a plain array both must be
    provided. Sparse matrices are densified. Raises ValueError on an
    unsupported input type or on a bare array without names.
    """
    if isinstance(data, AnnData):
        # AnnData: obs_names -> samples, var_names -> genes.
        if sample_names is None:
            sample_names = list(data.obs_names)
        if gene_names is None:
            gene_names = list(data.var_names)
        if issparse(data.X):
            logg.hint("data was passed as sparse array, converting to dense")
            raw_data = data.X.toarray()
        else:
            raw_data = data.X
    else:
        if isinstance(data, DataFrame):
            # DataFrame: columns -> genes, index -> samples.
            if gene_names is None:
                gene_names = list(data.columns)
            if sample_names is None:
                sample_names = list(data.index)
            if issparse(data.values):
                logg.hint("data was passed as sparse array, converting to dense")
                raw_data = data.values.toarray()
            else:
                raw_data = data.values
        elif isinstance(data, np.ndarray) or issparse(data):
            # Bare (sparse) array carries no names, so they are required.
            if gene_names is None or sample_names is None:
                raise ValueError("Provide gene names and sample names in ``gene_names`` and ``sample_names``")
            if issparse(data):
                logg.hint("data was passed as sparse array, converting to dense")
                raw_data = data.toarray()
            else:
                raw_data = data
        else:
            raise ValueError("data can only be of type AnnData, DataFrame or ndarray")
    logg.hint("passed data of shape {} x {} (samples x genes)".format(*raw_data.shape))
    return raw_data, gene_names, sample_names
def german_actionset_unaligned(german_X):
    """Generate an actionset for German data."""
    # setup actionset
    action_set = ActionSet(X=german_X)
    # These features may not be changed by any action.
    fixed_features = ['Age', 'Single', 'JobClassIsSkilled', 'ForeignWorker', 'OwnsHouse', 'RentsHouse']
    action_set[fixed_features].mutable = False
    # Constrain the direction these features are allowed to move.
    action_set['CriticalAccountOrLoansElsewhere'].step_direction = -1
    action_set['CheckingAccountBalance_geq_0'].step_direction = 1
    return action_set
def recursively_contains(val1, val2, parent_key=None):
    """
    Returns True if val1 is a subset of val2 both in its items as
    well as the items's items if it's a list or a dictionary.
    Returns False if there are items within val1 but not in val2.

    parent_key is only used to improve log messages when recursing into a
    dictionary. NOTE(review): for an unhandled type the final ERROR log runs
    and the function implicitly returns None (falsy) — confirm intended.
    """
    # If not same data type, fail
    if type(val1) != type(val2):
        if parent_key != None:
            BuiltIn().log("Types %s and %s are NOT the same, for key %s" % (type(val1), type(val2), parent_key))
        else:
            BuiltIn().log("Types %s and %s are NOT the same, for expected value %s" % (type(val1), type(val2), val1))
        return False
    # If simple scalar type, just compare values
    if is_scalar_type(val1):
        is_contained = val1==val2
        if not is_contained: BuiltIn().log("Scalar values aren't equal", 'DEBUG')
        return is_contained
    # If dictionary type
    elif type(val1) is dict:
        # All keys must be in
        missing_keys = set(val1.keys()) - set(val2.keys())
        if missing_keys != set():
            BuiltIn().log("The keys %s are missing in other dictionary: \n%s" % (missing_keys, val2))
            return False
        else:
            # All dictionary's values must be contained in other dictionary with same keys
            # Use lazy evaluation to stop at first failure
            # E.g. all( (x for x in [True, True, False, True, True] if print(x)==print()))
            # will print True, True, False. It won't process the later 2x True.
            return all(( (recursively_contains(v, val2[k], parent_key=k)) for k,v in val1.items() ))
    elif type(val1) in [list, set, tuple]:
        BuiltIn().log("About to check list %s" % val1)
        return list_recursively_contains(val1, val2)
    else:
        BuiltIn().log("Type not handled", 'WARN')
    BuiltIn().log("Should never get here", 'ERROR')
import email
import os
import mimetypes
def generate_email(sender, recipient, subject, body, attachment_path):
    """Creates an email with an attachement.

    :param sender: value for the From header.
    :param recipient: value for the To header.
    :param subject: value for the Subject header.
    :param body: plain-text message body.
    :param attachment_path: path of the file to attach.
    :return: the composed email.message.EmailMessage.
    """
    # Basic Email formatting
    message = email.message.EmailMessage()
    message["From"] = sender
    message["To"] = recipient
    message["Subject"] = subject
    message.set_content(body)
    # Process the attachment and add it to the email
    attachment_filename = os.path.basename(attachment_path)
    mime_type, _ = mimetypes.guess_type(attachment_path)
    # BUG FIX: guess_type returns None for unknown extensions, which crashed
    # on .split(); fall back to a generic binary type instead.
    if mime_type is None:
        mime_type = 'application/octet-stream'
    mime_type, mime_subtype = mime_type.split('/', 1)
    with open(attachment_path, 'rb') as ap:
        message.add_attachment(ap.read(), maintype=mime_type, subtype=mime_subtype, filename=attachment_filename)
    return message
def test_mode(user, godmode=False, questions_list=None, quiz_id=None):
    """creates a trial question paper for the moderators"""
    if questions_list is None:
        # Trial based on an existing quiz.
        trial_quiz = Quiz.objects.create_trial_from_quiz(
            quiz_id, user, godmode
        )
        return QuestionPaper.objects.create_trial_paper_to_test_quiz(
            trial_quiz, quiz_id
        )
    # Trial based on an explicit list of questions.
    trial_course = Course.objects.create_trial_course(user)
    trial_quiz = Quiz.objects.create_trial_quiz(trial_course, user)
    return QuestionPaper.objects.create_trial_paper_to_test_questions(
        trial_quiz, questions_list
    )
from typing import Optional
def add_auth_token(auth_token: str, desc: Optional[str],
                   call_count_limit: Optional[int] = None,
                   call_count_limit_relative: bool = False) -> bool:
    """
    Add or update an auth token to the DB. Local cache will be updated during next API request in is_auth_token_valid(...)!
    :param auth_token: The auth token to add/update.
    :param desc: The description to apply to the token. 'None' to leave existing description unchanged.
    :param call_count_limit: The call count limit to place on the token. 'None' to make unlimited.
    :param call_count_limit_relative: If True then the limit will be relative to the current count. Default is False!
    :return: True/False indicating success of operation.
    """
    try:
        query = db.session.query(APIKeyData)
        instance = query.get(ident=auth_token)
        if instance:
            # Existing token: update in place.
            if desc is not None:
                instance.desc = desc
            # Initialise the call_count if None.
            if instance.call_count is None:
                instance.call_count = 0
            if (call_count_limit is None) or (call_count_limit_relative is False):
                instance.call_count_limit = call_count_limit
            else:
                # Relative limit: allow call_count_limit more calls from now.
                instance.call_count_limit = instance.call_count + call_count_limit
        else:
            # New token: relative limits are meaningless (count starts at 0).
            # NOTE(review): str(desc) stores the literal string "None" when
            # desc is None — confirm this is intended.
            instance = APIKeyData(auth_token, str(desc), 0, call_count_limit)
            db.session.add(instance)
        db.session.commit()
        return True
    except Exception:
        # Roll back the failed transaction before propagating the error.
        db.session.rollback()
        raise
    finally:
        db.session.close()
import re
def id_for_new_id_style(old_id, is_metabolite=False):
    """ Get the new style id

    Sanitizes *old_id* to SBML/BiGG-compatible characters and normalizes
    underscores, preserving the compartment suffix and chirality marker
    matched by the module-level ``reg_compartment`` / ``reg_chirality``
    regexes (defined elsewhere in this file — exact patterns not visible
    here). ``is_metabolite`` is currently unused.
    """
    new_id = old_id

    def _join_parts(the_id, the_compartment):
        # Re-attach the compartment suffix, if any.
        if the_compartment:
            the_id = the_id + '_' + the_compartment
        return the_id

    def _remove_d_underscore(s):
        """Removed repeated, leading, and trailing underscores."""
        s = re.sub(r'_+', '_', s)
        s = re.sub(r'^_+', '', s)
        s = re.sub(r'_+$', '', s)
        return s

    # remove parentheses and brackets, for SBML & BiGG spec compatibility
    new_id = re.sub(r'[^a-zA-Z0-9_]', '_', new_id)

    # strip leading and trailing underscores
    # new_id = re.sub(r'^_+', '', new_id)
    # new_id = re.sub(r'_+$', '', new_id)

    compartment_match = reg_compartment.match(new_id)
    if compartment_match is None:
        # still remove double underscores
        new_id = _remove_d_underscore(new_id)
    else:
        base, compartment = compartment_match.groups()
        chirality_match = reg_chirality.match(base)
        if chirality_match is None:
            new_id = _join_parts(_remove_d_underscore(base), compartment)
        else:
            # Chirality markers keep a double underscore before the marker.
            new_base = '%s__%s' % (_remove_d_underscore(chirality_match.group(1)),
                                   chirality_match.group(2))

            new_id = _join_parts(new_base, compartment)

    return new_id
def analysis_instance_start_success(instance_uuid, instance_name, records,
                                    action=False, guest_hb=False):
    """
    Analyze records and determine if instance is started

    The NFVI action-start record is only expected when ``action`` is set;
    ``guest_hb`` is currently unused.
    """
    always = True
    possible_records \
        = [(action, NFV_VIM.INSTANCE_NFVI_ACTION_START),
           (always, NFV_VIM.INSTANCE_START_STATE),
           (always, NFV_VIM.INSTANCE_START_CALLBACK),
           (always, NFV_VIM.INSTANCE_START_STATE_INPROGRESS),
           (always, NFV_VIM.INSTANCE_INITIAL_STATE)]

    expected_records = [record_type
                        for allowed, record_type in possible_records
                        if allowed]

    return _analysis_instances_success(instance_uuid, instance_name, records,
                                       expected_records)
import scipy.stats
from collections import OrderedDict
def evaluate_on_semeval_2012_2(w):
    """
    Simple method to score embedding using SimpleAnalogySolver

    Parameters
    ----------
    w : Embedding or dict
        Embedding or dict instance.

    Returns
    -------
    result: pandas.Series
        Results with spearman correlation per broad category with special key "all" for summary
        spearman correlation
    """
    if isinstance(w, dict):
        w = Embedding.from_dict(w)
    data = fetch_semeval_2012_2()
    mean_vector = np.mean(w.vectors, axis=0, keepdims=True)
    categories = data.y.keys()
    results = defaultdict(list)
    for c in categories:
        # Get mean of left and right vector
        prototypes = data.X_prot[c]
        # np.vstack requires a real sequence; modern NumPy rejects
        # generators, so build the row lists explicitly.
        prot_left = np.mean(np.vstack([w.get(word, mean_vector) for word in prototypes[:, 0]]), axis=0)
        prot_right = np.mean(np.vstack([w.get(word, mean_vector) for word in prototypes[:, 1]]), axis=0)
        questions = data.X[c]
        question_left = np.vstack([w.get(word, mean_vector) for word in questions[:, 0]])
        question_right = np.vstack([w.get(word, mean_vector) for word in questions[:, 1]])
        scores = np.dot(prot_left - prot_right, (question_left - question_right).T)
        c_name = data.categories_names[c].split("_")[0]
        # NaN happens when there are only 0s, which might happen for very rare words or
        # very insufficient word vocabulary
        cor = scipy.stats.spearmanr(scores, data.y[c]).correlation
        results[c_name].append(0 if np.isnan(cor) else cor)
    final_results = OrderedDict()
    final_results['all'] = sum(sum(v) for v in results.values()) / len(categories)
    for k in results:
        final_results[k] = sum(results[k]) / len(results[k])
    return pd.Series(final_results)
from magichome import MagicHomeApi
def setup(hass, config):
    """Set up MagicHome Component."""
    api = MagicHomeApi()
    conf = config[DOMAIN]
    hass.data[DATA_MAGICHOME] = api
    api.init(conf[CONF_USERNAME], conf[CONF_PASSWORD],
             conf[CONF_COMPANY], conf[CONF_PLATFORM])
    hass.data[DOMAIN] = {"entities": {}}

    def load_devices(device_list):
        """Dispatch newly discovered devices to their HA platforms."""
        platform_batches = {}
        for device in device_list:
            dev_type = device.device_type()
            if dev_type not in MAGICHOME_TYPE_TO_HA:
                continue
            if device.object_id() in hass.data[DOMAIN]["entities"]:
                continue
            ha_type = MAGICHOME_TYPE_TO_HA[dev_type]
            platform_batches.setdefault(ha_type, []).append(device.object_id())
            # Placeholder entry; the platform fills it in once the entity
            # is actually created.
            hass.data[DOMAIN]["entities"][device.object_id()] = None
        for ha_type, dev_ids in platform_batches.items():
            discovery.load_platform(hass, ha_type, DOMAIN, {"dev_ids": dev_ids}, config)

    load_devices(api.get_all_devices())

    def poll_devices_update(event_time):
        """Check if accesstoken is expired and pull device list from server."""
        _LOGGER.debug("Pull devices from MagicHome.")
        api.poll_devices_update()
        # Add newly discovered devices.
        device_list = api.get_all_devices()
        load_devices(device_list)
        # Remove entities whose device no longer exists upstream.
        current_ids = {device.object_id() for device in device_list}
        for dev_id in list(hass.data[DOMAIN]["entities"]):
            if dev_id not in current_ids:
                dispatcher_send(hass, SIGNAL_DELETE_ENTITY, dev_id)
                hass.data[DOMAIN]["entities"].pop(dev_id)

    track_time_interval(hass, poll_devices_update, timedelta(minutes=5))
    hass.services.register(DOMAIN, SERVICE_PULL_DEVICES, poll_devices_update)

    def force_update(call):
        """Force all devices to pull data."""
        dispatcher_send(hass, SIGNAL_UPDATE_ENTITY)

    hass.services.register(DOMAIN, SERVICE_FORCE_UPDATE, force_update)
    return True
def valid_lsi(addr):
    """Is the string a valid Local Scope Identifier?

    A valid LSI has four dot-separated decimal parts, the first part
    equal to 1, and every part in the range 0-255.

    >>> valid_lsi('1.0.0.1')
    True
    >>> valid_lsi('127.0.0.1')
    False
    >>> valid_lsi('1.0.1')
    False
    >>> valid_lsi('1.0.0.365')
    False
    >>> valid_lsi('1.foobar')
    False
    >>> valid_lsi('1.a.b.c')
    False
    """
    parts = addr.split('.')
    if len(parts) != 4:
        return False
    try:
        values = [int(part) for part in parts]
    except ValueError:
        # Non-numeric parts (e.g. '1.a.b.c') make the address invalid;
        # previously this raised ValueError instead of returning False.
        return False
    if values[0] != 1:
        return False
    # Every part must fit in one byte.
    return all(0 <= value < 256 for value in values)
def imshow_coocc(coocc, percent=True, ax=None):
    """Visualize a profile-class co-occurrence matrix as an annotated image."""
    ax = ax if ax else plt.gca()
    n_classes = coocc.shape[0]
    if percent:
        cell_values = (coocc * 100).round().astype(int).values
    else:
        cell_values = coocc.values
    ax.imshow(coocc.T)
    for row in range(n_classes):
        for col in range(n_classes):
            value = cell_values.T[row][col]
            # Dark cells get white text so small percentages stay readable.
            text_color = 'white' if percent and (value < 50) else 'black'
            ax.annotate('{}'.format(value), xy=(col, row),
                        horizontalalignment='center',
                        verticalalignment='center', color=text_color)
    ax.set_xticks(range(n_classes))
    ax.set_xticklabels(coocc.index)
    ax.set_yticks(range(n_classes))
    ax.set_yticklabels(coocc.index)
    return ax
def sub_m(D, C_lasso, C_group, C_ridge, eta=1e0):
    """Solve the Sub_m subproblem by shrinking D with eta-scaled penalties."""
    scaled_penalties = (C_lasso * eta, C_group * eta, C_ridge * eta)
    return shrink(D, *scaled_penalties)
def grade(morse_code, inputs):
    """Grade how well *inputs* reproduces the expected *morse_code*.

    Returns a 3-tuple: a pass/fail boolean, then the strings to show on
    the top and bottom lines of the display as feedback.
    """
    # One press per symbol plus one gap between consecutive symbols.
    expected_count = len(morse_code) * 2 - 1
    if len(inputs) > expected_count:
        return (False, " Not good! ", " Extra dit-dahs ")
    if len(inputs) < expected_count:
        return (False, " Not good! ", "Too few dit-dahs")

    # Classify presses as dits or dahs. Be lenient about timing here:
    # only require that every dit is shorter than every dah.
    dit_lengths = []
    dah_lengths = []
    longest_dit = 0.0
    shortest_dah = float("inf")
    for pos, symbol in enumerate(morse_code):
        press = inputs[2 * pos]
        if symbol == ".":
            longest_dit = max(longest_dit, press)
            dit_lengths.append(press)
        else:
            shortest_dah = min(shortest_dah, press)
            dah_lengths.append(press)
        if dit_lengths and dah_lengths and shortest_dah <= longest_dit:
            return (False, "Not Good! Wrong", "dit-dah sequence")
        # For timing purposes, inter-symbol gaps count as dits.
        if pos < len(morse_code) - 1:
            dit_lengths.append(inputs[2 * pos + 1])

    # Estimate the basic time unit: a dah is worth three dits.
    time_unit = (sum(dit_lengths) + sum(dah_lengths)) / (len(dit_lengths) + 3 * len(dah_lengths))

    def _precision(measured, ideal):
        # Ratio folded into (0, 1]; 1.0 means a perfectly timed press.
        ratio = measured / ideal
        return 1.0 / ratio if ratio > 1.0 else ratio

    # Now check every press and gap against its ideal duration.
    worst_prec = 1.0
    for pos, symbol in enumerate(morse_code):
        ideal = time_unit if symbol == "." else 3 * time_unit
        worst_prec = min(worst_prec, _precision(inputs[2 * pos], ideal))
        gap_index = 2 * pos + 1
        if gap_index < len(inputs):
            worst_prec = min(worst_prec, _precision(inputs[gap_index], time_unit))

    if worst_prec < 0.35:
        return (False, "Not good! Bad", "dit-dahs timing.")
    elif worst_prec < 0.55:
        return (True, "Good! Can better", "dit-dahs timing.")
    elif worst_prec < 0.75:
        return (True, "Good!", "Almost perfect!")
    else:
        return (True, "Great!", "Nailed it!")
def create_test_endpoint(client, ec2_client, name=None, tags=None):
    """Create an endpoint that can be used for testing purposes.

    Can't be used for unit tests that need to know/test the arguments.
    """
    tags = tags if tags else []
    suffix = get_random_hex(10)
    vpc_id = create_vpc(ec2_client)
    subnet_ids = create_subnets(ec2_client, vpc_id)
    response = client.create_resolver_endpoint(
        CreatorRequestId=suffix,
        # Name must start with a letter, hence the "X" prefix.
        Name=name if name else "X" + suffix,
        SecurityGroupIds=[create_security_group(ec2_client)],
        Direction="INBOUND",
        IpAddresses=[
            {"SubnetId": subnet_ids[0], "Ip": "10.0.1.200"},
            {"SubnetId": subnet_ids[1], "Ip": "10.0.0.20"},
        ],
        Tags=tags,
    )
    return response["ResolverEndpoint"]
def plot_clusters(estimator, X, chart=None, fig=None, axes=None,
                  n_rows=None, n_cols=None,
                  sample_labels=None, cluster_colors=None,
                  cluster_labels=None, center_colors=None,
                  center_labels=None,
                  center_width=3,
                  colormap=plt.cm.get_cmap('rainbow')):
    """Plot the FDataGrid samples grouped by cluster.

    The cluster assignments are computed with ``estimator``; if it is
    not fitted yet, its ``fit`` method is called on ``X`` first. Each
    cluster is drawn in its own color and described in a legend.

    Args:
        estimator (BaseEstimator object): estimator used to calculate the
            clusters.
        X (FDataGrid object): contains the samples which are grouped
            into different clusters.
        chart: unused; kept for interface compatibility.
        fig (figure object): figure over which the graphs are plotted in
            case ax is not specified. If None and ax is also None, the
            figure is initialized.
        axes (list of axis objects): axis over where the graphs are
            plotted. If None, see param fig.
        n_rows (int): number of rows of the figure used to plot the
            different dimensions of the image. Only specified if fig and
            ax are None.
        n_cols (int): number of columns of the figure used to plot the
            different dimensions of the image. Only specified if fig and
            ax are None.
        sample_labels (list of str): contains in order the labels of each
            sample of the fdatagrid.
        cluster_colors (list of colors): contains in order the colors of
            each cluster the samples of the fdatagrid are classified into.
        cluster_labels (list of str): contains in order the names of each
            cluster the samples of the fdatagrid are classified into.
        center_colors (list of colors): contains in order the colors of
            each centroid of the clusters.
        center_labels (list of str): contains in order the labels of each
            centroid of the clusters.
        center_width (int): width of the centroid curves.
        colormap (colormap): colormap from which the colors of the plot
            are taken. Defaults to `rainbow`.

    Returns:
        (tuple): tuple containing:

            fig (figure object): figure object in which the graphs are
                plotted in case ax is None.
            ax (axes object): axes in which the graphs are plotted.
    """
    _check_if_estimator(estimator)
    try:
        check_is_fitted(estimator)
        estimator._check_test_data(X)
    except NotFittedError:
        # Not fitted yet: fit on the data we are about to plot.
        estimator.fit(X)
    labels = estimator.labels_
    if isinstance(estimator, FuzzyCMeans):
        # Fuzzy memberships: assign each sample to its strongest cluster.
        labels = np.argmax(labels, axis=1)
    return _plot_clusters(estimator=estimator, fdata=X,
                          fig=fig, axes=axes, n_rows=n_rows, n_cols=n_cols,
                          labels=labels, sample_labels=sample_labels,
                          cluster_colors=cluster_colors,
                          cluster_labels=cluster_labels,
                          center_colors=center_colors,
                          center_labels=center_labels,
                          center_width=center_width,
                          colormap=colormap)
import requests
def head(url):
    """
    Make a HEAD request to the URL. If we do not get a 404 then the URL is
    valid. If there are any exceptions then return False.
    """
    try:
        resp = s.head(url, timeout=5, verify=False)
    except requests.exceptions.RequestException:
        return False
    status = resp.status_code
    if status in (301, 302):
        # Redirects are reported with their target location.
        print('[+] Found {0} --> {1} ({2})'.format(url, resp.headers['location'], status))
        return url
    if status == 404:
        return None
    print('[+] Found {0} ({1})'.format(url, status))
    return url
import pathlib
def long_description():
    """Return the project long description read from README.rst."""
    readme = pathlib.Path(WORKING_DIRECTORY, "README.rst")
    return readme.read_text()
import csv
import io
def load_embeddings(embeddings_path, aws=False):
    """Loads pre-trained word embeddings from tsv file.

    Args:
        embeddings_path - path to the embeddings file, or (when ``aws`` is
            True) a file-like object whose ``read()`` returns the raw bytes
            of the tsv content (e.g. an S3 streaming body).
        aws - when True, treat ``embeddings_path`` as a streaming body
            instead of a filesystem path.

    Returns:
        embeddings - dict mapping words to float32 numpy vectors;
        dim - dimension of the vectors (0 when the input is empty).
    """
    if aws:
        text_stream = io.StringIO(embeddings_path.read().decode("utf-8"))
        reader = csv.reader(text_stream, delimiter="\t")
        return _parse_embedding_rows(reader)
    with open(embeddings_path, newline='') as embedding_file:
        reader = csv.reader(embedding_file, delimiter='\t')
        return _parse_embedding_rows(reader)


def _parse_embedding_rows(reader):
    """Parse (word, v1, v2, ...) rows into an embeddings dict and dimension."""
    embeddings = {}
    # Previously ``dim`` was left unbound when the input had no rows,
    # raising UnboundLocalError; default to 0 for empty input instead.
    dim = 0
    for line in reader:
        word = line[0]
        embeddings[word] = np.array(line[1:]).astype(np.float32)
        dim = len(line) - 1
    return embeddings, dim
from typing import Dict
from typing import Any
import json
import requests
async def create_check_request(
    installation_url: str, repo_name: str, token: str, check_name: str, head_sha: str
) -> Dict[str, Any]:
    """
    Initiate Check after Pull Request was created.

    Creates a GitHub check run for ``head_sha`` on ``repo_name`` under the
    organization extracted from ``installation_url``. Returns the parsed
    JSON response body. Raises GitHubAPIFailed when the request or the
    response parsing fails.
    """
    org_name = installation_url.rstrip("/").split("/")[-1]
    # No leading whitespace in the URL (the original had " https://...",
    # which only worked because requests strips it).
    url = f"https://api.github.com/repos/{org_name}/{repo_name}/check-runs"
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {token}",
    }
    data = {
        "name": check_name,
        "head_sha": head_sha,
    }
    data_str = json.dumps(data)
    try:
        # NOTE(review): blocking requests call inside an async function
        # stalls the event loop -- consider an async HTTP client.
        r = requests.post(url, data=data_str, headers=headers, timeout=2)
        r.raise_for_status()
        response_body = r.json()
    except Exception as e:
        logger.error(repr(e))
        # Chain the original failure so callers can see the root cause.
        raise GitHubAPIFailed("Error due creating check requests via GitHub API") from e
    return response_body
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.