content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def list_project_milestones(request):
    """
    Render the list of milestones for a single project.

    Reads ``project_id`` from the query string, looks up the project, and
    renders per-status milestone counts plus the queryset of open milestones.
    """
    project_id = request.GET.get('project_id')
    project = Project.objects.get(id=project_id)
    template = loader.get_template('project_management/list_project_milestones.html')

    def _milestones_with_status(status_name):
        # One lazy queryset per status name; the original issued up to three
        # near-identical queries (exists/filter/count) per status.
        status = Status.objects.get(name=status_name)
        return Milestone.objects.filter(project_id=project.id, status=status)

    open_milestones = _milestones_with_status("Open")
    open_count = open_milestones.count()
    context = {
        'project_id': project.id,
        'project_name': project.name,
        # Preserve the old "" sentinel when there are no open milestones so
        # template truthiness checks behave exactly as before.
        'open_milestones': open_milestones if open_count else "",
        'completed_count': _milestones_with_status("Completed").count(),
        'onhold_count': _milestones_with_status("Onhold").count(),
        'terminated_count': _milestones_with_status("Terminated").count(),
        'open_count': open_count,
    }
    return HttpResponse(template.render(context, request))
def build_update_fn(loss_fn, *, scan_mode: bool = False):
    """Build a simple update function.
    *Note*: The output of ``loss_fn`` must be ``(loss, (aux, model))``.
    Arguments:
      loss_fn: The loss function.
      scan_mode: If true, use `(model, optimizer)` as a single argument.
    Example:
    >>> def mse_loss(model, x, y):
    ...     y_hat = model(x)
    ...     loss = jnp.mean(jnp.square(y - y_hat))
    ...     return loss, (loss, model)
    ...
    >>> update_fn = pax.utils.build_update_fn(mse_loss)
    >>> net = pax.Linear(2, 2)
    >>> optimizer = opax.adam(1e-4)(net.parameters())
    >>> x = jnp.ones((32, 2))
    >>> y = jnp.zeros((32, 2))
    >>> net, optimizer, loss = update_fn(net, optimizer, x, y)
    """
    # pylint: disable=import-outside-toplevel
    from opax import apply_updates, transform_gradients
    def _update_fn(model: T, optimizer: O, *inputs, **kwinputs) -> Tuple[T, O, Any]:
        """An update function.
        Note that: ``model`` and ``optimizer`` have internal states.
        We have to return them in the output as jax transformations
        (e.g., ``jax.grad`` and ``jax.jit``) requires pure functions.
        Arguments:
            model: a callable pax.Module.
            optimizer: the optimizer module.
            inputs: input batch.
        Returns:
            (model, optimizer, aux): updated model and optimizer, plus the
            auxiliary output of ``loss_fn``.
        """
        assert isinstance(model, Module)
        assert isinstance(optimizer, Module)
        # Remember the pytree structure so we can verify loss_fn returned a
        # structurally identical (updated) model in its aux output.
        model_treedef = jax.tree_structure(model)
        grads, (aux, model) = grad(loss_fn, has_aux=True)(model, *inputs, **kwinputs)
        if jax.tree_structure(model) != model_treedef:
            raise ValueError("Expecting an updated model in the auxiliary output.")
        # Transform raw gradients into parameter updates, then apply them to
        # the trainable parameters only.
        params = select_parameters(model)
        updates, optimizer = transform_gradients(grads, optimizer, params=params)
        params = apply_updates(params, updates=updates)
        model = update_parameters(model, params=params)
        return model, optimizer, aux
    def _update_fn_scan(
        model_and_optimizer: Union[C, Tuple[T, O]], *inputs, **kwinputs
    ) -> Tuple[C, Any]:
        # scan-friendly wrapper: carries (model, optimizer) as a single
        # tuple argument, as required by jax.lax.scan-style loops.
        model, optimizer = model_and_optimizer
        model, optimizer, aux = _update_fn(model, optimizer, *inputs, **kwinputs)
        return (model, optimizer), aux
    return _update_fn_scan if scan_mode else _update_fn
def plot_model_fits(fit_ds, plot_models='all', plot_total=True, background_models=[],
                    overlay_data=True, pts_per_plot=200, show_legend=True,
                    selections=None, omissions=None, ranges=None, **kwargs):
    """
    Plots individual models
    Parameters
    ----------
    fit_ds : xarray.Dataset-like
        Fit result container; expected to expose ``xda``, ``yda``,
        ``yerr_da``, ``popt`` and the attrs ``models``, ``param_names``,
        ``fit_func``, ``xname``, ``yname`` (assumed interface -- TODO confirm).
    plot_models : str or list of str
        names of models to plot
    plot_total : bool
        whether to also plot the sum of all of the models
    background_models : str or list of str
        models to use as a background, being added to each of the individual
        models to be plotted
    overlay_data : bool
        whether to overlay the raw data
    pts_per_plot : int
        Number of points to use in the fit curves
    show_legend : bool
        whether to show the legend on the plot
    selections, omissions, ranges : dict, optional
        per-dimension filters forwarded to ``da_filter`` for the x/y/param
        arrays (split by which dims each array actually has)
    **kwargs
        extra dimension selections (any key matching a yda dim) plus any
        matplotlib plot/errorbar keyword arguments
    """
    xlabel = fit_ds.xda.name if fit_ds.xda.name is not None else fit_ds.attrs['xname']
    # check that all models are valid
    if plot_models == 'all':
        plot_models = list(x for x in fit_ds.attrs['models'].keys() if x not in background_models)
    elif isinstance(plot_models, str):
        plot_models = [plot_models]
    if isinstance(background_models, str):
        background_models = [background_models]
    for m in (plot_models+background_models):
        if m not in fit_ds.attrs['models']:
            raise ValueError(f"{m} is not a model of this system! Included models are: {fit_ds.attrs['models'].keys()}")
    bg_models = [fit_ds.attrs['models'][mod] for mod in background_models]
    fg_models = [fit_ds.attrs['models'][mod] for mod in plot_models]
    selections = {} if selections is None else selections
    omissions = {} if omissions is None else omissions
    ranges = {} if ranges is None else ranges
    # any kwarg naming a data dimension is treated as a selection, not a
    # matplotlib styling argument
    for kw in kwargs:
        if kw in fit_ds.yda.dims:
            selections[kw] = kwargs[kw]
    # split the filters per array, keeping only dims each array actually has
    # ("param" is excluded for popt so parameters are never filtered away)
    xselections = {dim: sel for dim, sel in selections.items() if dim in fit_ds.xda.dims}
    xomissions = {dim: sel for dim, sel in omissions.items() if dim in fit_ds.xda.dims}
    xranges = {dim: sel for dim, sel in ranges.items() if dim in fit_ds.xda.dims}
    yselections = {dim: sel for dim, sel in selections.items() if dim in fit_ds.yda.dims}
    yomissions = {dim: sel for dim, sel in omissions.items() if dim in fit_ds.yda.dims}
    yranges = {dim: sel for dim, sel in ranges.items() if dim in fit_ds.yda.dims}
    pselections = {dim: sel for dim, sel in selections.items() if dim in fit_ds.popt.dims and dim != "param"}
    pomissions = {dim: sel for dim, sel in omissions.items() if dim in fit_ds.popt.dims and dim != "param"}
    pranges = {dim: sel for dim, sel in ranges.items() if dim in fit_ds.popt.dims and dim != "param"}
    xda = da_filter(fit_ds.xda, selections=xselections, omissions=xomissions, ranges=xranges)
    yda = da_filter(fit_ds.yda, selections=yselections, omissions=yomissions, ranges=yranges)
    pda = da_filter(fit_ds.popt, selections=pselections, omissions=pomissions, ranges=pranges)
    # only use the error array if it contains at least one finite value
    if not np.all(np.isnan(fit_ds.yerr_da)):
        yeda = da_filter(fit_ds.yerr_da, selections=yselections, omissions=yomissions, ranges=yranges)
    else:
        yeda = None
    # one plot is produced per combination of the remaining (non-x) coords
    combo_dims, coord_combos = gen_coord_combo(yda, fit_ds.attrs['xname'])
    # dimensions to show in the title
    dims_with_many_values = [dim for dim in pda.dims if len(pda[dim])>1 and dim != "param"]
    # Determine which kwargs can be passed to plot
    if np.all(np.isnan(fit_ds.yerr_da)):
        plot_argspec = getfullargspec(Line2D)
        plot_kwargs = {k: v for k, v in kwargs.items() if k in plot_argspec.args}
    else:
        ebar_argspec = getfullargspec(plt.errorbar)
        plot_kwargs = {k: v for k, v in kwargs.items() if k in ebar_argspec.args}
    for combo in coord_combos:
        selection_dict = dict(zip(combo_dims, combo))
        xselection_dict = {k: v for k, v in selection_dict.items() if k in xda.dims}
        all_params = {pn : float(pda.sel(**selection_dict, param=pn).values) for pn in fit_ds.attrs['param_names']}
        data_dom = xda.sel(xselection_dict).values.copy()
        # dense, evenly-spaced domain for smooth fit curves
        fit_dom = np.linspace(data_dom.min(), data_dom.max(), pts_per_plot)
        # skip combos where the fit produced no parameters at all
        if np.all(np.isnan(list(all_params.values()))):
            continue
        # accumulated background curve, added to every foreground model
        background = np.zeros(fit_dom.size)
        for m in bg_models:
            bgparams = [all_params[p.name] for p in m.params]
            background += m(fit_dom, *bgparams)
        if overlay_data:
            data_range = yda.sel(selection_dict).values.copy()
            if yeda is not None:
                yerr = yeda.sel(selection_dict).values.copy()
                plt.errorbar(data_dom, data_range, yerr, **plot_kwargs)
            else:
                plt.plot(data_dom, data_range, **plot_kwargs)
        if plot_total:
            modparams = pda.sel(selection_dict).values
            plt.plot(fit_dom, fit_ds.attrs['fit_func'](fit_dom, *modparams), label='total')
        for m in fg_models:
            modparams = [all_params[p.name] for p in m.params]
            plt.plot(fit_dom, m(fit_dom, *modparams) + background, label=m.name)
        # add labels and make the title reflect the current selection
        plt.xlabel(xlabel)
        if fit_ds.attrs['yname'] is not None:
            plt.ylabel(fit_ds.attrs['yname'])
        title_str = ''
        for dim in dims_with_many_values:
            title_str += f'{dim}: {selection_dict[dim]}, '
        try:
            plt.title(title_str[:-2]) # get rid of trailing comma and space
        except:
            plt.title('')
        # add legend if requested
        if show_legend:
            plt.legend()
        plt.show()
def _get_perf_hint(hint, index: int, _default=None):
"""
Extracts a "performance hint" value -- specified as either a scalar or 2-tuple -- for
either the left or right Dataset in a merge.
Parameters
----------
hint : scalar or 2-tuple of scalars, optional
index : int
Indicates whether the hint value is being extracted for the left or right Dataset.
0 = left, 1 = right.
_default : optional
Optional default value, returned if `hint` is None.
Returns
-------
Any
The extracted performance hint value.
"""
if hint is None:
return _default
elif isinstance(hint, tuple):
return hint[index]
else:
return hint | 5,327,603 |
def synchronized_limit(lock):
    """
    Synchronization decorator with a concurrency cap.

    ``lock`` is a two-item list: ``lock[0]`` is the underlying lock object
    and ``lock[1]`` counts calls currently queued or running. Calls arriving
    while ten are already in flight are rejected.

    Based on http://code.activestate.com/recipes/465057/
    """
    def wrap(func):
        def synchronize(*args, **kwargs):
            # Reject immediately when the queue is already at capacity.
            if lock[1] >= 10:
                raise Exception('Too busy')
            lock[1] += 1
            lock[0].acquire()
            try:
                return func(*args, **kwargs)
            finally:
                # Always release the slot and the lock, even on error.
                lock[1] -= 1
                lock[0].release()
        return synchronize
    return wrap
def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):
    """
    Faster replacement for skimage.transform.warp: applies the affine
    transform held by ``tf`` to each RGB channel of ``img`` separately
    via scipy's affine_transform.
    """
    from scipy.ndimage import affine_transform
    matrix = tf._matrix
    linear, offset = matrix[:2, :2], (matrix[0, 2], matrix[1, 2])
    warped = np.zeros(shape=(output_shape[0], output_shape[1], 3), dtype=floatX)
    # Warp each color channel independently (channels are transposed so the
    # transform matrix convention matches skimage's).
    for channel in range(3):
        warped[:, :, channel] = affine_transform(
            img[:, :, channel].T, linear, offset=offset,
            output_shape=output_shape, mode=mode, order=order)
    return warped
def _get_detections(generator, model, score_threshold=0.05, max_detections=400, save_path=None):
    """ Get the detections from the model using the generator.
    The result is a list of lists such that the size is:
    all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]
    # Arguments
    generator : The generator used to run images through the model.
    model : The model to run on the images.
    score_threshold : The score confidence threshold to use.
    max_detections : The maximum number of detections to use per image.
    save_path : The path to save the images with visualized detections to.
    # Returns
    A list of lists containing the detections for each image in the generator.
    """
    # NOTE(review): the `if generator.has_label(i)` filter shrinks the inner
    # list, yet the assignment below indexes it by raw class label -- if any
    # label in the middle of the range lacks has_label, indices would
    # misalign. Presumably has_label holes only occur at the end; confirm.
    all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]
    for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '):
        raw_image = generator.load_image(i)
        image = generator.preprocess_image(raw_image.copy())
        image, scale = generator.resize_image(image)
        # move channels first when the backend expects NCHW layout
        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))
        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]
        # correct boxes for image scale
        boxes /= scale
        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]
        # select those scores
        scores = scores[0][indices]
        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]
        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]
        # each row: [x1, y1, x2, y2, score, label]
        image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
        # if save_path is not None:
        #     draw_annotations(raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name)
        #     draw_detections(raw_image, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name)
        #
        #     cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            if not generator.has_label(label):
                continue
            # keep only this label's rows, dropping the trailing label column
            all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]
    return all_detections
def LikeArticle(browser):
    """
    Like the article that has already been navigated to.
    browser: selenium driver used to interact with the page.

    Python 2 code (uses print statements). Reads module-level
    MAX_LIKES_ON_POST and VERBOSE settings.
    """
    likeButtonXPath = '//div[@data-source="post_actions_footer"]/button'
    numLikes = 0
    # Best effort: the like-count element may be absent, in which case we
    # treat the post as having 0 likes.
    try:
        numLikesElement = browser.find_element_by_xpath(likeButtonXPath+"/following-sibling::button")
        numLikes = int(numLikesElement.text)
    except:
        pass
    try:
        likeButton = browser.find_element_by_xpath(likeButtonXPath)
        # data-action == "upvote" indicates the post is not yet liked
        buttonStatus = likeButton.get_attribute("data-action")
        if likeButton.is_displayed() and buttonStatus == "upvote":
            if numLikes < MAX_LIKES_ON_POST:
                if VERBOSE:
                    print 'Liking the article : \"'+browser.title+'\"'
                likeButton.click()
            elif VERBOSE:
                print 'Article \"'+browser.title+'\" has more likes than your threshold.'
        elif VERBOSE:
            print 'Article \"'+browser.title+'\" is already liked.'
    except:
        # Swallow all selenium errors so one bad page doesn't stop the run.
        if VERBOSE:
            print 'Exception thrown when trying to like the article: '+browser.current_url
        pass
def BF (mu, s2, noise_var=None, pps=None):
    """
    Buzzi-Ferraris et al.'s design criterion for model discrimination.

    Sums, over every unordered model pair (i, j), a trace term plus a
    Mahalanobis-style distance between the pair's predictive means.
    References:
    - Buzzi-Ferraris and Forzatti (1983)
        Sequential experimental design for model discrimination
        in the case of multiple responses.
        Chem. Eng. Sci. 39(1):81-85
    - Buzzi-Ferraris et al. (1984)
        Sequential experimental design for model discrimination
        in the case of multiple responses.
        Chem. Eng. Sci. 39(1):81-85
    - Buzzi-Ferraris et al. (1990)
        An improved version of sequential design criterion for
        discrimination among rival multiresponse models.
        Chem. Eng. Sci. 45(2):477-481
    """
    mu, s2, noise_var, _, n, M, _ = _reshape(mu, s2, noise_var, None)
    s2 += noise_var
    criterion = np.zeros(n)
    for i in range(M-1):
        for j in range(i+1, M):
            # inverse of the pair's summed predictive covariance
            pair_cov_inv = np.linalg.inv(s2[:, i] + s2[:, j])
            trace_term = np.trace(np.matmul(noise_var, pair_cov_inv),
                                  axis1=1, axis2=2)
            mean_diff = np.expand_dims(mu[:, i] - mu[:, j], 2)
            quad_term = np.sum(mean_diff * np.matmul(pair_cov_inv, mean_diff),
                               axis=(1, 2))
            criterion += trace_term + quad_term
    return criterion
def do_cg_delete(cs, args):
    """Remove one or more consistency groups.

    Attempts every group named in ``args.consistency_group``; raises
    CommandError only when *all* deletions fail.
    """
    kwargs = {}
    if args.force is not None:
        kwargs['force'] = args.force
    failures = 0
    for cg_name in args.consistency_group:
        try:
            found = _find_consistency_group(cs, cg_name)
            cs.consistency_groups.delete(found, **kwargs)
        except Exception as exc:
            # Report and keep going; other groups may still be deletable.
            failures += 1
            print("Delete for consistency group %s failed: %s" % (
                cg_name, exc), file=sys.stderr)
    if failures == len(args.consistency_group):
        raise exceptions.CommandError("Unable to delete any of the specified "
                                      "consistency groups.")
def deploy(usr, pwd, path=None, venv=None):
    """Release the built distribution on `pypi.org`.

    Arguments:
        usr: pypi.org user name.
        pwd: pypi.org password.
        path: project directory; defaults to the current working directory
            at call time.
        venv: optional virtualenv to run the twine commands in.
    """
    # Resolve the default lazily: the previous `path=getcwd()` default was
    # evaluated once at import time, which is wrong if the process changes
    # directory before deploying.
    if path is None:
        path = getcwd()
    log(INFO, ICONS["deploy"] + 'deploy release on `pypi.org`')
    # check dist
    module('twine', 'check --strict dist/*', path=path, venv=venv)
    # push to pypi.org
    return module("twine", "upload -u %s -p %s dist/*" % (usr, pwd),
                  level=LEVEL, path=path, venv=venv)
def process_files(userglob):
    """Yield per-frame data for every file matching *userglob*.

    Each matching image file is opened and run through ``extract_data()``.
    Implemented as a generator: yields tuples
    ``(frame, d0, d1, d2, d3)`` where ``frame`` is the zero-based index
    of the file in sorted-filename order.
    """
    # enumerate over the sorted file list replaces the manual frame counter
    for frame, pic in enumerate(sorted(glob(userglob))):
        image = Image.open(pic)
        extracted = extract_data(image)
        yield (frame, extracted[0], extracted[1], extracted[2], extracted[3])
def most_interval_scheduling(interval_list):
    """
    Greedy maximum interval scheduling: repeatedly accept the compatible
    interval with the smallest 'end' value.

    Args:
        interval_list(list): list of interval dicts with 'start'/'end' keys.

    Returns:
        list: a maximum set of pairwise non-overlapping intervals, in
        ascending 'end' order. Empty input yields an empty list (the
        previous version raised IndexError on []).
    """
    if not interval_list:
        return []
    scheduling_list = []
    for interval in sorted(interval_list, key=lambda x: x['end']):
        # Accept the interval if it starts at or after the last accepted end.
        if not scheduling_list or scheduling_list[-1]['end'] <= interval['start']:
            scheduling_list.append(interval)
    return scheduling_list
def kernel_program(inputfile, dimData, Materials, dict_nset_data, \
    dict_elset_matID={}, dict_elset_dload={}):
    """The kernel_program should be called by the job script (e.g., Job-1.py)
    where the user defines:
    - inputfile: the name of the input file
    - dimData: the dimensional data (see class dimension_data)
    - Materials: the list of materials used in the analysis (see package Elements)
    - dict_nset_data: the dictionary of nset_data (for bcds and concentrated loads)
    where the keys are nset names read from inputfile and values are nset_data as
    defined in the class nset_data
    and optionally:
    - dict_elset_matID: a dictionary where each key is an elset name defined in
    inputfile, and its value is the corresponding index of material in the Materials
    list for elements in this elset. This dictionary needs to be defined when
    multiple materials/material sections are present in the model
    - dict_elset_dload: a dictionary where each key is an elset name defined in
    inputfile, and its value is the corresponding dload_data (see class dload_data)
    for all elements in this elset, meaning that these elements are subjected to
    the distributed loading defined by this dload_data. This is needed when
    distributed loading is present in the model

    Returns:
    - [parts, nodes, elem_lists, f, a, RF]: the parsed parts, node list,
    per-type element lists, external force vector, solved dof vector and
    reaction force vector.

    NOTE(review): the mutable `{}` defaults are shared across calls; safe
    only if these dicts are never mutated inside -- confirm."""
    ###########################################################################
    # Preprocessing
    ###########################################################################
    # Read data from Abaqus input file and form abaqus parts
    parts = read_abaqus.read_parts_from_inputfile(inputfile)
    # check if there is only one part
    # in the future, consider making a loop over all parts
    if(not len(parts)==1):
        raise ValueError('Only a single part is supported!')
    # verification of dimensional parameters before proceeding
    verify_dimensional_parameters(parts[0], dimData)
    # form lists of nodes and elem_lists (eltype and elem indices of this type)
    nodes = form_nodes(parts[0])
    elem_lists = form_elem_lists(parts[0], dimData.NDOF_NODE, dimData.ELEM_TYPES,\
                                 dict_elset_matID)
    # form lists of bcds and cloads
    [bcd_dofs, bcd_values, cload_dofs, cload_values] = \
    form_bcds_cloads(parts[0], dict_nset_data, dimData.NDOF_NODE)
    # form lists of elset for distributed loads
    list_dload_data = form_list_dload_data(parts[0], dict_elset_dload)
    ###########################################################################
    # Assembler
    # obtain the full stiffness matrix K and external distributed force vector f
    ###########################################################################
    # form the list of all the elems for assembly
    elems = []
    for elist in elem_lists:
        # verify material type before assembly
        for elem in elist.elems:
            elem.verify_material(Materials[elem.matID])
        elems.extend(elist.elems)
    # call assembler
    [K, f] = assembler(nodes, elems, dimData.NDOF_NODE, Materials, list_dload_data)
    ###########################################################################
    # Solver
    # modify the stiffness matrix and force vector based on applied bcds and loads
    # obtain dof vector a and reaction force vector RF, both size ndof by 1
    ###########################################################################
    [a, RF] = solver(K, f, bcd_dofs, bcd_values, cload_dofs, cload_values)
    return [parts, nodes, elem_lists, f, a, RF]
def filter_fasta(infa, outfa, regex=".*", v=False, force=False):
    """Write a filtered copy of a FASTA file, keeping sequences by regex.

    Parameters
    ----------
    infa : str
        Filename of input fasta file.
    outfa : str
        Filename of output fasta file; must differ from `infa`.
    regex : str, optional
        Regular expression used for selecting sequences.
    v : bool, optional
        If True, invert the selection: keep sequences *not* matching regex.
    force : bool, optional
        If True, overwrite `outfa` (and its .fai index) if it exists.

    Returns
    -------
    fasta : Fasta instance
        pyfaidx Fasta instance of the newly created file.
    """
    if infa == outfa:
        raise ValueError("Input and output FASTA are the same file.")
    if os.path.exists(outfa):
        if not force:
            raise ValueError(
                "{} already exists, set force to True to overwrite".format(outfa))
        os.unlink(outfa)
        # remove a stale faidx index alongside the old output
        if os.path.exists(outfa + ".fai"):
            os.unlink(outfa + ".fai")
    matcher = re.compile(regex).search
    fa = Fasta(infa, filt_function=matcher)
    seqs = fa.keys()
    if v:
        # inverted mode: reopen unfiltered and keep names absent from the
        # matching set computed above
        fa = Fasta(infa)
        seqs = [name for name in fa.keys() if name not in seqs]
    if len(seqs) == 0:
        raise ValueError("No sequences left after filtering!")
    with open(outfa, "w") as out:
        for chrom in seqs:
            out.write(">{}\n".format(fa[chrom].name))
            out.write("{}\n".format(fa[chrom][:].seq))
    return Fasta(outfa)
def test_list_byte_enumeration_1_nistxml_sv_iv_list_byte_enumeration_2_3(mode, save_output, output_format):
    """
    Type list/byte is restricted by facet enumeration.
    """
    # Gather the binding arguments first, then run the assertion helper.
    binding_args = dict(
        schema="nistData/list/byte/Schema+Instance/NISTSchema-SV-IV-list-byte-enumeration-2.xsd",
        instance="nistData/list/byte/Schema+Instance/NISTXML-SV-IV-list-byte-enumeration-2-3.xml",
        class_name="NistschemaSvIvListByteEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_args)
def rmse_metric(predicted: Collection,
                actual: Collection) -> float:
    """
    Root-mean-square error between predictions and reference values.

    Args:
        predicted (list): prediction values.
        actual (list): reference values.

    Returns:
        root-mean-square-error metric.
    """
    residuals = np.subtract(predicted, actual)
    return np.sqrt(np.mean(residuals ** 2))
def set_user_data(session_id, user_data):
    """
    Temporarily store data POSTed by a user device.

    The payload is JSON-encoded into redis under the session's key with a
    30-second TTL, to be picked up by the page's ajax polling mechanism.
    Returns the session id on success, otherwise None.
    """
    key = K_USER_DATA.format(session_id)
    payload = json.dumps(user_data)
    if red.set(key, payload, ex=30):
        return session_id
    return None
def find_keys(info: dict) -> dict:
    """Map every key in a nested dict/list structure to its parent label.

    Walks ``info`` recursively. The first time a key is seen it is recorded
    with the label of its parent key: ``''`` for top-level keys, otherwise
    ``"<parent>[]."``. Dicts nested inside lists are descended into as well.

    Args:
        info: the (possibly nested) dictionary to inspect.

    Returns:
        dict mapping each key to the label of the parent under which it was
        first encountered.
    """
    avail_keys: dict = {}

    def _walk(node: dict, parent_label: str) -> None:
        for key, value in node.items():
            # record each key only once, under its first-seen parent
            if key not in avail_keys:
                avail_keys[key] = parent_label
            # isinstance (rather than type ==) also handles dict/list subclasses
            if isinstance(value, dict):
                _walk(value, key + '[].')
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        _walk(item, key + '[].')

    _walk(info, '')
    return avail_keys
def valid(exc, cur1, cur2=None, exclude=None, exclude_cur=None):
    """
    Check whether exchange ``exc`` involves currency ``cur1`` (and
    optionally ``cur2``), is not the excluded exchange ``exclude``,
    and does not involve the excluded currency ``exclude_cur``.
    """
    # Rejection guards first.
    if exclude is not None and exc == exclude:
        return False
    currencies = [exc.to_currency, exc.from_currency]
    if exclude_cur is not None and exclude_cur in currencies:
        return False
    # Then the positive currency requirements.
    if cur2 is None:
        return cur1 in currencies
    return cur1 in currencies and cur2 in currencies
def custom(value):
    """Parse custom parameters.

    Python 2 code (execfile / iteritems / print statements).

    value: string containing custom parameters -- either a single filename,
    or a comma-separated list of filenames. Each file is exec'd and every
    name it defines is registered via setCustom()."""
    files = []
    # A single existing file is taken verbatim; otherwise treat the value
    # as a comma-separated list of filenames.
    if os.path.isfile(value):
        files.append(value)
    else:
        files += value.split(",")
    for filename in files:
        customs = {}
        if os.path.isfile(filename):
            # execute the file, collecting its definitions into `customs`
            execfile(filename, customs, customs)
            for name, val in customs.iteritems():
                setCustom(name, val)
        else:
            print "Could not find custom file", filename
def arc(
    x: float, y: float, radius: float, start: float, stop: float, quantization: float = 0.1,
) -> np.ndarray:
    """Build a circular arc path. Zero angles refer to east of unit circle and
    positive values extend counter-clockwise.

    Args:
        x: center X coordinate
        y: center Y coordinate
        radius: circle radius
        start: start angle (degree)
        stop: stop angle (degree)
        quantization: maximum length of linear segment

    Returns:
        arc path as an array of complex points
    """
    def _wrap(angle_deg):
        # Fold the angle into (0, 360]; note a value of exactly 360 is kept.
        while angle_deg > 360:
            angle_deg -= 360
        while angle_deg < 0:
            angle_deg += 360
        return angle_deg

    start = _wrap(start)
    stop = _wrap(stop)
    if start == stop:
        raise ValueError("start and stop angles must have different values")
    if stop < start:
        stop += 360
    # number of segments so no chord exceeds the quantization length
    seg_count = math.ceil((stop - start) / 180 * math.pi * radius / quantization)
    angles = np.linspace(start, stop, seg_count)
    angles[angles == 360] = 0
    radians = angles * (math.pi / 180)
    return radius * (np.cos(-radians) + 1j * np.sin(-radians)) + complex(x, y)
def prepare_watermark(path_mark, inver_style):
    """
    Read the watermark image at ``path_mark`` in greyscale and create an
    alpha mask according to the scale.

    ``inver_style`` determines whether the key info (letters/shapes) is
    darker or lighter than the rest of the mark.

    NOTE(review): the function body appears to be missing -- as written it
    consists of this docstring only and implicitly returns None; confirm
    against the original source.
    """
def build_feature_columns(schema):
    """Build feature columns as input to the model.

    Returns a tuple ``(feature_columns, weight_column)``: numeric columns for
    every schema field outside the exclusion set, plus an indicator column
    for the 0/1 'promo_sensitive' field, and the numeric 'weight' column.
    """
    # fields that are not numeric model features
    non_feature_names = ['customer_id', 'brand', 'promo_sensitive', 'weight', 'label']
    numeric_columns = [
        tf.feature_column.numeric_column(name)
        for name in schema.names
        if name not in non_feature_names
    ]
    # 'promo_sensitive' is categorical (two buckets); DNNClassifier only
    # accepts dense columns, so wrap the identity column in an indicator.
    promo_column = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_identity(
            key='promo_sensitive', num_buckets=2))
    weight_column = tf.feature_column.numeric_column('weight')
    return numeric_columns + [promo_column], weight_column
def registerSampleData():
    """
    Add data sets to Sample Data module.
    """
    # It is always recommended to provide sample data for users to make it easy to try the module,
    # but if no sample data is available then this method (and associated startupCompeted signal connection) can be removed.
    import SampleData
    iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')
    # To ensure that the source code repository remains small (can be downloaded and installed quickly)
    # it is recommended to store data sets that are larger than a few MB in a Github release.
    # (sampleName, download URL, target file name, sha256 checksum)
    sample_sources = [
        ('WaveModeling1',
         "https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95",
         'WaveModeling1.nrrd',
         'SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95'),
        ('WaveModeling2',
         "https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97",
         'WaveModeling2.nrrd',
         'SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97'),
    ]
    for sampleName, uri, fileName, checksum in sample_sources:
        SampleData.SampleDataLogic.registerCustomSampleDataSource(
            # Category and sample name displayed in Sample Data module
            category='WaveModeling',
            sampleName=sampleName,
            # Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.
            # It can be created by Screen Capture module, "Capture all views" option enabled, "Number of images" set to "Single".
            thumbnailFileName=os.path.join(iconsPath, sampleName + '.png'),
            uris=uri,
            fileNames=fileName,
            # Checksum to ensure file integrity. Can be computed by this command:
            # import hashlib; print(hashlib.sha256(open(filename, "rb").read()).hexdigest())
            checksums=checksum,
            # This node name will be used when the data set is loaded
            nodeNames=sampleName,
        )
def get_mol_func(smiles_type):
    """
    Return a function that converts the given SMILES flavour to a mol object.

    :param smiles_type: The SMILES type to convert VALUES=(deepsmiles.*, smiles, scaffold).
    :return: A function pointer.
    """
    # Plain SMILES (and scaffold) strings convert directly.
    if not smiles_type.startswith("deepsmiles"):
        return to_mol
    # DeepSMILES variants are decoded first, using the requested converter.
    _, deepsmiles_type = smiles_type.split(".")

    def _deepsmiles_to_mol(deepsmi):
        return to_mol(from_deepsmiles(deepsmi, converter=deepsmiles_type))

    return _deepsmiles_to_mol
def index():
    """Render the homepage template."""
    homepage = "index.html"
    return render_template(homepage)
def species_thermo_value(spc_dct):
    """Return the species enthalpy at 298 K stored under the 'H298' key.

    Raises KeyError if the species dict has no 'H298' entry.
    """
    enthalpy_298 = spc_dct['H298']
    return enthalpy_298
def save_solution_1d(iMax, dX, U, OutFile):
    """Write the 1d solution (grid metadata plus X/U columns) to OutFile."""
    # Header: grid size, step, and column titles.
    header_lines = [
        f"IMAX= {iMax}",
        f"Grid Step= {dX:>6.4f}",
        f'{"X":^14} {"U":^14}',
    ]
    for line in header_lines:
        print(line, file=OutFile)
    # One row per grid point: coordinate and solution value.
    for idx in range(iMax):
        print(f"{dX*idx:>12.8e} {U[idx]:>12.8e}", file=OutFile)
def invest_validator(validate_func):
    """Decorator to enforce characteristics of validation inputs and outputs.
    Attributes of inputs and outputs that are enforced are:
        * ``args`` parameter to ``validate`` must be a ``dict``
        * ``limit_to`` parameter to ``validate`` must be either ``None`` or a
          string (``str`` or ``unicode``) that exists in the ``args`` dict.
        *  All keys in ``args`` must be strings
        * Decorated ``validate`` func must return a list of 2-tuples, where
          each 2-tuple conforms to these rules:
            * The first element of the 2-tuple is an iterable of strings.
              It is an error for the first element to be a string.
            * The second element of the 2-tuple is a string error message.
    In addition, this validates the ``n_workers`` argument if it's included.
    Raises:
        AssertionError when an invalid format is found.
    Example:
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            # do your validation here
    """
    def _wrapped_validate_func(args, limit_to=None):
        # Enforce the contract on the wrapped function's signature and inputs.
        validate_func_args = inspect.getfullargspec(validate_func)
        assert validate_func_args.args == ['args', 'limit_to'], (
            'validate has invalid parameters: parameters are: %s.' % (
                validate_func_args.args))
        assert isinstance(args, dict), 'args parameter must be a dictionary.'
        assert (isinstance(limit_to, type(None)) or
                isinstance(limit_to, str)), (
                    'limit_to parameter must be either a string key or None.')
        if limit_to is not None:
            assert limit_to in args, ('limit_to key "%s" must exist in args.'
                                      % limit_to)
        for key, value in args.items():
            assert isinstance(key, str), (
                'All args keys must be strings.')
        # Pytest in importlib mode makes it impossible for test modules to
        # import one another.  This causes a problem in test_validation.py,
        # which gets imported into itself here and fails.
        # Since this decorator might not be needed in the future,
        # just ignore failed imports; assume they have no ARGS_SPEC.
        try:
            model_module = importlib.import_module(validate_func.__module__)
        except:
            # NOTE(review): bare except is deliberate best-effort here, but it
            # also swallows KeyboardInterrupt/SystemExit -- consider narrowing.
            LOGGER.warning('Unable to import module %s: assuming no ARGS_SPEC.',
                           validate_func.__module__)
            model_module = None
        # If the module has an ARGS_SPEC defined, validate against that.
        if hasattr(model_module, 'ARGS_SPEC'):
            LOGGER.debug('Using ARG_SPEC for validation')
            args_spec = getattr(model_module, 'ARGS_SPEC')['args']
            if limit_to is None:
                LOGGER.info('Starting whole-model validation with ARGS_SPEC')
                warnings_ = validate_func(args)
            else:
                LOGGER.info('Starting single-input validation with ARGS_SPEC')
                args_key_spec = args_spec[limit_to]
                args_value = args[limit_to]
                error_msg = None
                # We're only validating a single input.  This is not officially
                # supported in the validation function, but we can make it work
                # within this decorator.
                try:
                    if args_key_spec['required'] is True:
                        if args_value in ('', None):
                            error_msg = "Value is required"
                except KeyError:
                    # If required is not defined in the args_spec, we default
                    # to False.  If 'required' is an expression, we can't
                    # validate that outside of whole-model validation.
                    pass
                # If the input is not required and does not have a value, no
                # need to validate it.
                if args_value not in ('', None):
                    # Dispatch to the per-type validator for this input.
                    input_type = args_key_spec['type']
                    validator_func = _VALIDATION_FUNCS[input_type]
                    try:
                        validation_options = (
                            args_key_spec['validation_options'])
                    except KeyError:
                        validation_options = {}
                    error_msg = (
                        validator_func(args_value, **validation_options))
                if error_msg is None:
                    warnings_ = []
                else:
                    warnings_ = [([limit_to], error_msg)]
        else:  # args_spec is not defined for this function.
            LOGGER.warning('ARGS_SPEC not defined for this model')
            warnings_ = validate_func(args, limit_to)
        LOGGER.debug('Validation warnings: %s',
                     pprint.pformat(warnings_))
        return warnings_
    return _wrapped_validate_func
async def async_setup(hass: HomeAssistant, config: dict):
    """Initialise the Flood integration's shared storage.

    Ensures a per-domain dict exists under ``hass.data`` and reports
    success to Home Assistant.
    """
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
def typehint(x, typedict):
    """Return a copy of `x` cast so that fields named in `typedict` take
    the dtypes given there, while all other fields keep their own dtype.
    """
    current = x.dtype
    # fields.values() are (dtype, offset) pairs; keep only the dtype part.
    existing = {name: first(spec) for name, spec in current.fields.items()}
    combined = merge(existing, typedict)
    ordered = sort_dtype_items(list(combined.items()), current.names)
    return x.astype(np.dtype(ordered))
def balance(samples, labels, balance_factor, adjust_func):
    """Balance a dataset so every class reaches a common target size.

    Classes above the target are randomly subsampled; classes below it are
    padded with new samples produced by ``adjust_func`` applied to random
    existing members.  A ``balance_factor`` <= 1.0 is interpreted as a
    fraction of the largest class; larger values are an absolute per-class
    count.  Returns the rebalanced (samples, labels) pair.
    """
    grouped = group_by_label(samples, labels)
    if balance_factor <= 1.0:
        biggest = max(len(members) for _, members in grouped)
        target = int(biggest * balance_factor)
    else:
        target = int(balance_factor)
    balanced = []
    for label, members in grouped:
        if len(members) > target:
            resized = random.sample(members, target)
        else:
            resized = [member for member in members]
            while len(resized) < target:
                # Synthesize new samples from random originals.
                resized.append(adjust_func(random.choice(members)))
        balanced.append((label, resized))
    pairs = [(sample, label) for label, resized in balanced for sample in resized]
    return zip(*pairs)
def make_util_private_h(cfile):
    """Create util_private.h next to *cfile*.

    Generates a C header containing character-class lookup tables
    (URL-safe characters and hex digits) plus helper macros over them.
    NOTE(review): this is Python 2-only code (`print >>`, `file()`,
    `urllib.always_safe`); porting to Python 3 would also require a
    replacement for `urllib.always_safe`.
    """
    import urllib, posixpath
    # The generated header lives beside the C source file.
    hfile = posixpath.join(posixpath.dirname(cfile), 'util_private.h')
    fp = file(hfile, 'w')
    try:
        # Static preamble: include guard and macros that consult the two
        # tables emitted below.
        print >> fp, """/*
 * XXX This file is autogenerated by setup.py, don't edit XXX
 */
#ifndef WTF_UTIL_PRIVATE_H
#define WTF_UTIL_PRIVATE_H
#define WTF_SAFE_CHAR (1 << 0)
#define WTF_HEX_DIGIT (1 << 1)
#define WTF_IS_SAFE_CHAR(table, c) \\
((table)[(unsigned char)(c) & 0xFF] & WTF_SAFE_CHAR)
#define WTF_IS_HEX_DIGIT(c) \\
(wtf_charmask[(unsigned char)(c) & 0xFF] & WTF_HEX_DIGIT)
#define WTF_HEX_VALUE(c) (wtf_hextable[(unsigned char)c & 0xFF])
#define WTF_IS_LATIN1(u) (!((u) & ~((Py_UNICODE)0xFF)))
static const char *wtf_hex_digits = "0123456789ABCDEF";
#define WTF_HEXDIGIT_HIGH(c) \\
(wtf_hex_digits[((((unsigned char)c) & 0xF0) >> 4)])
#define WTF_HEXDIGIT_LOW(c) (wtf_hex_digits[((unsigned char)c) & 0xF])
static const unsigned char wtf_charmask[256] = {"""
        # Emit a 16x16 bitmask table: bit 0 set for URL-safe characters,
        # bit 1 set for hex digits.
        for x in range(16):
            line = []
            for y in range(16):
                mask = int(chr(x * 16 + y) in urllib.always_safe)
                if chr(x*16 + y) in 'abcdefABCDEF0123456789':
                    mask |= 2
                if mask < 10:
                    # Right-pad single digits so the columns line up.
                    mask = ' ' + str(mask)
                line.append(str(mask))
            line.append('')  # yields a trailing comma on every emitted row
            print >> fp, ', '.join(line)
        print >> fp, """};
static const unsigned char wtf_hextable[256] = {"""
        # Emit the hex-digit value table: 0-15 for [0-9a-fA-F], 0 otherwise.
        for x in range(16):
            line = []
            for y in range(16):
                c = chr(x*16 + y)
                if c in 'abcdef':
                    line.append(str('abcdef'.index(c) + 10))
                elif c in 'ABCDEF':
                    line.append(str('ABCDEF'.index(c) + 10))
                elif c in '0123456789':
                    line.append(' ' + str(int(c)))
                else:
                    line.append(' 0')
            line.append('')  # yields a trailing comma on every emitted row
            print >> fp, ', '.join(line)
        print >> fp, """};
#endif"""
    finally:
        fp.close()
def generate_sha1(string, salt=None):
    """Produce a (salt, sha1-hexdigest) pair for *string*.

    Doesn't need to be very secure because it's not used for password
    checking -- Django handles that; this is a lightweight digest for
    e.g. activation keys.

    :param string: The string that needs to be encrypted.
    :param salt: Optionally define your own salt. If none is supplied,
        a random 5-character one is generated.
    :return: Tuple containing the salt and hash.
    """
    if not salt:
        # Derive a short random salt from a hash of random().
        salt = sha_constructor(str(random.random())).hexdigest()[:5]
    digest = sha_constructor(salt + str(string)).hexdigest()
    return (salt, digest)
def sentences_from_doc(ttree_doc, language, selector):
    """Collect the sentence of every bundle's (language, selector) zone
    in the given Treex document."""
    sentences = []
    for bundle in ttree_doc.bundles:
        sentences.append(bundle.get_zone(language, selector).sentence)
    return sentences
from contextlib import contextmanager


@contextmanager
def managed_session(
    factory: "sessionmaker", **kwargs,
) -> "t.ContextManager[Session]":
    """Wrap a session in a context manager to manage session lifetime.

    Commits when the ``with`` body runs successfully, rolls back (and
    re-raises) when it raises, and always closes the session.

    Parameters
    ----------
    factory: sessionmaker
        SQLAlchemy session factory to use.
    kwargs: Dict
        Keyword arguments to pass to the session factory.

    Yields
    ------
    Session
        Managed session object.
    """
    # Fix: without @contextmanager this generator could not be used in a
    # `with` statement, contrary to its documented contract.
    session = factory(**kwargs)
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        # Bare raise keeps the original traceback intact.
        raise
    finally:
        session.close()
def train_test_split(data_dir):
    """Split a folder of brat files into train/test subfolders.

    Reads the 'train-test-split.csv' manifest in *data_dir* and moves each
    complete (.txt, .ann) pair into 'train/' or 'test/' accordingly.
    Entries with a missing half of the pair are left in place.
    """
    manifest = data_dir + 'train-test-split.csv'
    subdirs = {'TRAIN': 'train/', 'TEST': 'test/'}
    with open(manifest, newline='') as handle:
        for record in csv.reader(handle, delimiter=';'):
            target = subdirs.get(record[1])
            if target is None:
                continue
            txt = data_dir + record[0] + '.txt'
            ann = data_dir + record[0] + '.ann'
            if os.path.isfile(txt) and os.path.isfile(ann):
                os.rename(txt, data_dir + target + record[0] + '.txt')
                os.rename(ann, data_dir + target + record[0] + '.ann')
def demo_func(par):
    """Toy objective used to exercise the optimizer.

    Computes (x + (-y)*z) / (f(p)**2 + 1), where f is sin or cos as
    selected by the 'str' entry of *par*.
    """
    trig = {'sin': math.sin, 'cos': math.cos}[par['str']]
    numerator = par['x'] + (-par['y']) * par['z']
    denominator = trig(par['p']) ** 2 + 1
    return numerator / denominator
def file_to_list(filename):
    """Read a one-column text file into a list.

    :param filename: path of the file to read
    :return: list with one stripped line per element
    """
    with open(filename, 'r') as handle:
        return [entry.strip() for entry in handle]
def regular_ticket_price(distance_in_km: int) -> float:
    """Compute the regular ticket price for the given distance.

    Tariff bands follow Tarife 601 chapter 10.1.3 on
    https://www.allianceswisspass.ch/de/Themen/TarifeVorschriften;
    the result never drops below the minimum price.

    :int distance_in_km: travelled distance in kilometres
    """
    tariff_bands = {
        range(1, 5): 44.51,
        range(5, 15): 42.30,
        range(15, 49): 37.24,
        range(49, 151): 26.46,
        range(151, 201): 25.71,
        range(201, 251): 22.85,
        range(251, 301): 20.63,
        range(301, 481): 20.09,
        range(481, 1501): 19.85,
    }
    price = calculate_price(distance_in_km, tariff_bands)
    # Enforce the floor price.
    return max(price, MINDESTPRICE_IN_CHF)
def find_cal_indices(datetimes):
    """Locate the start of each calibration event.

    Cal events are any time a standard is injected and quantified; data
    more than 60 seconds after the previous point starts a new event.

    :param datetimes: pandas Series of timestamps for all supplied data
    :return: list of indices, each marking the first point of a new event
    """
    gaps = datetimes.diff()
    # NOTE(review): the original carried a comment "subtract one from all
    # indices" but no subtraction is performed -- confirm intended semantics.
    new_event = gaps > pd.Timedelta(seconds=60)
    return gaps.loc[new_event].index.values.tolist()
def rename(isamAppliance, id, new_name, check_mode=False, force=False):
    """Rename a Password Strength rule on the appliance.

    Returns an empty return-object (no change) when the rule is absent and
    force is not set; in check mode only reports that a change would occur.
    """
    should_apply = force is True or _check(isamAppliance, id) is True
    if not should_apply:
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put(
        "Rename a Password Strength",
        "/wga/pwd_strength/{0}".format(id),
        {
            'id': id,
            'new_name': new_name
        })
def reinvoke_on_edit(ctx, *additional_messages: discord.Message, timeout: float = 600) -> None:
    # noinspection PyUnresolvedReferences
    """
    Watches a given context for a given period of time. If the message that
    invoked the context is edited within the time period, then the invoking message
    plus any additional messages are deleted. The context's command is then reinvoked
    with the new message body.

    NOTE(review): the docstring below says `timeout` defaults to None, but the
    signature default is 600 seconds -- confirm which is intended.

    Parameters:
        ctx:
            A :class:`discord.ext.commands.Context` to listen to. Create one with
            `bot.get_context` if you are in an event instead.
        additional_messages:
            Any additional messages to also destroy on close.
        timeout:
            The timeout to wait for before the call terminates. This defaults to `None`, which is
            a special case depending on whether or not the `ctx` that was passed was actually a
            `BaseNavigator` object. If the latter holds, then the timeout will trigger as soon as
            the navigator timeout triggers.

    Note:
        To invoke this on a response that is being paginated using the `libneko.pagination` module, you
        should attempt to invoke it like so::

            >>> factory = ...
            >>> nav = factory.build()
            >>> nav.start(ctx)
            >>> reinvoke_on_edit(ctx, *nav.all_messages)
            >>>
            >>> # or if you just have a nav
            >>>
            >>> nav = StringNavigator(...)
            >>> nav.start()
            >>> reinvoke_on_edit(ctx, *nav.all_messages)
    """
    if ctx.command is None:
        raise ValueError("Cannot reinvoke a non-valid command or non-command invocation")

    async def handle_wait_for_edit_or_close():
        # Background task that races an "invoking message deleted" waiter
        # against an "invoking message edited" waiter and tears both down
        # once either finishes.
        try:
            # Triggered when we should kill our events.
            event = asyncio.Event()

            def set_on_exit(f):
                # Decorator: run f, then signal `event` so the sibling
                # waiter task gets cancelled below.
                @neko3.functional.wraps(f)
                async def wrapper():
                    r = await f()
                    event.set()
                    return r

                return wrapper

            @set_on_exit
            async def wait_for_close():
                # Resolves when the invoking message is deleted, or when
                # the timeout elapses (both treated the same).
                try:
                    await ctx.bot.wait_for("message_delete", check=lambda m: m.id == ctx.message.id, timeout=timeout)
                except (asyncio.CancelledError, asyncio.TimeoutError):
                    pass

            @set_on_exit
            async def wait_for_edit():
                try:
                    def predicate(before, after):
                        # Returns True only for a real content edit of the
                        # invoking message that still targets this command.
                        # An exception here makes the predicate return None
                        # (falsy), i.e. the edit is ignored.
                        try:
                            # Only respond to this message
                            if after.id != ctx.message.id:
                                return False
                            elif before.content == after.content:
                                # Again, something went weird.
                                return False
                            elif not after.content.startswith(ctx.prefix):
                                return False
                            else:
                                # Ensure same command.
                                # NOTE(review): this reads ctx.message.content
                                # (the pre-edit text), not after.content --
                                # confirm that is intentional.
                                invoked = ctx.message.content[len(ctx.prefix) :].lstrip()
                                return invoked.startswith(ctx.invoked_with)
                        except Exception:
                            traceback.print_exc()

                    _, after = await ctx.bot.wait_for("message_edit", check=predicate)
                    new_ctx = await ctx.bot.get_context(after)
                    # Delete the helper messages concurrently, then reinvoke
                    # the command against the edited message's context.
                    asyncio.ensure_future(asyncio.gather(*[m.delete() for m in additional_messages]), loop=ctx.bot.loop)
                    ctx.bot.loop.create_task(ctx.command.reinvoke(new_ctx))
                except asyncio.CancelledError:
                    pass
                except Exception:
                    traceback.print_exc()

            tasks = [ctx.bot.loop.create_task(wait_for_close()), ctx.bot.loop.create_task(wait_for_edit())]
            # On either of these events triggering, we kill the lot.
            await event.wait()
            for task in tasks:
                try:
                    task.cancel()
                    task.result()
                except Exception:
                    pass
        except Exception:
            traceback.print_exc()

    ctx.bot.loop.create_task(handle_wait_for_edit_or_close())
def compile_test_programs():
    """Invoke CMake and make to create the test binaries.

    Uses a Debug build so compiler optimizations do not get in the way of
    the tests.
    """
    subprocess.call(["cmake", "-DCMAKE_BUILD_TYPE=Debug", ts.TEST_PROGRAMS_FOLDER], cwd=ts.TEST_PROGRAMS_FOLDER)
    subprocess.call(["make"], cwd=ts.TEST_PROGRAMS_FOLDER)
    # Fix: was a Python 2-only print statement; print() works on 2 and 3.
    print("All test programs compiled.")
def gen_model_forms(form, model):
    """Build a mapping of model forms.

    Key 0 holds a blank *form* for adding new model objects; every existing
    instance m is mapped from m.pk to a form pre-populated with m's fields.
    """
    forms_by_pk = {0: form()}
    forms_by_pk.update(
        (instance.pk, form(instance=instance)) for instance in model.objects.all()
    )
    return forms_by_pk
def _state_size_with_prefix(state_size, prefix=None):
    """Convert an int or TensorShape size spec into a list of ints.

    Args:
      state_size: TensorShape or int that specifies the size of a tensor.
      prefix: optional additional list of dimensions to prepend.

    Returns:
      list of dimensions for the resulting tensor size.

    Raises:
      TypeError: if `prefix` is given but is not a list.
    """
    dims = tensor_shape.as_shape(state_size).as_list()
    if prefix is None:
        return dims
    if not isinstance(prefix, list):
        raise TypeError("prefix of _state_size_with_prefix should be a list.")
    return prefix + dims
def as_pandas(cursor, coerce_float=False):
    """Materialize an impyla cursor's result set as a pandas DataFrame.

    This pulls the entire result set into memory. For richer pandas-like
    functionality on distributed data sets, see the Ibis project.

    Parameters
    ----------
    cursor : `HiveServer2Cursor`
        The cursor object that has a result set waiting to be fetched.
    coerce_float : bool, optional
        Attempt to convert values of non-string, non-numeric objects to
        floating point.

    Returns
    -------
    DataFrame
    """
    from pandas import DataFrame  # pylint: disable=import-error

    column_names = [meta[0] for meta in cursor.description]
    rows = cursor.fetchall()
    return DataFrame.from_records(rows, columns=column_names,
                                  coerce_float=coerce_float)
def test_values():
    """Exercise LDAPEntry's values method, with and without the DN."""
    entry = LDAPEntry("cn=test")
    entry["cn"] = "test"
    entry["sn"] = "Test"
    # By default the DN counts as a value alongside the two attributes.
    assert len(entry.values()) == 3
    for expected in (entry.dn, entry["cn"], entry["sn"]):
        assert expected in entry.values()
    # With exclude_dn the DN is dropped but the attributes remain.
    assert len(list(entry.values(exclude_dn=True))) == 2
    assert entry.dn not in entry.values(exclude_dn=True)
    for expected in (entry["cn"], entry["sn"]):
        assert expected in entry.values(exclude_dn=True)
def check_ip(ip, network_range):
    """Return True when *ip* falls inside *network_range*.

    *network_range* is expected in CIDR notation ("a.b.c.d/mask"); when no
    mask is given, /32 (a single host) is assumed.  Malformed addresses
    yield False rather than raising.
    """
    parts = str(network_range).split('/')
    range_ip = parts[0]
    if len(parts) == 2:
        range_mask = int(parts[1])
    else:
        range_mask = 32
    try:
        range_int = ip2int(range_ip)
        ip_int = ip2int(ip)
        # The addresses match when their network bits (top `range_mask`
        # bits) are identical: XOR isolates differing bits and the shifted
        # mask discards the host part.  (`<<` binds tighter than `&`.)
        result = not ((ip_int ^ range_int) & 0xFFFFFFFF << (32 - range_mask))
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit propagate; any parse failure means "not in range".
        result = False
    return result
async def index(_request: HttpRequest) -> HttpResponse:
    """A request handler which provides an HTML index page linking to each
    supported compression method (gzip, deflate, compress)."""
    # NOTE(review): leading whitespace inside this literal is emitted
    # verbatim in the response body; kept flush-left as in the original.
    html = """
<!DOCTYPE html>
<html>
<body>
<ul>
<li><a href='/gzip'>gzip</a></li>
<li><a href='/deflate'>deflate</a></li>
<li><a href='/compress'>compress</a></li>
</ul>
</body>
</html>
"""
    # 200 OK with a text/html body streamed through text_writer.
    return HttpResponse(
        200,
        [(b'content-type', b'text/html')],
        text_writer(html)
    )
def ifft_function(G, Fs, axis=0):
    """
    Compute the inverse DFT of complex Fourier coefficients.

    Arguments
    ---------------------------
    G : double
        DFT (complex Fourier coefficients), given as a centered
        (fftshifted) spectrum
    Fs : double
        sample rate, maximum frequency of G times 2 (=F_nyquist*2)
    axis : double
        the axis on which the IDFT operates

    Returns
    ---------------------------
    t : double
        time axis
    x : double
        time series
    """
    G = np.atleast_2d(G)
    n_points = np.shape(G)[axis]
    # Fixes: ifftshift takes `axes` (not three positionals), the transform
    # must run along the requested axis, and the result was never assigned
    # to `x`, so the original raised NameError on return.
    # NOTE(review): numpy's ifft already divides by n; the extra /n_points
    # scaling is kept from the original -- confirm intended normalization.
    x = np.fft.ifft(np.fft.ifftshift(G, axes=axis), axis=axis) / n_points
    dt = 1 / np.double(Fs)
    t = np.arange(0, dt * n_points, dt)
    return t, x
def test_atomic_duration_min_inclusive_nistxml_sv_iv_atomic_duration_min_inclusive_1_2(mode, save_output, output_format):
    """
    Type atomic/duration is restricted by facet minInclusive with value
    P1970Y01M01DT00H00M00S.
    """
    base = "nistData/atomic/duration/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-atomic-duration-minInclusive-1.xsd",
        instance=base + "NISTXML-SV-IV-atomic-duration-minInclusive-1-2.xml",
        class_name="NistschemaSvIvAtomicDurationMinInclusive1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def configure(**config):
    """Configure the module-level root-command helper.

    :param **config: Dict of configuration options.
    """
    global _ROOTWRAPPER
    # Don't break existing deploys: fall back to the historical config path.
    rw_conf_file = config.get('rootwrap_config', '/etc/nova/rootwrap.conf')
    if config.get('use_rootwrap_daemon', False):
        _ROOTWRAPPER = RootwrapDaemonHelper(rw_conf_file)
        return
    if config.get('disable_rootwrap', False):
        root_helper = 'sudo'
    else:
        root_helper = 'sudo nova-rootwrap %s' % rw_conf_file
    _ROOTWRAPPER = RootwrapProcessHelper(root_helper=root_helper)
def user_tweets_stats_grouped_new(_, group_type):
    """
    Args:
        _: Http Request (ignored in this function)
        group_type: Keyword defining group label (day,month,year)
    Returns: Activities grouped by (day or month or year) wrapped on response's object
    """
    hierarchy = ["year", "month", "day"]
    # Keep every level down to (and including) the requested granularity.
    selected = hierarchy[:hierarchy.index(group_type) + 1]
    success, data, message = queries.user_tweets_stats_grouped(selected, accum=False)
    if success:
        return create_response(data=data, error_messages=[],
                               success_messages=[message], status=HTTP_200_OK)
    return create_response(data=data, error_messages=[message],
                           success_messages=[], status=HTTP_403_FORBIDDEN)
def test_setext_headings_extra_23():
    """
    Test case extra 23: SetExt heading ends with 2+ spaces as in a hard line break, but not since end of paragraph
    """
    # Arrange: three trailing spaces before the newline (written literally
    # instead of via the original's "\a" placeholder replacement).
    source_markdown = "what? no line break?   \n---"
    expected_tokens = [
        "[setext(2,1):-:3::(1,1): ]",
        "[text(1,1):what? no line break?:]",
        "[end-setext::]",
    ]
    expected_gfm = "<h2>what? no line break?</h2>"
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
def analyse_editing_percent(pileup_file, out_file, summary_file=None, add_headers=False, summary_only=False, min_editing=0,
                            max_noise=100, min_reads=1, edit_tag=''):
    """
    analyses pileup file editing sites and summarises it
    @param pileup_file: input pileup file name
    @param out_file: name of file to write new lines
    @param summary_file: File to put the summary_string
    @param add_headers: Boolean, whether to add csv like headers to new pileups
    @param summary_only: Boolean, whether to only generate summary file and not generate edited pileup files
    @param min_editing: minimal editing threshold, percent
    @param max_noise: maximal noise threshold, percent
    @param min_reads: minimal reads per site to be considered an editing site, int
    @param edit_tag: tag to add to sites that are classified as editing sites
    @param kwargs:
        NOTE(review): kwargs is documented but the signature does not accept
        **kwargs -- confirm which is intended.
    @return: dict with histogram of site editing types, side effect creates out_file a pile up file with tags
    indicating noise, editing percent and wether a given site was classified as an editing site
        NOTE(review): the function actually returns the formatted summary
        string written to summary_file, not a dict.
    """
    # with open in read and out write
    with open(pileup_file, 'r') as in_fp, \
            open(out_file, 'w') as out_fp:
        #"r","y","s","w","k","m","b","d","h","v","n"
        # intitalize zero counts in summary dict: one counter per ordered
        # (reference, observed) base pair with differing bases.
        #summary_dict = {(nuc1, nuc2): 0 for nuc1, nuc2 in itertools.product('acgtACGT', repeat=2) if nuc1.upper() != nuc2.upper()}
        #TODO why do we need lower case references?
        summary_dict = {(nuc1, nuc2): 0 for nuc1, nuc2 in itertools.product('ACGTN', repeat=2) if nuc1.upper() != nuc2.upper()}
        #add RYSWKMBDHV to string if needed
        summary_dict['unchanged'] = 0
        if add_headers:
            out_fp.writelines([get_header_line(edit_tag)])
            out_fp.write("\n")
        # Column names used when emitting csv-style lines with short tags.
        list_headers = ["editing_min_percent", "noise_percent"]
        if edit_tag:
            list_headers.append("const_tag")
        pileup_gen = class_generator(Pileup_line, file=in_fp)
        line_num = -1  # stays -1 when the input file is empty
        for line_num, pileup_line in enumerate(pileup_gen):
            # Classify the site and get back a (possibly tagged) line.
            edit_type, new_pileup_line = is_pileup_line_edited(pileup_line,
                                                              read_thresh_hold=min_reads,
                                                              editing_min_percent_threshold=float(min_editing),
                                                              noise_percent_threshold=float(max_noise),
                                                              const_tag=edit_tag
                                                              )
            summary_dict[edit_type] += 1
            # add empty tag if we make it into a csv so the columns will align
            if add_headers and edit_type == 'unchanged' and edit_tag != '':
                new_pileup_line.tags["const_tag"] = ''
            # if not summarise only, print the pileup line
            if not summary_only:
                #out_fp.write("\n")
                if not add_headers:
                    out_fp.writelines([str(new_pileup_line)])
                    out_fp.write("\n")
                else:
                    out_fp.writelines([new_pileup_line.line_to_csv_with_short_tags(list_headers)])
                    out_fp.write("\n")
        # dont skip file, create empty summary
        #if line_num == -1 :
        #    open(summary_file, "w").close()
        #    return #when the file is empty - skip the file!
        ##out_fp.write("\n")
        # make summary counts into percentage (fractions of total lines)
        total_line_number = line_num + 1
        summary_dict_sub = {key: float(val) / total_line_number if total_line_number != 0 else 0 for key, val in summary_dict.items()}
        # printing the summary format to an individual file
        with open(summary_file, "w") as file1:
            summer_str = individual_summary_format(pileup_file, summary_dict_sub)
            file1.write(summer_str)
        return summer_str
def login_required(func, *args, **kwargs):
    """
    Decorator for Controller methods that require a logged-in user.

    The inner method receives the Controller instance and checks
    `request.is_authenticated` on it; the wrapped method runs only when
    that flag is True.

    :param func: The method being decorated.
    :return: Either the method that is decorated (if user is logged in)
        else an `unauthenticated` response (HTTP 401).
    """
    import functools

    # Fix: functools.wraps preserves the wrapped method's name/docstring,
    # which introspection and routing may rely on.
    @functools.wraps(func)
    def inner(controller_obj, *args, **kwargs):
        if controller_obj.request.is_authenticated:
            return func(controller_obj, *args, **kwargs)
        return response.json({
            "message": "unauthenticated"
        }, status=401)

    return inner
def preProcessImage(rgbImage):
    """ Preprocess the input RGB image for lane-marking detection.
    @rgbImage: Input RGB Image (BGR channel order, per the cv2.COLOR_BGR2*
        conversions used below)
    @return: (blurred yellow/white mask applied to the grayscale image,
        image width, image height)
    """
    # Color space conversion
    img_gray = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2GRAY)
    # NOTE(review): converts with COLOR_BGR2HLS, but the variable is named
    # img_hsv and the yellow thresholds below look like HSV values --
    # confirm the intended color space.
    img_hsv = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2HLS)
    ysize, xsize = getShape(img_gray)
    #Detecting yellow and white colors
    low_yellow = np.array([20, 100, 100])
    high_yellow = np.array([30, 255, 255])
    mask_yellow = cv2.inRange(img_hsv, low_yellow, high_yellow)
    mask_white = cv2.inRange(img_gray, 200, 255)
    # Union of the two color masks, then keep only the grayscale pixels
    # that fall under the mask.
    mask_yw = cv2.bitwise_or(mask_yellow, mask_white)
    mask_onimage = cv2.bitwise_and(img_gray, mask_yw)
    #Smoothing for removing noise
    gray_blur = cv2.GaussianBlur(mask_onimage, (5,5), 0)
    return gray_blur, xsize, ysize
def distribute():
    """Run benchmarks on the hardware requested by the sweep config.

    CPU experiments run in a single process; GPU experiments are spread
    over all available GPUs.  When both device classes are requested, one
    worker process is spawned per class.  Results are optionally uploaded
    to wandb.
    """
    sweep_config = OmegaConf.load("tools/benchmarking/benchmark_params.yaml")
    devices = sweep_config.hardware
    if not torch.cuda.is_available() and "gpu" in devices:
        logger.warning("Config requested GPU benchmarking but torch could not detect any cuda enabled devices")
    elif {"cpu", "gpu"}.issubset(devices):
        # One process each for the CPU and GPU workloads.
        ctx = multiprocessing.get_context("spawn")
        with ProcessPoolExecutor(max_workers=2, mp_context=ctx) as executor:
            jobs = [executor.submit(compute_on_cpu), executor.submit(distribute_over_gpus)]
            for job in as_completed(jobs):
                try:
                    job.result()
                except Exception as exception:
                    raise Exception(f"Error occurred while computing benchmark on device {job}") from exception
    elif "cpu" in devices:
        compute_on_cpu()
    elif "gpu" in devices:
        distribute_over_gpus()
    if "wandb" in sweep_config.writer:
        upload_to_wandb(team="anomalib")
def consume_messages(consumer: Consumer, num_expected: int, serialize: bool = True) -> List[Dict[str, Any]]:
    """Poll a topic until `num_expected` messages arrive or POLL_TIMEOUT elapses.

    Message payloads are utf-8 decoded and, when `serialize` is set,
    JSON-parsed.  The consumer is closed before returning.
    """
    deadline = time.time() + POLL_TIMEOUT
    collected = []
    while time.time() < deadline:
        message = consumer.poll(1)
        if message is None:
            continue
        error = message.error()
        if error:
            logger.error(error)
        else:
            payload = message.value().decode("utf-8")
            collected.append(json.loads(payload) if serialize else payload)
        if len(collected) == num_expected:
            break
    consumer.close()
    return collected
def get_compressed_size(data, compression, block_size=DEFAULT_BLOCK_SIZE):
    """
    Return the number of bytes required when the given data is compressed.

    Parameters
    ----------
    data : buffer
    compression : str
        The type of compression to use.
    block_size : int, optional
        Input data will be split into blocks of this size (in bytes)
        before the compression.

    Returns
    -------
    bytes : int
    """
    compression = validate(compression)
    encoder = _get_encoder(compression)
    total = sum(
        len(encoder.compress(data[offset:offset + block_size]))
        for offset in range(0, len(data), block_size)
    )
    # Some encoders buffer internally; account for the flushed remainder.
    if hasattr(encoder, "flush"):
        total += len(encoder.flush())
    return total
def flat_command(bias=False,
                 flat_map=False,
                 return_shortname=False,
                 dm_num=1):
    """
    Create a DmCommand object for a flat (all-zero) command.

    :param bias: Boolean flag for whether to apply a bias.
    :param flat_map: Boolean flag for whether to apply a flat_map.
    :param return_shortname: When True, also return a descriptive string
        (good for filenames) as the second value.
    :param dm_num: 1 or 2, for DM1 or DM2.
    :return: DmCommand object, optionally followed by the short name.
    """
    short_name = "flat"
    if flat_map:
        short_name += "_flat_map"
    if bias:
        short_name += "_bias"
    num_actuators_pupil = CONFIG_INI.getint(config_name, 'dm_length_actuators')
    zero_array = np.zeros((num_actuators_pupil, num_actuators_pupil))
    command = DmCommand(zero_array, dm_num, flat_map=flat_map, bias=bias)
    if return_shortname:
        return command, short_name
    return command
def plot_graph_routes(
    G,
    routes,
    bbox=None,
    fig_height=6,
    fig_width=None,
    margin=0.02,
    bgcolor="w",
    axis_off=True,
    show=True,
    save=False,
    close=True,
    file_format="png",
    filename="temp",
    dpi=300,
    annotate=False,
    node_color="#999999",
    node_size=15,
    node_alpha=1,
    node_edgecolor="none",
    node_zorder=1,
    edge_color="#999999",
    edge_linewidth=1,
    edge_alpha=1,
    use_geom=True,
    orig_dest_points=None,
    route_color="r",
    route_linewidth=4,
    route_alpha=0.5,
    orig_dest_node_alpha=0.5,
    orig_dest_node_size=100,
    orig_dest_node_color="r",
    orig_dest_point_color="b",
):
    """
    Plot several routes along a networkx spatial graph.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    routes : list
        the routes as a list of lists of nodes
    bbox : tuple
        bounding box as north,south,east,west - if None will calculate from
        spatial extents of data. if passing a bbox, you probably also want to
        pass margin=0 to constrain it.
    fig_height : int
        matplotlib figure height in inches
    fig_width : int
        matplotlib figure width in inches
    margin : float
        relative margin around the figure
    axis_off : bool
        if True turn off the matplotlib axis
    bgcolor : string
        the background color of the figure and axis
    show : bool
        if True, show the figure
    save : bool
        if True, save the figure as an image file to disk
    close : bool
        close the figure (only if show equals False) to prevent display
    file_format : string
        the format of the file to save (e.g., 'jpg', 'png', 'svg')
    filename : string
        the name of the file if saving
    dpi : int
        the resolution of the image file if saving
    annotate : bool
        if True, annotate the nodes in the figure
    node_color : string
        the color of the nodes
    node_size : int
        the size of the nodes
    node_alpha : float
        the opacity of the nodes
    node_edgecolor : string
        the color of the node's marker's border
    node_zorder : int
        zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
        nodes beneath them or 3 to plot nodes atop them
    edge_color : string
        the color of the edges' lines
    edge_linewidth : float
        the width of the edges' lines
    edge_alpha : float
        the opacity of the edges' lines
    use_geom : bool
        if True, use the spatial geometry attribute of the edges to draw
        geographically accurate edges, rather than just lines straight from node
        to node
    orig_dest_points : list of tuples
        optional, a group of (lat, lng) points to plot instead of the
        origins and destinations of each route nodes
    route_color : string
        the color of the route
    route_linewidth : int
        the width of the route line
    route_alpha : float
        the opacity of the route line
    orig_dest_node_alpha : float
        the opacity of the origin and destination nodes
    orig_dest_node_size : int
        the size of the origin and destination nodes
    orig_dest_node_color : string
        the color of the origin and destination nodes
    orig_dest_point_color : string
        the color of the origin and destination points if being plotted instead
        of nodes

    Returns
    -------
    fig, ax : tuple
    """
    # plot the graph but not the routes; show/save/close are deferred until
    # the routes have been drawn onto the same axes below
    fig, ax = plot_graph(
        G,
        bbox=bbox,
        fig_height=fig_height,
        fig_width=fig_width,
        margin=margin,
        axis_off=axis_off,
        bgcolor=bgcolor,
        show=False,
        save=False,
        close=False,
        filename=filename,
        dpi=dpi,
        annotate=annotate,
        node_color=node_color,
        node_size=node_size,
        node_alpha=node_alpha,
        node_edgecolor=node_edgecolor,
        node_zorder=node_zorder,
        edge_color=edge_color,
        edge_linewidth=edge_linewidth,
        edge_alpha=edge_alpha,
        use_geom=use_geom,
    )
    # save coordinates of the given reference points
    orig_dest_points_lats = []
    orig_dest_points_lons = []
    if orig_dest_points is None:
        # if caller didn't pass points, use the first and last node in each route as
        # origin/destination points
        for route in routes:
            origin_node = route[0]
            destination_node = route[-1]
            orig_dest_points_lats.append(G.nodes[origin_node]["y"])
            orig_dest_points_lats.append(G.nodes[destination_node]["y"])
            orig_dest_points_lons.append(G.nodes[origin_node]["x"])
            orig_dest_points_lons.append(G.nodes[destination_node]["x"])
    else:
        # otherwise, use the passed points as origin/destination points
        for point in orig_dest_points:
            orig_dest_points_lats.append(point[0])
            orig_dest_points_lons.append(point[1])
        # explicit points get their own color, overriding the node color
        orig_dest_node_color = orig_dest_point_color
    # scatter the origin and destination points (zorder 4: above the route
    # lines drawn at zorder 3 below)
    ax.scatter(
        orig_dest_points_lons,
        orig_dest_points_lats,
        s=orig_dest_node_size,
        c=orig_dest_node_color,
        alpha=orig_dest_node_alpha,
        edgecolor=node_edgecolor,
        zorder=4,
    )
    # plot the routes lines
    lines = []
    for route in routes:
        lines.extend(_node_list_to_coordinate_lines(G, route, use_geom))
    # add the lines to the axis as a linecollection
    lc = LineCollection(
        lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3
    )
    ax.add_collection(lc)
    # save and show the figure as specified
    fig, ax = _save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
    return fig, ax
def _parse_book_info(html):
    """Parse Douban book info (author, publisher, publish date, price).

    :param html(string): raw html of the book-info section
    """
    # Replace <br> variants with a sentinel so each field's value is
    # delimited for the regex below.
    end_flag = 'END_FLAG'
    html = html.replace('<br>', end_flag)
    html = html.replace('<br/>', end_flag)
    text = lxml.html.fromstring(html).text_content()
    # Matches "<label>:<value>END_FLAG" with either ASCII or fullwidth colon.
    pattern = r'{}[::](.*?){}'
    fields = [
        ('author', '作者'),
        ('press', '出版社'),
        ('publish_date', '出版年'),
        ('price', '定价'),
    ]
    info = {}
    for key, column in fields:
        match = re.search(pattern.format(column, end_flag), text, re.I | re.DOTALL)
        info[key] = match.group(1).strip()
    return info
def offers(request, region_slug, language_code=None):
    """
    Collect all offers related to a region and serialize them to JSON.

    Returns:
        [String]: JSON array of transformed offers
    """
    region = Region.objects.get(slug=region_slug)
    payload = [transform_offer(offer) for offer in region.offers.all()]
    return JsonResponse(payload, safe=False)
def scan_to_scan(vol_names, bidir=False, batch_size=1, prob_same=0, no_warp=False, **kwargs):
    """
    Generator for scan-to-scan registration.

    Yields ([source, target], outputs) pairs drawn from volgen, where
    outputs is [target], optionally followed by the source (bidir) and an
    all-zero warp array (unless no_warp).

    Parameters:
        vol_names: List of volume files to load, or list of preloaded volumes.
        bidir: Yield input image as output for bidirectional models. Default is False.
        batch_size: Batch size. Default is 1.
        prob_same: Induced probability that source and target inputs are the same. Default is 0.
        no_warp: Excludes null warp in output list if set to True (for affine training).
            Default if False.
        kwargs: Forwarded to the internal volgen generator.
    """
    zeros = None
    gen = volgen(vol_names, batch_size=batch_size, **kwargs)
    while True:
        # volgen yields a sequence; element [0] is the image batch here --
        # presumably remaining elements are labels/segmentations (see volgen).
        scan1 = next(gen)[0]
        scan2 = next(gen)[0]
        # some induced chance of making source and target equal; which side
        # is copied onto the other is itself chosen at random
        if prob_same > 0 and np.random.rand() < prob_same:
            if np.random.rand() > 0.5:
                scan1 = scan2
            else:
                scan2 = scan1
        # cache the all-zero "true warp" array once the batch shape is known
        if not no_warp and zeros is None:
            # strip the first and last dimensions of the batch to get the
            # spatial shape; the warp has one channel per spatial dim
            shape = scan1.shape[1:-1]
            zeros = np.zeros((batch_size, *shape, len(shape)))
        invols = [scan1, scan2]
        outvols = [scan2, scan1] if bidir else [scan2]
        if not no_warp:
            outvols.append(zeros)
        yield (invols, outvols)
def provision_new_database_for_variant_warehouse(db_name):
    """Create a variant warehouse database of the specified name and shard
    its collections.

    An existing database (one that already has collections) is left
    untouched.
    """
    # Passing the secrets_file overrides any password already in the uri.
    db_handle = MongoDatabase(
        uri=cfg['mongodb']['mongo_admin_uri'],
        secrets_file=cfg['mongodb']['mongo_admin_secrets_file'],
        db_name=db_name
    )
    if db_handle.get_collection_names():
        logger.info(f'Found existing database named {db_name}.')
        return
    db_handle.enable_sharding()
    db_handle.shard_collections(collections_shard_key_map,
                                collections_to_shard=collections_shard_key_map.keys())
    logger.info(f'Created new database named {db_name}.')
def get_ellipse(mu: np.ndarray, cov: np.ndarray, draw_legend: bool = True):
    """
    Draw an ellipse centered at given location and according to specified covariance matrix

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse
    cov: ndarray of shape (2,2)
        Covariance of Gaussian
    draw_legend: bool
        Whether the returned trace appears in the plot legend

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # Semi-axis lengths: eigenvalues sorted largest-first.
    eig_major, eig_minor = np.linalg.eigvalsh(cov)[::-1]
    # Rotation angle of the major axis.
    if cov[0, 1] != 0:
        angle = atan2(eig_major - cov[0, 0], cov[0, 1])
    else:
        angle = np.pi / 2 if cov[0, 0] < cov[1, 1] else 0
    ts = np.linspace(0, 2 * pi, 100)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    # Parametric rotated ellipse, centered at the origin then shifted to mu.
    xs = (eig_major * cos_a * np.cos(ts)) - (eig_minor * sin_a * np.sin(ts))
    ys = (eig_major * sin_a * np.cos(ts)) + (eig_minor * cos_a * np.sin(ts))
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines",
                      marker_color="black", showlegend=draw_legend,
                      name="covariance")
def load_hashes(
        filename: str, hash_algorithm_names: Sequence[str]
) -> HashResult:
    """
    Load the size and hash hex digests for the given file.

    The file is read in _BUFFER_SIZE chunks until EOF, so files larger than a
    single buffer are hashed completely (the previous version read only one
    chunk, producing a wrong size and digest for large files).

    :param filename: path of the file to hash
    :param hash_algorithm_names: hashlib algorithm names, e.g. ('md5', 'sha256')
    :return: tuple of (size in bytes, {algorithm name: hex digest})
    """
    # See https://github.com/python/typeshed/issues/2928
    hashes: List['hashlib._hashlib._HASH'] = []  # type: ignore
    for name in hash_algorithm_names:
        hashes.append(hashlib.new(name))
    size = 0
    with open(filename, 'rb') as inp:
        # BUG FIX: loop until EOF instead of hashing a single chunk.
        while True:
            data = inp.read(_BUFFER_SIZE)
            if not data:
                break
            for hashf in hashes:
                hashf.update(data)
            size += len(data)
    digests: Dict[str, str] = {}
    for hash_name, hashf in zip(hash_algorithm_names, hashes):
        digests[hash_name] = hashf.hexdigest()  # type: ignore
    return (size, digests)
def ucs(st: Pixel, end: Pixel, data: np.ndarray):
    """
    Iterative uniform-cost (Dijkstra) search from st to end over pixel neighbors.

    :param st: start pixel
    :param end: end pixel
    :param data: image array, used only for the showImage visualization callback
    :return: list of pixels from start to end; ([], -1) when no path exists
    """
    from itertools import count

    q = PriorityQueue()
    # BUG FIX: monotonic tie-breaker. Without it, two queue entries with equal
    # priority fall back to comparing PixelPriority objects, which raises
    # TypeError if that class defines no ordering.
    tie = count()
    startPriorityPixel = PixelPriority(st, 0, 0)  # start priority pixel with 0 priority
    q.put((0, next(tie), startPriorityPixel))
    lowest = startPriorityPixel
    visited = dict()
    while lowest.pxl != end:
        if q.empty():  # No way to get to end
            return [], -1
        thisDistace = lowest.distance
        for u in lowest.pxl.getNeighbors():
            if u is not None and (u.x, u.y) not in visited:
                showImage(data, u.y, u.x)
                visited[(u.x, u.y)] = 1
                # 3-D Euclidean step cost from the current pixel to neighbor u
                dist = sqrt(pow(u.x - lowest.pxl.x, 2) + pow(u.y - lowest.pxl.y, 2) +
                            pow(u.elevation - lowest.pxl.elevation, 2))
                newDistance = thisDistace + dist
                priority = newDistance
                priorityPixel = PixelPriority(u, newDistance, priority)
                priorityPixel.predecessor = lowest
                q.put((priority, next(tie), priorityPixel))
        lowest = q.get()[2]
    path = []
    if lowest.distance != 0:  # We found the end, but it never got connected.
        lst = lowest
        while lst is not None:
            path.insert(0, lst.pxl)
            lst = lst.predecessor
    return path
def tvdb_refresh_token(token: str) -> str:
    """
    Refresh a TVDb JWT token.

    Online docs: api.thetvdb.com/swagger#!/Authentication/get_refresh_token.
    """
    status, content = request_json(
        "https://api.thetvdb.com/refresh_token",
        headers={"Authorization": f"Bearer {token}"},
        cache=False,
    )
    if status == 401:
        raise MnamerException("invalid token")
    if status != 200 or not content.get("token"):  # pragma: no cover
        raise MnamerNetworkException("TVDb down or unavailable?")
    return content["token"]
def cli():
    """
    A utility for creating, updating, listing and deleting AWS CloudFormation stacks.
    """
    # NOTE(review): this docstring likely doubles as the CLI help text if this
    # is a click/argparse entry point -- confirm before rewording it.
    logger.debug('cli() called')
def seek_and_destroy(initial=None, *targets):
    """
    You will be provided with an initial array (the first argument in the destroyer function),
    followed by one or more arguments. Remove all elements from the initial array that are of the
    same value as these arguments.

    :param initial: the initial sequence to filter (None preserves the old
        zero-argument stub behavior of returning None)
    :param targets: values to remove from the sequence
    :return: a new list with every element equal to any target removed
    """
    if initial is None:
        # Backward compatible with the original no-argument stub.
        return None
    return [element for element in initial if element not in targets]
def get_data():
    """Download all data sources, outer-merge them on date, and post-process.

    Returns:
        (df, werkdagen, weekend_): merged dataframe (indexed by date) plus the
        weekday/weekend splits produced by last_manipulations.
    """
    df_hospital = download_hospital_admissions()
    #sliding_r_df = walkingR(df_hospital, "Hospital_admission")
    df_lcps = download_lcps()
    df_mob_r = download_mob_r()
    df_gemeente_per_dag = download_gemeente_per_dag()
    df_reprogetal = download_reproductiegetal()
    df_uitgevoerde_testen = download_uitgevoerde_testen()
    type_of_join = "outer"
    df = pd.merge(df_mob_r, df_hospital, how=type_of_join, left_on='date',
                  right_on="Date_of_statistics")
    #df = df_hospital
    # Outer joins leave 'date' empty for right-only rows; backfill from the
    # right-hand key so later merges line up.
    df.loc[df['date'].isnull(), 'date'] = df['Date_of_statistics']
    df = pd.merge(df, df_lcps, how=type_of_join, left_on='date', right_on="Datum")
    df.loc[df['date'].isnull(), 'date'] = df['Datum']
    #df = pd.merge(df, sliding_r_df, how=type_of_join, left_on = 'date', right_on="date_sR", left_index=True )
    df = pd.merge(df, df_gemeente_per_dag, how=type_of_join, left_on='date',
                  right_on="Date_of_publication", left_index=True)
    df = pd.merge(df, df_reprogetal, how=type_of_join, left_on='date', right_on="Date",
                  left_index=True)
    df = pd.merge(df, df_uitgevoerde_testen, how=type_of_join, left_on='date',
                  right_on="Date_of_statistics", left_index=True)
    df = df.sort_values(by=['date'])
    df = splitupweekweekend(df)
    df, werkdagen, weekend_ = last_manipulations(df, None, None)
    # BUG FIX: set_index returns a new frame; the original call discarded the
    # result, making the line a no-op.
    df = df.set_index('date')
    return df, werkdagen, weekend_
def load_bin_file(bin_file, dtype="float32"):
    """Read a flat binary file into a 1-D NumPy array of the given dtype."""
    return np.fromfile(bin_file, dtype=dtype)
def add_user(id: int):
    """
    Add user
    ---
    description: Add new user
    summary: New user
    tags:
      - user
    responses:
      '200':
        description: Created successfully
      '403':
        description: Data not correct
    parameters:
      - name: id
        in: path
        description: id of the user
        required: true
    """
    # Stub endpoint: the docstring is an OpenAPI/Swagger spec consumed by the
    # API framework; the handler body is not implemented yet.
    pass
def nearest_value(array, value):
    """
    Searches array for the closest value to a given target.

    Arguments:
        array {NumPy Array} -- A NumPy array of numbers.
        value {float/int} -- The target value.

    Returns:
        float/int -- The closest value to the target value found in the array
        (on ties, the first occurrence wins, matching argmin semantics).
    """
    closest_idx = np.abs(array - value).argmin()
    return array[closest_idx]
def task(pid):
    """Synchronous non-deterministic task.

    Blocks for a fixed 0.2 s to simulate work, then prints a completion
    message for the given *pid*.
    """
    sleep(0.2)  # simulate one unit of work
    print('Task %s done' % pid)
def write_manifest(data, manifest_name):
    """
    Write the game metadata to a YAML manifest file.

    :param data: dict of game metadata; must contain a 'path' entry
    :param manifest_name: file name of the manifest created inside the game path
    :raises GamePathError: if no path is present or the manifest cannot be written
    """
    path = data.get('path')
    if not path:
        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        raise GamePathError('No path found in game data.\nData=' +\
            '\n'.join(['%s:\t%s' % (k, v) for k, v in data.items()]))
    path = get_absolute_path(path)
    # write the YAML data; the context manager replaces the manual
    # open/try/finally/close dance and still closes on error.
    try:
        with open(join_path(path, manifest_name), 'w') as f:
            yaml.dump(data, f, default_flow_style=False)
    except IOError as e:
        LOG.error('Failed writing manifest: %s', str(e))
        raise GamePathError('Failed writing manifest file.')
def interpret_go_point(s, size):
    """Convert a raw SGF Go Point, Move, or Stone value to coordinates.
    s -- 8-bit string
    size -- board size (int)
    Returns a pair (row, col), or None for a pass.
    Raises ValueError if the string is malformed or the coordinates are out of
    range.
    Only supports board sizes up to 26.
    The returned coordinates are in the GTP coordinate system (as in the rest
    of gomill), where (0, 0) is the lower left.
    """
    # b"" is an explicit pass; b"tt" is the traditional pass encoding,
    # accepted only on boards of size <= 19.
    if s == b"" or (s == b"tt" and size <= 19):
        return None
    # May propagate ValueError
    # NOTE: unpacking a 2-byte bytes object yields ints on Python 3 and
    # 1-char strings on Python 2; _bytestring_ord normalizes both.
    col_s, row_s = s
    col = _bytestring_ord(col_s) - 97  # 97 == ord("a")
    # SGF rows count down from the top edge; convert to GTP rows counting up.
    row = size - _bytestring_ord(row_s) + 96
    if not ((0 <= col < size) and (0 <= row < size)):
        raise ValueError
    return row, col
def get_domain_name_for(host_string):
    """
    Replaces namespace:serviceName syntax with serviceName.namespace one,
    appending default as namespace if None exists.
    """
    # Qualify with the default namespace when none is given.
    qualified = host_string if ":" in host_string else "default:" + host_string
    # Reverse the colon-separated parts and join them with dots.
    return ".".join(reversed(qualified.split(":")))
def bbox_classify(bboxes, possible_k):
    """bboxes: iterable of (x, y, w, h) boxes.

    return: best kmeans score anchor classes [(w1, h1), (w2, h2), ...],
    delegating the clustering to anchors_classify on the (w, h) pairs.
    """
    wh_pairs = [box[2:4] for box in bboxes]
    return anchors_classify(wh_pairs, possible_k)
def cleanFAAText(origText):
    """Take FAA text message and trim whitespace from end.
    FAA text messages have all sorts of trailing whitespace
    issues. Every line loses its right-trailing whitespace, and the
    recombined message loses any trailing blank lines, so the final
    line never ends with a newline character.
    Args:
        origText (str): Message text as it comes from the FAA.
    Returns:
        str: Cleaned up text as described above.
    """
    # Right-strip each line, then strip the whole message so a trailing
    # empty line (or trailing newlines) disappears as well.
    trimmed = [line.rstrip() for line in origText.split('\n')]
    return '\n'.join(trimmed).rstrip()
def LoadModel(gd_file, ckpt_file):
    """Load the model from GraphDef and Checkpoint.

    Args: gd_file: GraphDef proto text file. ckpt_file: TensorFlow Checkpoint file.
    Returns: TensorFlow session and tensors dict.

    NOTE: this is TensorFlow 1.x graph-mode code (tf.gfile, tf.Session,
    import_graph_def); it will not run under TF 2 without compat shims.
    """
    with tf.Graph().as_default():
        #class FastGFile: File I/O wrappers without thread locking.
        with tf.gfile.FastGFile(gd_file, 'r') as f:
            # Py 2: s = f.read().decode()
            s = f.read()
        # Serialized version of Graph
        gd = tf.GraphDef()
        # Merges an ASCII representation of a protocol message into a message.
        text_format.Merge(s, gd)
        tf.logging.info('Recovering Graph %s', gd_file)
        # Import the graph and bind the named tensors/ops into a dict.
        # The two name lists below MUST stay in the same order: the dict keys
        # on the left are matched positionally to the graph names on the right
        # (note 'all_embs' <- 'all_embs_out:0' and
        # 'softmax_weights' <- 'Reshape_3:0').
        t = {}
        [t['states_init'], t['lstm/lstm_0/control_dependency'],
        t['lstm/lstm_1/control_dependency'], t['softmax_out'], t['class_ids_out'],
        t['class_weights_out'], t['log_perplexity_out'], t['inputs_in'],
        t['targets_in'], t['target_weights_in'], t['char_inputs_in'],
        t['all_embs'], t['softmax_weights'], t['global_step']
        ] = tf.import_graph_def(gd, {}, ['states_init',
                                         'lstm/lstm_0/control_dependency:0',
                                         'lstm/lstm_1/control_dependency:0',
                                         'softmax_out:0',
                                         'class_ids_out:0',
                                         'class_weights_out:0',
                                         'log_perplexity_out:0',
                                         'inputs_in:0',
                                         'targets_in:0',
                                         'target_weights_in:0',
                                         'char_inputs_in:0',
                                         'all_embs_out:0',
                                         'Reshape_3:0',
                                         'global_step:0'], name='')
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Restore checkpoint weights, then initialize the LSTM state vars.
        sess.run('save/restore_all', {'save/Const:0': ckpt_file})
        sess.run(t['states_init'])
    return sess, t
def get_additional_rent(offer_markup):
    """ Searches for additional rental costs
    :param offer_markup:
    :type offer_markup: str
    :return: Additional rent, or None when no "Czynsz" row is present
    :rtype: int
    """
    soup = BeautifulSoup(offer_markup, "html.parser")
    for entry in soup.find_all(class_="item"):
        if "Czynsz" in entry.text:
            # Concatenate every digit run in the row, e.g. "1 200 zł" -> 1200.
            digits = re.findall(r'\d+', entry.text)
            return int("".join(digits))
    return
def make_known_disease_variants_filter(sample_ids_list=None):
    """ Function for retrieving known disease variants by presence in Clinvar and Cosmic."""
    # A variant qualifies if it has a non-benign ClinVar record...
    clinvar_clause = {
        "$and":
            [
                {"clinvar.rcv.accession": {"$exists": True}},
                {"clinvar.rcv.clinical_significance": {"$nin": ["Benign", "Likely benign"]}}
            ]
    }
    # ...or any COSMIC record at all.
    cosmic_clause = {"cosmic.cosmic_id": {"$exists": True}}
    result = {"$or": [clinvar_clause, cosmic_clause]}
    if sample_ids_list is not None:
        result = _append_sample_id_constraint_if_needed([result], sample_ids_list)
    return result
def main():
    """Command-line entry point: compute CFD specificity scores and mismatch
    neighborhoods for guide sequences, then write the feature matrix to CSV.

    Example inputs (for reference):
        in_file = '/Users/pereza1/Projects/Jo/data/gecko_proper_excel/mouse_library_A_gecko.xlsx'
        header = True
        sequence_field = 0
    """
    # user inputs
    in_file,outdir,kmer_counts_file,trie_file,mismatch_score,pam_score,header,sequence_field,cpf1 = arg_parser()
    # data read in
    data = sequence_file_read_in(in_file)
    # sequence data extraction; reshape to a (n, 1) column vector so feature
    # columns can be concatenated alongside it
    sequence_data = sequence_data_extraction(data,header,sequence_field)
    sequence_data = sequence_data.reshape(sequence_data.shape[0],1)
    final_header = np.array(['sequence']).reshape(1,1)
    # compute or load kmer dictionary object (DB-like: exposes cursor()/close()
    # -- presumably sqlite; TODO confirm)
    kmer_dictionary = kmer_exact_occurrence_dictionary(kmer_counts_file)
    kmer_dictionary_cursor = kmer_dictionary.cursor()
    # load CFD scoring matrices
    mm_scores, pam_scores = get_mm_pam_scores(mismatch_score, pam_score)
    # load trie
    tr = load_trie(trie_file)
    # compute specificity score and mismatch neighborhoods
    sequence_data,final_header = compute_specificity_score_and_mismatch_neighborhoods(sequence_data,final_header,
                                                                                     kmer_dictionary_cursor,tr,mm_scores,
                                                                                     pam_scores,cpf1)
    # generate final feature arrays (header row stacked on top of the data)
    final_feature_array = np.concatenate((final_header,sequence_data),0)
    #final_feature_array_standardized = np.concatenate((final_header,sequence_data_standardized),0)
    sys.stdout.write('final feature arrays generated\n')
    # write output to csv
    column_length = final_feature_array.shape[1]
    np.savetxt('%s/raw_features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0]), final_feature_array,
               fmt='%' + '%ss' % (column_length), delimiter=',')
    #np.savetxt('%s/standarized_features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0]), final_feature_array_standardized,
    #           fmt='%' + '%ss' % (column_length), delimiter=',')
    sys.stdout.write('final arrays written to csv\n%s\n' % ('%s/features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0])))
    # close the kmer_dictionary db
    kmer_dictionary.close()
    # completion stdout
    sys.stdout.write('feature generation for %s complete\n' % (in_file))
def sharedArray(dtype, dims):
    """Create a numpy array backed by an unsynchronized multiprocessing shared buffer."""
    n_items = int(np.prod(dims))
    raw = multiprocessing.Array(dtype, n_items, lock=False)
    # View the shared buffer as a numpy array without copying.
    return np.frombuffer(raw, dtype=dtype).reshape(dims)
def _read_hyperparameters(idx, hist):
"""Read hyperparameters as a dictionary from the specified history dataset."""
return hist.iloc[idx, 2:].to_dict() | 5,327,689 |
def test_register_short_password(client, post_data, cleanup_dummy_user):
    """Register a user with too short a password.

    The account is still created (a UserCreateV1 message is expected), but the
    password is rejected by policy and marked expired, so the user is warned
    and redirected.
    """
    with fml_testing.mock_sends(
        UserCreateV1({"msg": {"agent": "dummy", "user": "dummy"}})
    ):
        # "42" is shorter than the password policy minimum.
        post_data["register-password"] = post_data["register-password_confirm"] = "42"
        result = client.post('/', data=post_data)
    assert_redirects_with_flash(
        result,
        expected_url="/",
        expected_message=(
            'Your account has been created, but the password you chose does not comply '
            'with the policy (Constraint violation: Password is too short) and has thus '
            'been set as expired. You will be asked to change it after logging in.'
        ),
        expected_category="warning",
    )
def row_annotation(name=None, fn_require=None):
    """
    Function decorator for methods in a subclass of BaseMTSchema.
    Allows the function to be treated like an row_annotation with annotation name and value.

    @row_annotation()
    def a(self):
        return 'a_val'

    @row_annotation(name='b', fn_require=a)
    def b_1(self):
        return 'b_val'

    Will generate a mt with rows of {a: 'a_val', 'b': 'b_val'} if the function is called.
    TODO: Consider changing fn_require to be a list of requirements.

    When calling the function with annotation already set in the MT, the default behavior is to
    skip unless an overwrite=True is passed into the call.

    :param name: name in the final MT. If not provided, uses the function name.
    :param fn_require: method name strings in class that are dependencies.
    :return: decorator that wraps the method into a fluent (returns self) annotator.
    """
    def mt_prop_wrapper(func):
        annotation_name = name or func.__name__
        # fn_require checking, done when declared, not called.
        if fn_require:
            if not callable(fn_require):
                raise ValueError('Schema: dependency %s is not of type function.' % fn_require)
            if not hasattr(fn_require, 'mt_cls_meta'):
                raise ValueError('Schema: dependency %s is not a row annotation method.' % fn_require.__name__)
        @wraps(func)
        def wrapper(self, *args, overwrite=False, **kwargs):
            # Called already: per-instance bookkeeping makes the wrapper idempotent.
            instance_metadata = self.mt_instance_meta['row_annotations'][wrapper.__name__]
            if instance_metadata['annotated'] > 0:
                return self
            # MT already has annotation, so only continue if overwrite requested.
            if annotation_name in self.mt.rows()._fields:
                logger.warning('MT using schema class %s already has %s annotation.' % (self.__class__, annotation_name))
                if not overwrite:
                    return self
                logger.info('Overwriting matrix table annotation %s' % annotation_name)
            # Run the dependency annotation first (its own wrapper skips if done).
            if fn_require:
                getattr(self, fn_require.__name__)()
            try:
                func_ret = func(self, *args, **kwargs)
            # Do not annotate when RowAnnotationOmit raised.
            except RowAnnotationOmit:
                return self
            annotation = {annotation_name: func_ret}
            self.mt = self.mt.annotate_rows(**annotation)
            instance_metadata['annotated'] += 1
            instance_metadata['result'] = func_ret
            return self
        # Class-level marker so fn_require can validate its dependencies.
        wrapper.mt_cls_meta = {
            'annotated_name': annotation_name
        }
        return wrapper
    return mt_prop_wrapper
def _alert(sound=None, player=None):
    """Play an alert sound for the OS.

    Falls back to the module-level Options.sound / Options.player when the
    arguments are omitted; silently does nothing when either is missing or
    playback fails (an alert must never crash the caller).
    """
    if sound is None:
        sound = Options.sound
    if player is None:
        player = Options.player
    if player is None or sound is None:
        return
    # 'play' (sox) takes a quiet flag; other players get just the file.
    cmd = [player, '-q', sound] if player == 'play' else [player, sound]
    try:
        subprocess.call(cmd)
    except Exception:
        pass
def test_parse_correctness(
    format: str,
    quoted: str,
    unquoted: str,
) -> None:
    """
    Quoted strings parse correctly.

    Builds a single statement whose object is the escaped/quoted form, parses
    it in the given serialization format, and checks that the resulting
    Literal holds the unescaped value.
    """
    # Quad-capable formats need an explicit graph term in the statement.
    if format in QUAD_FORMATS:
        data = f'<example:Subject> <example:Predicate> "{quoted}" <example:Graph>.'
    else:
        data = f'<example:Subject> <example:Predicate> "{quoted}".'
    graph = ConjunctiveGraph()
    graph.parse(data=data, format=format)
    objs = list(graph.objects())
    assert len(objs) == 1
    obj = objs[0]
    assert isinstance(obj, Literal)
    assert isinstance(obj.value, str)
    assert obj.value == unquoted
def create_user_table():
    """Create the user table on the module-level engine.

    Safe to call repeatedly: SQLAlchemy's create_all skips tables that
    already exist by default.
    """
    User.metadata.create_all(engine)
async def test_turn_away_mode_on_cooling(hass, setup_comp_3):
    """Test the setting away mode when cooling.

    With the switch on and the sensor reading 25, setting a target of 19 and
    then the away preset should replace the target with the away temperature.
    """
    _setup_switch(hass, True)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 19)
    await common.async_set_preset_mode(hass, PRESET_AWAY)
    state = hass.states.get(ENTITY)
    # 30 is presumably the away-mode temperature configured by the
    # setup_comp_3 fixture -- TODO confirm against the fixture definition.
    assert 30 == state.attributes.get("temperature")
def path_exists_case_insensitive(path, root="/"):
    """
    Checks if a `path` exists in given `root` directory, similar to
    `os.path.exists` but case-insensitive. If there are multiple
    case-insensitive matches, the first one is returned. If there is no match,
    an empty string is returned.
    :param str path: Relative path of item to find in the `root` directory.
    :param str root: Directory where we will look for `path`.
    :return: Absolute and case-sensitive path to search result on hard drive.
    :rtype: str
    """
    if not osp.isdir(root):
        raise ValueError("'{0}' is not a directory.".format(root))
    if path in ["", "/"]:
        return root
    path_list = path.lstrip(osp.sep).split(osp.sep)
    path_list_lower = [x.lower() for x in path_list]
    i = 0
    local_paths = []
    # Walk the tree, pruning at each depth every entry whose name does not
    # match the i-th path component case-insensitively; mutating `dirs` in
    # place makes os.walk descend only into the survivors.
    # NOTE(review): `root` is deliberately rebound by the loop variable here.
    for root, dirs, files in os.walk(root):
        for d in list(dirs):
            if not d.lower() == path_list_lower[i]:
                dirs.remove(d)
        for f in list(files):
            if not f.lower() == path_list_lower[i]:
                files.remove(f)
        local_paths = [osp.join(root, name) for name in dirs + files]
        i += 1
        # All components consumed: local_paths holds the candidate matches.
        if i == len(path_list_lower):
            break
    if len(local_paths) == 0:
        return ''
    else:
        return local_paths[0]
def parse_time(date_time, time_zone):
    """Returns the seconds between now and the scheduled time.

    :param date_time: date/time string understood by pendulum.parse
    :param time_zone: time zone name used both for "now" and for parsing
    :raises ScheduleError: if the parsed time is not in the future
    """
    now = pendulum.now(time_zone)
    update = pendulum.parse(date_time, tz=time_zone)
    # If a time zone is not specified, it will be set to local.
    # When passing only time information the date will default to today.
    # The time will be set to 00:00:00 if it's not specified.
    # A future date is needed.
    # BUG FIX: use total_seconds() -- the .seconds attribute drops whole days
    # (wrong result for times more than a day away) and, for timedelta, is
    # never negative, so the past-date check could never fire.
    secs = (update - now).total_seconds()
    if secs < 0:
        raise ScheduleError(ScheduleError.pastDateError)
    return secs
def ls(request):
    """
    List a directory on the server.

    Returns a JSON description (via entry_info) of the entries under
    MEDIA_ROOT/USER_FILES_PATH/<dir>, where <dir> comes from the 'dir'
    GET parameter.
    """
    dir = request.GET.get("dir", "")
    root = os.path.relpath(os.path.join(
        settings.MEDIA_ROOT,
        settings.USER_FILES_PATH
    ))
    # NOTE(review): 'dir' comes straight from the query string and is joined
    # without normalization -- looks vulnerable to path traversal ('..');
    # confirm callers/URL config restrict it.
    fulldir = os.path.join(root, dir)
    # NOTE(review): HttpResponse(mimetype=...) was removed in Django 1.7
    # (replaced by content_type=); this code appears to target Django < 1.7.
    response = HttpResponse(mimetype="application/json")
    simplejson.dump(entry_info(fulldir), response)
    return response
def test_ping():
    """
    Builds the topology described on the following schema and ping h2 from h1.

    ::

        +------+                               +------+
        |      |  +------+      +------+       |      |
        |  h1  <----->  s1  <----->  s2  <----->  h2  |
        |      |  +------+      +------+       |      |
        +------+                               +------+

    Exercises static L3 routing across two Docker "switch" containers: after
    addressing and routes are set, a single ICMP echo from hs1 must reach
    10.0.30.1 (hs2).
    """
    # Setup which shell to use
    shell = 'bash'
    # Build topology: four containers (two hosts, two routers) wired in a line.
    platform = DockerPlatform(None, None)
    platform.pre_build()
    h1 = Node(identifier='hs1', type='host')
    h2 = Node(identifier='hs2', type='host')
    s1 = Node(identifier='sw1', type='host')
    s2 = Node(identifier='sw2', type='host')
    hs1 = platform.add_node(h1)
    hs2 = platform.add_node(h2)
    sw1 = platform.add_node(s1)
    sw2 = platform.add_node(s2)
    # Ports on the switches and hosts.
    s1p1 = BidirectionalPort(identifier='sw1-3')
    s1p2 = BidirectionalPort(identifier='sw1-4')
    platform.add_biport(s1, s1p1)
    platform.add_biport(s1, s1p2)
    s2p1 = BidirectionalPort(identifier='sw2-3')
    s2p2 = BidirectionalPort(identifier='sw2-4')
    platform.add_biport(s2, s2p1)
    platform.add_biport(s2, s2p2)
    h1p1 = BidirectionalPort(identifier='hs1-1')
    h2p1 = BidirectionalPort(identifier='hs2-1')
    platform.add_biport(h1, h1p1)
    platform.add_biport(h2, h2p1)
    # Links: h1 -- s1 -- s2 -- h2.
    link1 = BidirectionalLink(identifier='link1')
    platform.add_bilink((s1, s1p1), (h1, h1p1), link1)
    link2 = BidirectionalLink(identifier='link2')
    platform.add_bilink((s1, s1p2), (s2, s2p1), link2)
    link3 = BidirectionalLink(identifier='link3')
    platform.add_bilink((s2, s2p2), (h2, h2p1), link3)
    platform.post_build()
    # Ping test
    ###########
    # Configure IP and bring UP host 1 interfaces
    hs1('ip link set dev hs1-1 up', shell=shell)
    hs1('ip addr add 10.0.10.1/24 dev hs1-1', shell=shell)
    # Configure IP and bring UP host 2 interfaces
    hs2('ip link set dev hs2-1 up', shell=shell)
    hs2('ip addr add 10.0.30.1/24 dev hs2-1', shell=shell)
    # Configure IP and bring UP switch 1 interfaces
    sw1('ip link set dev sw1-3 up', shell=shell)
    sw1('ip link set dev sw1-4 up', shell=shell)
    sw1('ip addr add 10.0.10.2/24 dev sw1-3', shell=shell)
    sw1('ip addr add 10.0.20.1/24 dev sw1-4', shell=shell)
    # Configure IP and bring UP switch 2 interfaces
    sw2('ip link set dev sw2-3 up', shell=shell)
    sw2('ip addr add 10.0.20.2/24 dev sw2-3', shell=shell)
    sw2('ip link set dev sw2-4 up', shell=shell)
    sw2('ip addr add 10.0.30.2/24 dev sw2-4', shell=shell)
    # Set static routes in switches
    sw1('ip route add 10.0.30.0/24 via 10.0.20.2', shell=shell)
    sw2('ip route add 10.0.10.0/24 via 10.0.20.1', shell=shell)
    # Set gateway in hosts
    hs1('ip route add default via 10.0.10.2', shell=shell)
    hs2('ip route add default via 10.0.30.2', shell=shell)
    ping_result = hs1('ping -c 1 10.0.30.1', shell=shell)
    # Tear down containers before asserting so a failure doesn't leak them.
    platform.destroy()
    assert '1 packets transmitted, 1 received' in ping_result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.