content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from datetime import datetime
def _event_last_observed(event: EventsV1Event) -> datetime:
    """Return the most recent time this Kubernetes event was observed.

    Preference order: the series' last observed time (repeated events),
    then the event's own event_time, then the metadata creation timestamp.

    Note: with ``from datetime import datetime`` in scope, the original
    annotation ``datetime.datetime`` was not a valid attribute lookup.
    """
    if event.series:
        series_data: EventsV1EventSeries = event.series
        return series_data.last_observed_time
    if event.event_time:
        return event.event_time
    # Fall back to event creation
    return event.metadata.creation_timestamp
import numpy
def bytes(length):
    """Returns random bytes.

    .. seealso:: :func:`numpy.random.bytes`
    """
    # Bugfix: ``numpy.bytes`` does not exist; the random-byte generator
    # advertised by the docstring lives in ``numpy.random``.
    return numpy.random.bytes(length)
def init_json():
    """
    Build and return the initial report dictionary with empty/zeroed fields.
    Return : Dictionnary
    """
    return {
        'login': "",
        'hash': "",
        'duration': 0,
        'nbFiles': 0,
        'nbVirus': 0,
        'nbErrors': 0,
        'uuidUsb': "",
        'viruses': [],
    }
def django_popup_view_field_javascript():
    """
    Return HTML for django_popup_view_field JavaScript.
    Adjust url in settings.
    **Tag name**::
        django_popup_view_field_javascript
    **Usage**::
        {% django_popup_view_field_javascript %}
    """
    template_name = 'django_popup_view_field/scripts_include.html'
    template = loader.get_template(template_name)
    return template.render({})
import re
import os
def get_version(verbose=0):
    """Extract the OpenQL version string from the C++ header.

    Scans ``<inc_dir>/ql/version.h`` for the OPENQL_VERSION_STRING define
    and returns its quoted value, or None if not found.
    """
    # Raw string avoids invalid/deprecated escape-sequence warnings for
    # the ``\t`` sequences in the pattern.
    matcher = re.compile(r'[\t ]*#define[\t ]+OPENQL_VERSION_STRING[\t ]+"(.*)"')
    version = None
    with open(os.path.join(inc_dir, 'ql', 'version.h'), 'r') as f:
        for ln in f:
            m = matcher.match(ln)
            if m:
                version = m.group(1)
                break
    if verbose:
        print('get_version: %s' % version)
    return version
def apply_filters(row):
    """Applies filters to the input data and returns transformed row."""
    transformed = {}
    for column, value in row.items():
        # Columns without a registered filter pass through unchanged.
        transform = COLUMN_FILTERS.get(column, lambda x: x)
        transformed[column] = transform(value)
    return transformed
def save_convergence_statistics(
    inputs, results, dmf=None, display=True, json_path=None, report_path=None
):
    """Build convergence statistics and emit them to the requested sinks.

    Args:
        inputs: inputs of the convergence evaluation, forwarded to ``Stats``.
        results: results of the convergence evaluation, forwarded to ``Stats``.
        dmf: optional data-management instance; if given, stats are stored
            via ``Stats.to_dmf``.
        display: if True, print the report to stdout.
        json_path: if given, write the stats as JSON to this path.
        report_path: if given, write the text report to this path.

    Returns:
        The populated ``Stats`` object.
    """
    s = Stats(inputs, results)
    if display:
        s.report()
    if report_path:
        with open(report_path, "w") as f:
            s.report(f)
    if json_path is not None:
        with open(json_path, "w") as f:
            s.to_json(f)
    if dmf is not None:
        s.to_dmf(dmf)
    return s
import pkgutil
import platform
import sklearn
from importlib import import_module
from operator import itemgetter
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
ClusterMixin,
)
def _backported_all_estimators(type_filter=None):
    """
    Backported from scikit-learn 0.23.2:
    https://github.com/scikit-learn/scikit-learn/blob/0.23.2/sklearn/utils/__init__.py#L1146
    Use this backported `all_estimators` in old versions of sklearn because:
    1. An inferior version of `all_estimators` that old versions of sklearn use for testing,
       might function differently from a newer version.
    2. This backported `all_estimators` works on old versions of sklearn that don’t even define
       the testing utility variant of `all_estimators`.
    ========== original docstring ==========
    Get a list of all estimators from sklearn.
    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.
    Parameters
    ----------
    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.
    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actuall type of the class.
    """
    # lazy import to avoid circular imports from sklearn.base
    # pylint: disable=no-name-in-module, import-error
    import inspect  # bugfix: `inspect` was used below but never imported

    IS_PYPY = platform.python_implementation() == "PyPy"

    def is_abstract(c):
        # A class is abstract iff it declares a non-empty __abstractmethods__.
        if not (hasattr(c, "__abstractmethods__")):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    modules_to_ignore = {"tests", "externals", "setup", "conftest"}
    root = sklearn.__path__[0]  # sklearn package
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for _, modname, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            mod_parts = modname.split(".")
            if any(part in modules_to_ignore for part in mod_parts) or "._" in modname:
                continue
            module = import_module(modname)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [(name, est_cls) for name, est_cls in classes if not name.startswith("_")]
            # TODO: Remove when FeatureHasher is implemented in PYPY
            # Skips FeatureHasher for PYPY
            if IS_PYPY and "feature_extraction" in modname:
                classes = [(name, est_cls) for name, est_cls in classes if name == "FeatureHasher"]
            all_classes.extend(classes)
    all_classes = set(all_classes)
    estimators = [
        c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {
            "classifier": ClassifierMixin,
            "regressor": RegressorMixin,
            "transformer": TransformerMixin,
            "cluster": ClusterMixin,
        }
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            raise ValueError(
                "Parameter type_filter must be 'classifier', "
                "'regressor', 'transformer', 'cluster' or "
                "None, got"
                " %s." % repr(type_filter)
            )
    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
def max_size(resize_info):
    """Compute the canvas size needed to hold every resized image.

    Args:
        resize_info: mapping of name -> {'pos': (x, y), 'size': (w, h)}.

    Returns:
        (width, height) large enough to contain each image at its position.
    """
    max_w, max_h = 0, 0
    # The image names are irrelevant here; iterate the values directly.
    for info in resize_info.values():
        pos = info['pos']
        size = info['size']
        max_w = max(max_w, pos[0] + size[0])
        max_h = max(max_h, pos[1] + size[1])
    return max_w, max_h
def get_thru(path, specs=range(10), cameras='brz'):
    """Calculate the throughput in each camera for a single exposure.
    See https://github.com/desihub/desispec/blob/master/bin/desi_average_flux_calibration
    and DESI-6043.
    The result includes the instrument throughput as well as the fiber acceptance
    loss and atmospheric extinction.
    """
    calibs = {c: CoAdd(c) for c in cameras}
    exptime = None
    primary_area = 8.659e4  # primary-mirror collecting area in cm2
    for (FCAL,), camera, spec in iterspecs(path, 'fluxcalib'):
        if exptime is None:
            hdr = FCAL[0].read_header()
            exptime = hdr['EXPTIME']
        else:
            # All fluxcalib HDUs for one exposure must share one EXPTIME.
            if FCAL[0].read_header()['EXPTIME'] != exptime:
                raise RuntimeError(f'EXPTIME mismatch for fluxcalib in {path}')
        fluxcalib, ivar = FCAL['FLUXCALIB'].read(), FCAL['IVAR'].read()
        calibs[camera] += Spectrum(camera, np.median(fluxcalib, axis=0), np.median(ivar, axis=0))
    for camera in cameras:
        # Convert from (1e17 elec cm2 s / erg) to (elec/phot)
        # Bugfix: the original divided by an undefined name ``M1_area``;
        # use the local ``primary_area`` defined above.
        calibs[camera] /= (primary_area * exptime) / (1e17 * erg_per_photon[cslice[camera]])
    return calibs
def quicksort(lyst):
    """Sort ``lyst`` in place with quicksort and return it."""

    def _partition(items, lo, hi):
        # First element is the pivot; scan toward the middle from both ends.
        pivot = items[lo]
        left, right = lo + 1, hi
        while True:
            while left <= right and items[left] <= pivot:
                left += 1
            while right >= left and items[right] >= pivot:
                right -= 1
            if right < left:
                break
            items[left], items[right] = items[right], items[left]
        # Move the pivot into its final position.
        items[lo], items[right] = items[right], items[lo]
        return right

    def _sort(items, lo, hi):
        if lo < hi:
            split = _partition(items, lo, hi)
            _sort(items, lo, split - 1)
            _sort(items, split + 1, hi)

    _sort(lyst, 0, len(lyst) - 1)
    return lyst
def status_parameter_error():
    """Returns the value returned by the function calls to the library in case of parameter error. """
    # Thin wrapper: asks the petlink32 C library for its "parameter error"
    # status code and unpacks the int return value from the call descriptor.
    r = call_c_function(petlink32_c.status_parameter_error, [{'name': 'return_value', 'type': 'int', 'value': None}])
    return r.return_value
def df_to_dict(df: DataFrame) -> list[dict]:
    """Convert a DataFrame into a list of row dicts keyed by column name."""
    columns = list(df.columns)
    return [dict(zip(columns, row)) for row in df.values]
def parse_describeprocess(html_response):
    """Parse WPS DescribeProcess response.
    Parameters
    ----------
    html_response : string
        xml document from a DescribeProcess WPS request.
    Returns
    -------
    out : list of dict
        'identifier' : ProcessDescription -> ows:Identifier
        'inputs' : ProcessDescription -> DataInputs -> Input -> ows:Identifier
        'outputs' : ProcessDescription -> ProcessOutputs -> Output ->
            ows:Identifier
    """
    # XML structure:
    # wps:ProcessDescriptions
    #   ProcessDescription (list)
    #     ows:Identifier (text)
    #     ows:Title (text)
    #     DataInputs (optional)
    #       Input (list)
    #         ows:Identifier (text)
    #         ows:Title (text)
    #         LiteralData (xor)
    #           ows:DataType
    #           [...]
    #     ProcessOutputs
    #       Output (list)
    #         ows:Identifier (text)
    #         ows:Title (text)
    #         LiteralOutput (xor)
    #           ows:DataType
    #           [...]
    processes = []
    process_descriptions = xml_children_as_dict(
        etree.fromstring(html_response))
    for process_description_el in process_descriptions['ProcessDescription']:
        d = {'inputs': [], 'outputs': []}
        process_description = xml_children_as_dict(process_description_el)
        d['identifier'] = process_description['ows:Identifier'][0].text
        # DataInputs is optional: a process may take no inputs at all.
        if 'DataInputs' in process_description:
            data_inputs = xml_children_as_dict(
                process_description['DataInputs'][0])
            for input_element in data_inputs['Input']:
                input1 = xml_children_as_dict(input_element)
                d['inputs'].append(input1['ows:Identifier'][0].text)
        process_outputs = xml_children_as_dict(
            process_description['ProcessOutputs'][0])
        for output_element in process_outputs['Output']:
            output1 = xml_children_as_dict(output_element)
            d['outputs'].append(output1['ows:Identifier'][0].text)
        processes.append(d)
    return processes
def variational_implicit_step(system, dt, p, x, z, t):
    """
    Advance (p, x, z, t) by one implicit variational integrator step.

    For Lagrangian functions of the form
    L(j) = 1/(2h^2) (x_{j+1} - x_j)^2 - 1/2 (V(x_j) + V(x_{j+1})) - 1/2 (F(z_j) + F(z_{j+1}))

    Returns (pnew, xnew, znew, tnew).
    """
    tnew = t + dt
    # Explicit position update from the current momentum, the z-force Fz
    # and the potential gradient Vq.
    xnew = (
        x + (dt - 0.5 * dt ** 2 * system.Fz(z, t)) * p - 0.5 * dt ** 2 * system.Vq(x, t)
    )
    # znew is defined implicitly; solve the scalar residual numerically.
    # NOTE(review): ``fsolve`` is presumably scipy.optimize.fsolve — confirm
    # the module-level import.
    (znew,) = fsolve(
        lambda znew: z
        - znew
        + 0.5 * dt * np.linalg.norm((xnew - x) / dt) ** 2
        - 0.5
        * dt
        * (
            system.V(x, t)
            + system.V(xnew, tnew)
            + system.F(z, t)
            + system.F(znew, tnew)
        ),
        [z],
    )
    # Momentum update averages the force contributions at old and new states.
    pnew = (
        (1.0 - 0.5 * dt * system.Fz(z, t)) * p
        - 0.5 * dt * (system.Vq(x, t) + system.Vq(xnew, tnew))
    ) / (1.0 + 0.5 * dt * system.Fz(znew, tnew))
    return pnew, xnew, znew, tnew
def parse_regex(re):
    """Parse binary form for regular expression into canonical string.
    The input binary format is the one stored in the sandbox profile
    file. The out format is a canonical regular expression string using
    standard ASCII characters and metacharacters such as ^, $, +, *, etc.
    """
    # NOTE(review): the parameter name ``re`` shadows the stdlib ``re``
    # module inside this function, if that module is imported in this file.
    # Decode the binary blob into a list of regex operations.
    regex_list = create_regex_list(re)
    # Build a graph from the operations, then successively reduce and
    # normalize it until the canonical regex can be read off of it.
    g = Graph()
    g.fill_from_regex_list(regex_list)
    g.reduce()
    g.convert_to_canonical()
    g.simplify()
    g.combine_start_end_nodes()
    logger.debug(g)
    return g.regex
    #return [ g.unified_regex ]
def _validate_time_mode(mode, **kwargs):
"""Validate time mode."""
return mode | e30fd9071bde102b4986fe9ef846a812f7c08ff7 | 29,916 |
def flatten() -> GraphBuilder:
    """
    dl.flatten layer builder

    Returns a builder that, given the previous layer's metadata, produces
    metadata for a flatten layer.
    """
    def graph_builder(prev_layer: Metadata) -> Metadata:
        # Fresh metadata for this layer; carry over the regularized nodes.
        metadata = {}
        init_regularized_nodes(metadata, prev_layer)
        graph = prev_layer['graph']
        # Collapse all unit dimensions into a single flat dimension.
        metadata['units'] = (np.prod(prev_layer['units']),)
        metadata['graph'] = rc.flatten(graph)
        return metadata
    return graph_builder
def _important() -> str:
"""Returns a query term matching messages that are important."""
return 'is:important' | dced06645f5311b321d42cd3892627df5b30faec | 29,918 |
async def root():
    """Root endpoint returning a static greeting.

    Dependency is "static": the value of Depends doesn't get passed into
    the function; we still get redirected half the time though.
    """
    return {"message": "Hello World"}
from typing import Hashable
import encodings
def line_width(et: pd.DataFrame, lw_by: Hashable):
    """Default edge line width function."""
    if lw_by is None:
        # No column selected: constant width of 1 for every edge.
        return pd.Series([1] * len(et), name="lw")
    return encodings.data_linewidth(et[lw_by], et[lw_by])
def run_state_machine(ctx, callback):
    """Run the libmongocrypt state machine until completion.
    :Parameters:
      - `ctx`: A :class:`MongoCryptContext`.
      - `callback`: A :class:`MongoCryptCallback`.
    :Returns:
      The completed libmongocrypt operation, or None when the context
      reports MONGOCRYPT_CTX_DONE.
    """
    while True:
        state = ctx.state
        # Check for terminal states first.
        if state == lib.MONGOCRYPT_CTX_ERROR:
            ctx._raise_from_status()
        elif state == lib.MONGOCRYPT_CTX_READY:
            return ctx.finish()
        elif state == lib.MONGOCRYPT_CTX_DONE:
            return None
        if state == lib.MONGOCRYPT_CTX_NEED_MONGO_COLLINFO:
            # libmongocrypt needs collection info from the server.
            list_colls_filter = ctx.mongo_operation()
            coll_info = callback.collection_info(
                ctx.database, list_colls_filter)
            if coll_info:
                ctx.add_mongo_operation_result(coll_info)
            ctx.complete_mongo_operation()
        elif state == lib.MONGOCRYPT_CTX_NEED_MONGO_MARKINGS:
            # mongocryptd marks which fields need encryption.
            mongocryptd_cmd = ctx.mongo_operation()
            result = callback.mark_command(ctx.database, mongocryptd_cmd)
            ctx.add_mongo_operation_result(result)
            ctx.complete_mongo_operation()
        elif state == lib.MONGOCRYPT_CTX_NEED_MONGO_KEYS:
            # Fetch the required data keys from the key vault.
            key_filter = ctx.mongo_operation()
            for key in callback.fetch_keys(key_filter):
                ctx.add_mongo_operation_result(key)
            ctx.complete_mongo_operation()
        elif state == lib.MONGOCRYPT_CTX_NEED_KMS:
            # Satisfy every outstanding KMS request before continuing.
            for kms_ctx in ctx.kms_contexts():
                with kms_ctx:
                    callback.kms_request(kms_ctx)
            ctx.complete_kms()
        else:
            raise MongoCryptError('unknown state: %r' % (state,))
def remove_na_arraylike(arr):
    """
    Return array-like containing only true/non-NaN values, possibly empty.
    """
    if is_extension_array_dtype(arr):
        # Extension arrays support boolean masking directly.
        return arr[notna(arr)]
    else:
        # Otherwise extract the underlying values before the NA test.
        return arr[notna(lib.values_from_object(arr))]
def conv_nested(image, kernel):
    """A naive implementation of convolution filter.

    Computes the 2D convolution of ``image`` with ``kernel`` using four
    nested loops; contributions falling outside the image are skipped,
    which is equivalent to zero padding at the borders.

    Args:
        image: numpy array of shape (Hi, Wi).
        kernel: numpy array of shape (Hk, Wk).
    Returns:
        out: numpy array of shape (Hi, Wi).
    """
    Hi, Wi = image.shape
    Hk, Wk = kernel.shape
    out = np.zeros((Hi, Wi))
    half_h = Hk // 2
    half_w = Wk // 2
    for row in range(Hi):
        for col in range(Wi):
            acc = 0.0
            for kr in range(Hk):
                for kc in range(Wk):
                    src_r = row - half_h + kr
                    src_c = col - half_w + kc
                    if 0 <= src_r < Hi and 0 <= src_c < Wi:
                        # Convolution flips the kernel in both axes.
                        acc = acc + image[src_r, src_c] * kernel[Hk - kr - 1, Wk - kc - 1]
            out[row, col] = acc
    return out
def getLatest(df):
    """Return the info columns plus the data of the last day.

    Takes the first five (info) columns of ``df`` and appends the last
    column under the name ``'latest'``.
    """
    # Copy the slice so the assignment below does not warn about (or
    # write through) a view of the caller's DataFrame.
    df_info = df.iloc[:, 0:5].copy()
    df_last = df.iloc[:, -1]
    df_info['latest'] = df_last
    return df_info
import time
def setup_camera(is_fullscreen = True):
    """
    Setup the PiCam to default PSVD settings, and return the camera as
    an object.
    Keyword Arguments:
    is_fullscreen -- Boolean value. True for fullscreen, false for window.
    """
    # ensure that camera is correctly installed and set it up to output to a
    # window and turn off AWB and exposure modes. If camera does not exist
    # print error message and quit program.
    camera = picamera.PiCamera()
    camera.resolution = s.PICTURE_RESOLUTION
    camera.preview_fullscreen = is_fullscreen
    camera.awb_mode = "off"
    # NOTE(review): exposure lock is deliberately disabled below — confirm
    # whether it should be re-enabled for PSVD defaults.
    #camera.exposure_mode = "off"
    if not is_fullscreen: camera.preview_window = s.CAMERA_WINDOW_SIZE
    time.sleep(s.WAKEUP_DELAY) # camera wake-up time: 2 s
    return camera
def CalculateGearyAutoMutability(ProteinSequence):
    """
    ####################################################################################
    Calculte the GearyAuto Autocorrelation descriptors based on
    Mutability.
    Usage:
    result=CalculateGearyAutoMutability(protein)
    Input: protein is a pure protein sequence.
    Output: result is a dict form containing 30 Geary Autocorrelation
    descriptors based on Mutability.
    ####################################################################################
    """
    # Delegate to the generic per-property Geary autocorrelation routine,
    # passing the mutability scale table and its label.
    result=CalculateEachGearyAuto(ProteinSequence,_Mutability,'_Mutability')
    return result
import torch
def get_mask_from_lengths_window_and_time_step(lengths, attention_window_size,
                                               time_step):
    """
    One for mask and 0 for not mask
    Args:
        lengths: 1-D tensor of per-utterance sequence lengths (batch).
        attention_window_size: half-width of the attention window around
            ``time_step``.
        time_step: zero-indexed
    Returns:
        (B, max_len) boolean tensor; positions inside the window are 0
        (unmasked), everything else is 1 (masked).
    """
    # Mask all initially.
    max_len = torch.max(lengths).item()
    B = len(lengths)
    # NOTE(review): allocates on the GPU — this function requires CUDA.
    mask = torch.cuda.BoolTensor(B, max_len)
    mask[:] = 1
    for ii in range(B):
        # Note that the current code actually have a minor side effect,
        # where the utterances that are shorter than the longest one will
        # still have their actual last time step unmasked when the decoding
        # passes beyond that time step. I keep this bug here simply because
        # it will prevent numeric errors when computing the attention weights.
        max_idx = lengths[ii] - 1
        # >=0, <= the actual sequence end idx (length-1) (not covered here)
        start_idx = min([max([0, time_step-attention_window_size]), max_idx])
        # <=length-1
        end_idx = min([time_step+attention_window_size, max_idx])
        if start_idx > end_idx:
            continue
        mask[ii, start_idx:(end_idx+1)] = 0
    return mask
def hamming_distance(a, b):
    """
    Return the Hamming distance between sequences ``a`` and ``b``.
    Sequences must be 1D and have the same length.
    """
    mismatches = np.not_equal(a, b)
    return np.count_nonzero(mismatches)
from typing import Dict
from typing import Union
from typing import List
def optimizer_to_map(vertices, optimizer: g2o.SparseOptimizer, is_sparse_bundle_adjustment=False) -> \
        Dict[str, Union[List, np.ndarray]]:
    """Convert a :class: g2o.SparseOptimizer to a dictionary containing locations of the phone, tags, and waypoints.
    Args:
        vertices: A dictionary of vertices. This is used to lookup the type of vertex pulled from the optimizer.
        optimizer: a :class: g2o.SparseOptimizer containing a map.
        is_sparse_bundle_adjustment: True if the optimizer is based on sparse bundle adjustment and False otherwise.
    Returns:
        A dictionary with fields 'locations', 'tags', and 'waypoints'. The 'locations' key covers a (n, 8) array
        containing x, y, z, qx, qy, qz, qw locations of the phone as well as the vertex uid at n points. The 'tags' and
        'waypoints' keys cover the locations of the tags and waypoints in the same format.

    NOTE(review): the code below builds odometry rows with 9 columns
    (pose + uid + pose_id), not the 8 described above — confirm which is
    the intended contract.
    """
    locations = np.reshape([], [0, 9])
    tagpoints = np.reshape([], [0, 3])
    tags = np.reshape([], [0, 8])
    waypoints = np.reshape([], [0, 8])
    waypoint_metadata = []
    # Scale up tag-corner coordinates for visibility in visualization.
    exaggerate_tag_corners = True
    for i in optimizer.vertices():
        mode = vertices[i].mode
        if mode == VertexType.TAGPOINT:
            tag_vert = find_connected_tag_vert(optimizer, optimizer.vertex(i))
            if tag_vert is None:
                # TODO: double-check that the right way to handle this case is to continue
                continue
            location = optimizer.vertex(i).estimate()
            if exaggerate_tag_corners:
                location = location * np.array([10, 10, 1])
            # Express the corner point in the tag's own frame.
            tagpoints = np.vstack((tagpoints, tag_vert.estimate().inverse() * location))
        else:
            location = optimizer.vertex(i).estimate().translation()
            rotation = optimizer.vertex(i).estimate().rotation().coeffs()
            if mode == VertexType.ODOMETRY:
                pose = np.concatenate([location, rotation, [i], [vertices[i].meta_data['pose_id']]])
                locations = np.vstack([locations, pose])
            elif mode == VertexType.TAG:
                pose = np.concatenate([location, rotation, [i]])
                if is_sparse_bundle_adjustment:
                    # adjusts tag based on the position of the tag center
                    pose[:-1] = (SE3Quat([0, 0, 1, 0, 0, 0, 1]).inverse() * SE3Quat(vertices[i].estimate)).to_vector()
                if 'tag_id' in vertices[i].meta_data:
                    pose[-1] = vertices[i].meta_data['tag_id']
                tags = np.vstack([tags, pose])
            elif mode == VertexType.WAYPOINT:
                pose = np.concatenate([location, rotation, [i]])
                waypoints = np.vstack([waypoints, pose])
                waypoint_metadata.append(vertices[i].meta_data)
    # convert to array for sorting
    locations = np.array(locations)
    locations = locations[locations[:, -1].argsort()]
    return {'locations': locations, 'tags': np.array(tags), 'tagpoints': tagpoints,
            'waypoints': [waypoint_metadata, np.array(waypoints)]}
def generate_url(mbid, level):
    """Build the AcousticBrainz endpoint URL for the given MBID and level.
    """
    return "".join((ACOUSTIC_BASE, mbid, level))
from typing import Sequence
from typing import List
def averaged_knots_unconstrained(n: int, p: int, t: Sequence[float]) -> List[
    float]:
    """
    Returns an averaged knot vector from parametrization vector `t` for an unconstrained B-spline.
    Args:
        n: count of control points - 1
        p: degree
        t: parametrization vector, normalized [0..1]
    """
    assert t[0] == 0.0
    assert t[-1] == 1.0
    # Clamped start: degree+1 copies of 0, then the averaged interior knots.
    knots = [0.0] * (p + 1)
    for j in range(1, n - p + 1):
        knots.append(sum(t[j: j + p]) / p)
    if knots[-1] > 1.0:
        raise ValueError('Normalized [0..1] values required')
    # Clamped end: degree+1 copies of 1.
    knots.extend([1.0] * (p + 1))
    return knots
def converge_launch_stack(desired_state, stacks):
    """
    Create steps that indicate how to transition from the state provided
    by the given parameters to the :obj:`DesiredStackGroupState` described by
    ``desired_state``.
    See note [Converging stacks] for more information.
    :param DesiredStackGroupState desired_state: The desired group state.
    :param set stacks: a set of :obj:`HeatStack` instances.
        This must only contain stacks that are being managed for the specified
        group.
    :rtype: :obj:`pbag` of `IStep`

    NOTE(review): ``get_scale_down_steps`` returns a ``map`` object which is
    later passed to ``len()`` and ``+`` — valid on Python 2 (where map
    returns a list) but not Python 3; confirm the target interpreter.
    """
    config = desired_state.stack_config
    # Bucket the stacks by their current Heat state.
    by_state = groupby(lambda stack: stack.get_state(), stacks)
    stacks_complete = by_state.get(StackState.CREATE_UPDATE_COMPLETE, [])
    stacks_failed = by_state.get(StackState.CREATE_UPDATE_FAILED, [])
    stacks_check_complete = by_state.get(StackState.CHECK_COMPLETE, [])
    stacks_check_failed = by_state.get(StackState.CHECK_FAILED, [])
    stacks_in_progress = by_state.get(StackState.IN_PROGRESS, [])
    stacks_delete_in_progress = by_state.get(StackState.DELETE_IN_PROGRESS, [])
    stacks_delete_failed = by_state.get(StackState.DELETE_FAILED, [])
    stacks_good = stacks_complete + stacks_check_complete
    stacks_amiss = (stacks_failed +
                    stacks_check_failed +
                    stacks_in_progress +
                    stacks_delete_in_progress)
    # DELETE_FAILED is unrecoverable here: abort convergence entirely.
    if stacks_delete_failed:
        reasons = [ErrorReason.String("Stacks in DELETE_FAILED found.")]
        return pbag([FailConvergence(reasons)])
    # If there are no stacks in CHECK_* or other work to be done, we assume
    # we're at the beginning of a convergence cycle and need to perform stack
    # checks.
    if stacks_complete and not (stacks_check_complete or stacks_amiss):
        return pbag([CheckStack(stack) for stack in stacks_complete])
    # Otherwise, if all stacks are in a good state and we have the right number
    # of stacks, we call update on the stacks in CHECK_COMPLETE and return
    # SUCCESS without waiting for it to finish (calling update on a stack in
    # CREATE_COMPLETE is essentially a no-op) so that there will be no stacks
    # in CREATE_* the next time otter tries to converge this group. This will
    # cause all of the stacks to be checked at that time and let otter know
    # if there are any stacks that have fallen into an error state.
    elif not stacks_amiss and len(stacks_good) == desired_state.capacity:
        return pbag([UpdateStack(stack=stack, stack_config=config, retry=False)
                     for stack in stacks_check_complete])

    def get_create_steps():
        # One CreateStack step per stack missing from the desired capacity.
        create_stack = CreateStack(stack_config=config)
        good_or_fixable_stack_count = (len(stacks_good) +
                                       len(stacks_in_progress) +
                                       len(stacks_check_failed))
        return [create_stack] * (desired_state.capacity -
                                 good_or_fixable_stack_count)

    def get_scale_down_steps():
        # Delete surplus stacks, preferring to keep the healthiest ones.
        stacks_in_preferred_order = (
            stacks_good + stacks_in_progress + stacks_check_failed)
        unneeded_stacks = stacks_in_preferred_order[desired_state.capacity:]
        return map(DeleteStack, unneeded_stacks)

    def get_fix_steps(scale_down_steps):
        # Update the CHECK_FAILED stacks that are not being scaled away.
        num_stacks_to_update = len(stacks_check_failed) - len(scale_down_steps)
        stacks_to_update = (stacks_check_failed[:num_stacks_to_update]
                            if num_stacks_to_update > 0 else [])
        return [UpdateStack(stack=s, stack_config=config)
                for s in stacks_to_update]

    create_steps = get_create_steps()
    scale_down_steps = get_scale_down_steps()
    fix_steps = get_fix_steps(scale_down_steps)
    delete_stacks_failed_steps = map(DeleteStack, stacks_failed)
    converge_later = (
        [ConvergeLater([ErrorReason.String("Waiting for stacks to finish.")])]
        if stacks_delete_in_progress or stacks_in_progress
        else [])
    return pbag(create_steps +
                fix_steps +
                scale_down_steps +
                delete_stacks_failed_steps +
                converge_later)
import os
def dir_content(path):
    """
    returns the content of given path, excluding unreadable files
    and dotfiles (unless SHOW_ALL is True)
    """
    entries = []
    for name in listdir(path):
        candidate = join_dirs(path, name)
        # Keep readable entries; hide dotfiles unless SHOW_ALL is set.
        if os.access(candidate, os.R_OK) and (SHOW_ALL or name[0] != '.'):
            entries.append(candidate)
    return entries
def strip_long_text(text, max_len, append=u'…'):
    """Return ``text`` shortened to at most ``max_len`` characters.

    If ``text`` is stripped, then ``append`` is added, and the resulting
    text has exactly ``max_len`` characters.
    """
    # Bugfix: the original compared against ``max_len - 1`` and so
    # needlessly rewrote strings of length max_len - 1 and max_len that
    # already fit.
    if len(text) <= max_len:
        return text
    return text[:max_len - len(append)] + append
import typing
import numpy
def function_rescale(data_and_metadata_in: _DataAndMetadataLike,
                     data_range: typing.Optional[DataRangeType] = None,
                     in_range: typing.Optional[DataRangeType] = None) -> DataAndMetadata.DataAndMetadata:
    """Rescale data and update intensity calibration.
    rescale(a, (0.0, 1.0))

    Maps values from ``in_range`` (or the data's own min/ptp when omitted)
    onto ``data_range`` (default (0.0, 1.0)). The result carries a fresh,
    blank intensity calibration and the input's dimensional calibrations.
    """
    data_and_metadata = DataAndMetadata.promote_ndarray(data_and_metadata_in)
    if not Image.is_data_valid(data_and_metadata.data):
        raise ValueError("Rescale: invalid data")
    used_data_range = data_range if data_range is not None else (0.0, 1.0)

    def calculate_data() -> _ImageDataType:
        data = data_and_metadata.data
        assert data is not None
        # Peak-to-peak of the source interval; guard against divide-by-zero
        # for constant data by falling back to a factor of 1.
        data_ptp = numpy.ptp(data) if in_range is None else in_range[1] - in_range[0]
        data_ptp_i = 1.0 / data_ptp if data_ptp != 0.0 else 1.0
        if in_range is not None:
            data_min = in_range[0]
        else:
            data_min = numpy.amin(data)
        data_span = used_data_range[1] - used_data_range[0]
        # Fast path for the default (0, 1) target range: skip scale/offset.
        if data_span == 1.0 and used_data_range[0] == 0.0:
            return typing.cast(_ImageDataType, (data - data_min) * data_ptp_i)
        else:
            m = data_span * data_ptp_i
            return typing.cast(_ImageDataType, (data - data_min) * m + used_data_range[0])

    intensity_calibration = Calibration.Calibration()
    return DataAndMetadata.new_data_and_metadata(calculate_data(), intensity_calibration=intensity_calibration, dimensional_calibrations=data_and_metadata.dimensional_calibrations)
def svn_diff_parse_next_patch(*args):
    """
    svn_diff_parse_next_patch(svn_patch_file_t patch_file, svn_boolean_t reverse,
        svn_boolean_t ignore_whitespace, apr_pool_t result_pool,
        apr_pool_t scratch_pool) -> svn_error_t
    """
    # SWIG-generated passthrough to the native Subversion diff library.
    return _diff.svn_diff_parse_next_patch(*args)
def Sub(inputs, **kwargs):
    """Calculate A - B.
    Parameters
    ----------
    inputs : list of Tensor
        The inputs, represent A and B respectively.
    Returns
    -------
    Tensor
        The output tensor.
    """
    CheckInputs(inputs, 2)
    arguments = ParseArguments(locals())
    # Bugfix: the op type must match the documented subtraction semantics;
    # the original mistakenly dispatched to 'Add'.
    output = Tensor.CreateOperator(nout=1, op_type='Sub', **arguments)
    # Output shape follows the first operand when it is known.
    if inputs[0].shape is not None:
        output.shape = inputs[0].shape[:]
    return output
def psf_1d(x, *p):
    """Evaluate a 1D jinc-squared (Airy-profile) PSF model at positions ``x``.

    Parameters are unpacked from ``p`` as (A, x0, alpha, C): amplitude,
    center, width scale, and constant offset.
    """
    A, x0, alpha, C = p
    r = (x - x0) * alpha
    y = np.zeros(r.shape)
    nonzero = r != 0
    y[nonzero] = A * (2 * special.j1(r[nonzero]) / r[nonzero]) ** 2
    # Limit of (2*J1(r)/r)**2 as r -> 0 is 1, so the peak value is A.
    y[~nonzero] = A
    return y + C
import functools
import time
def with_retries(func):
    """Wrapper for _http_request adding support for retries.

    Retries the wrapped call on the exceptions listed in
    ``_RETRY_EXCEPTIONS``, sleeping ``conflict_retry_interval`` seconds
    between attempts, for up to ``conflict_max_retries`` + 1 attempts.
    """
    @functools.wraps(func)
    def wrapper(self, url, method, **kwargs):
        # Fall back to module defaults when the client did not configure
        # its retry settings.
        if self.conflict_max_retries is None:
            self.conflict_max_retries = DEFAULT_MAX_RETRIES
        if self.conflict_retry_interval is None:
            self.conflict_retry_interval = DEFAULT_RETRY_INTERVAL
        num_attempts = self.conflict_max_retries + 1
        for attempt in range(1, num_attempts + 1):
            try:
                return func(self, url, method, **kwargs)
            except _RETRY_EXCEPTIONS as error:
                msg = ("Error contacting Ironic server: %(error)s. "
                       "Attempt %(attempt)d of %(total)d" %
                       {'attempt': attempt,
                        'total': num_attempts,
                        'error': error})
                if attempt == num_attempts:
                    # Out of attempts: surface the last error to the caller.
                    LOG.error(msg)
                    raise
                else:
                    LOG.debug(msg)
                    time.sleep(self.conflict_retry_interval)
    return wrapper
import os
import json
def load_json(path):
    """Load JSON file into Python object."""
    expanded = os.path.expanduser(path)
    with open(expanded, "r") as handle:
        return json.load(handle)
from datetime import datetime
import time
def arrow_format(jinja_ctx, context, *args, **kw):
    """Format datetime using Arrow formatter string.
    Context must be a time/datetime object.
    :term:`Arrow` is a Python helper library for parsing and formatting datetimes.
    Example:
    .. code-block:: html+jinja
        <li>
            Offer created at {{ offer.created_at|arrow_format('YYYYMMDDHHMMss') }}
        </li>
    `See Arrow formatting <http://crsmithdev.com/arrow/>`__.
    """
    assert len(args) == 1, "We take exactly one formatter argument, got {}".format(args)
    # NOTE(review): ``datetime.datetime``/``datetime.time`` are attribute
    # lookups on the datetime *module*; this requires ``import datetime``,
    # not ``from datetime import datetime`` — confirm the file's imports.
    assert isinstance(context, (datetime.datetime, datetime.time)), "Got context {}".format(context)
    # NOTE(review): the stdlib ``time`` module has no ``arrow_format``;
    # ``time`` here is presumably a project helper module — verify binding.
    return time.arrow_format(dt=context, dt_format=args[0])
def jsonify_query_result(conn, query):
    """deprecated"""
    # Materialize the query and serialize each row to a dict.
    # NOTE(review): ``conn`` is unused — rows come from ``query.all()``
    # (the commented-out path executed via the connection instead).
    res = query.all()
    #res = conn.execute(query)
    #return [dict(r) for r in res]
    return [r._asdict() for r in res]
def _postfix_queue(token_expr):
    """
    Form postfix queue from tokenized expression using shunting-yard algorithm.

    If expression have function, then presence of arguments for that function
    added before function token.
    If function have few arguments then RPN algorithm will pop them from stack
    until comma will be top token on stack

    :param token_expr: indexable sequence of tokens; each token exposes
        ``type``, ``index`` and ``precedence`` attributes
    :return: queue of tokens ready for reverse polish calculation
    """
    stack = deque()
    queue = deque()
    # One flag per nested function call: does the call have any arguments?
    have_args = deque()
    for token in token_expr:
        if token.type in {'FLOAT', 'INTEGER', 'CONST', 'COMPLEX'}:
            # Operands go straight to the output queue.
            queue.append(token)
        elif token.type == 'FUNC':
            stack.append(token)
            # If function have no arguments we append False before FUNC
            if token_expr[token.index + 1].type == 'RPARENT':
                have_args.append(False)
            else:
                have_args.append(True)
        elif not stack:
            stack.append(token)
        elif token.type == 'COMMA':
            # Flush the operators that belong to the current argument.
            while stack[-1].type != 'FUNC':
                queue.append(stack.pop())
            queue.append(token)
        elif token.type == 'LPARENT':
            stack.append(token)
        elif token.type == 'RPARENT':
            while stack[-1].type not in {'LPARENT', 'FUNC'}:
                queue.append(stack.pop())
                if not stack:
                    raise ArithmeticError("Parentheses error")
            if stack[-1].type == 'FUNC':
                # Emit an ARGS marker so the evaluator knows whether the
                # call site supplied any arguments.
                queue.append(_Token('', 'ARGS', have_args.pop()))
                queue.append(stack.pop())
            else:
                stack.pop()
        elif token.type in {'UMINUS', 'UPLUS'} and stack[-1].type == 'POWER':
            # From Python docs: The power operator binds more tightly
            # than unary operators on its left;
            # it binds less tightly than unary operators on its right.
            stack.append(token)
        elif token.precedence == stack[-1].precedence and \
                token.type in {'POWER', 'UMINUS', 'UPLUS'}:
            # Right-to-Left association operations
            stack.append(token)
        elif token.precedence <= stack[-1].precedence:
            # Pop all operators of greater-or-equal precedence before
            # pushing a left-associative operator.
            while stack:
                if token.precedence <= stack[-1].precedence:
                    queue.append(stack.pop())
                    continue
                else:
                    break
            stack.append(token)
        else:
            stack.append(token)
    # Drain whatever operators remain.
    while stack:
        queue.append(stack.pop())
    return queue
def get_from_list_to_examples(task_proc):
    """
    Return a function that converts 2d list (from csv) into example list.
    This can be different between DataProcessors.

    :param task_proc: a DataProcessor instance
    :return: callable mapping a 2d list to examples
    :raises NotImplementedError: for processor types without a converter
    """
    if isinstance(task_proc, DefaultProcessor):
        # Rows converted through this path are always labelled as "test".
        return lambda l: task_proc._create_examples(l, "test")
    else:
        # Bug fix: the message previously referenced ``FLAGS.task_proc``,
        # which is not the argument being inspected (and may not exist).
        raise NotImplementedError('from_list_to_examples for %s is required ' % (type(task_proc)))
from pathlib import Path
import yaml
def read_rules(rule_file=None):
    """Read rule from rule yaml file.

    Args:
        rule_file (str, optional): The path of rule yaml file. Defaults to None,
            which falls back to the bundled ``rule/default_rule.yaml``.

    Returns:
        dict: dict object read from yaml file, or None when the path is not a file.
    """
    default_rule_file = Path(__file__).parent / 'rule/default_rule.yaml'
    p = Path(rule_file) if rule_file else default_rule_file
    if not p.is_file():
        logger.error('DataDiagnosis: invalid rule file path - {}'.format(str(p.resolve())))
        return None
    baseline = None
    # SafeLoader prevents execution of arbitrary YAML tags from the rule file.
    with p.open() as f:
        baseline = yaml.load(f, Loader=yaml.SafeLoader)
    return baseline
from pymatgen.core.structure import Structure
def localized_rattle(
    structure: Structure,
    defect_coords: np.array,
    stdev: float = 0.25,
):
    """
    Given a pymatgen structure, it applies a random distortion to the coordinates
    of the atoms in a radius 5 A from defect atom.
    Random distortion chosen from a gaussian with a standard deviation of stdev.

    Args:
        structure :
            Structure
        defect_coords (np.array):
            cartesian coordinates of defect
        stdev (float):
            standard dev of the gaussian used for rattle (in A)
            (default: 0.25)
    Returns:
        rattled structure"""
    aaa = AseAtomsAdaptor()
    structure_copy = structure.copy()
    # Classify sites in 2 lists: inside or outside 5 A sphere
    sites_inside_cutoff, sites_outside_cutoff = [], []
    for site in structure_copy:
        distance, image = site.distance_and_image_from_frac_coords(defect_coords)[:2]
        if distance < 5:
            sites_inside_cutoff.append(site)
        else:
            sites_outside_cutoff.append(site)
    # Apply rattle to sites within 5 A sphere, round-tripping through ASE
    # because ase's Atoms.rattle() implements the gaussian displacement.
    structure_inside_cutoff = structure_copy.from_sites(sites_inside_cutoff)
    ase_struct = aaa.get_atoms(structure_inside_cutoff)
    ase_struct.rattle(stdev=stdev)
    rattled_structure = aaa.get_structure(ase_struct)
    # Add the sites outside the 5 A sphere to the rattled structure
    # (list comprehension used for its side effect of appending).
    [ rattled_structure.append(site_outside_cutoff.specie, site_outside_cutoff.frac_coords) for site_outside_cutoff in sites_outside_cutoff ]
    return rattled_structure
def calculate_jaccard(set_1, set_2) -> float:
    """Calculate the overlap coefficient of two sets.

    Note: despite the historical name, this computes the *overlap
    (Szymkiewicz-Simpson) coefficient* -- intersection size divided by the
    size of the smaller set -- not the classic Jaccard index
    (intersection / union).

    :param set set_1: set 1
    :param set set_2: set 2
    :return: coefficient in [0, 1]; 0.0 when either set is empty
    """
    # Guard against ZeroDivisionError when one of the sets is empty.
    if not set_1 or not set_2:
        return 0.0
    intersection = len(set_1.intersection(set_2))
    smaller_set = min(len(set_1), len(set_2))
    return intersection / smaller_set
def bad_request(e) -> 'template':
    """Displays a custom 400 error handler page.

    :param e: the error object passed in by the framework's error handler
    :return: rendered error page plus the 400 status code
    """
    return render_template('error_handler.html', code = 400, message = "Bad request", url = url_for('main.profile_page'), back_to = 'Profile Page'), 400
def quarter_of_year(datetimes):
    """Return the quarter of the year of given dates.

    :param datetimes: datetime-like input accepted by ``_month_of_year``
    :return: integer array with values in 1..4
    """
    # Months 1-3 -> 1, 4-6 -> 2, 7-9 -> 3, 10-12 -> 4.
    return ((_month_of_year(datetimes) - 1) // 3 + 1).astype(int)
def runner_entrypoint(args):
    """ Run bonobo using the python command entrypoint directly (bonobo.commands.entrypoint).

    :param args: command-line argument list forwarded unchanged
    :return: whatever ``entrypoint`` returns
    """
    return entrypoint(args)
def interface_details():
    """Get interface details, CLI view.

    Redirects to the login page when no successful login form exists;
    otherwise renders the detail template for the interface named in the
    POSTed ``details`` form field.
    """
    if success_login_form is None:
        return redirect(url_for('base_blueprint.login'))
    else:
        return render_template('more_int_detials.html',
                               details=GetDetails.more_int_details(device, username, password, ssh_port, request.form.get('details')))
def complement_sequence(sequence: str, reverse: bool = False) -> str:
    """Return the complement of *sequence*, optionally reversed.

    Args:
        sequence: Input nucleotide sequence (case-insensitive).
        reverse: When True, produce the reverse complement.

    Returns:
        The complemented (and optionally reversed) sequence string.
    """
    normalized = sequence.upper()
    if reverse:
        normalized = normalized[::-1]
    complemented = [NUCLEOTIDE_COMPLEMENT[base] for base in normalized]
    return ''.join(complemented)
def calc_llr(tree_dict: StrDict) -> int:
    """
    Calculate the longest linear route for a synthetic route

    :param tree_dict: the route
    :return: number of reaction steps on the longest branch
    """
    # The route tree alternates molecule/reaction levels, so the reaction
    # count is half the tree depth.
    return calc_depth(tree_dict) // 2
def _GetArmVersion(arch):
"""Returns arm_version for the GN build with the given architecture."""
if arch == 'armeabi':
return 6
elif arch == 'armeabi-v7a':
return 7
elif arch in ['arm64-v8a', 'x86', 'x86_64']:
return None
else:
raise Exception('Unknown arch: ' + arch) | fbad0d1066fe4a7e81d2341291b436f5dd98fff0 | 29,954 |
def package_copy(r, id, type, revision_number=None, version_name=None):
    """
    Copy package - create a duplicate of the Package, set author as current user

    :param r: request; ``r.user`` becomes the copy's author
    :param id: package id
    :param type: package type
    :param revision_number: optional revision number to copy
    :param version_name: optional version name to copy
    :return: JSON response describing the copy, or 403 when the user
             already owns a package with the copied name
    """
    revision = get_package_revision(id, type, revision_number, version_name)
    """ it may be useful to copy your own package ...
    if r.user.pk == revision.author.pk:
        return HttpResponseForbidden('You are the author of this %s' % revision.package.get_type_name())
    """
    try:
        package = Package.objects.get(
            full_name=revision.package.get_copied_full_name(),
            author__username=r.user.username
        )
        return HttpResponseForbidden(
            'You already have a %s with that name' % revision.package.get_type_name()
        )
    except Package.DoesNotExist:
        # Only "no such package" means we may copy.  The previous bare
        # ``except:`` silently swallowed unrelated errors (e.g. database
        # failures) and created a copy anyway.
        package = revision.package.copy(r.user)
        revision.save_new_revision(package)
    return render_to_response("json/%s_copied.json" % package.get_type_name(),
                              {'revision': revision},
                              context_instance=RequestContext(r),
                              mimetype='application/json')
def contract(equation, *operands, **kwargs):
    """
    Wrapper around :func:`opt_einsum.contract` that caches contraction paths.

    :param str equation: einsum-style contraction equation
    :param operands: tensors to contract
    :param bool cache_path: whether to cache the contraction path.
        Defaults to True.
    """
    backend = kwargs.pop('backend', 'numpy')
    cache_path = kwargs.pop('cache_path', True)
    if not cache_path:
        return opt_einsum.contract(equation, *operands, backend=backend, **kwargs)
    # memoize the contraction path
    out = kwargs.pop('out', None)
    kwargs_key = tuple(kwargs.items())
    # The optimal path depends only on operand *shapes*, not values, so
    # shapes (plus equation and kwargs) form the cache key.
    shapes = tuple(tuple(t.shape) for t in operands)
    key = equation, shapes, kwargs_key
    if key in _PATH_CACHE:
        expr = _PATH_CACHE[key]
    else:
        expr = opt_einsum.contract_expression(equation, *shapes, **kwargs)
        _PATH_CACHE[key] = expr
    return expr(*operands, backend=backend, out=out)
import array
def _compute_jn_pcoa_avg_ranges(jn_flipped_matrices, method):
    """Computes PCoA average and ranges for jackknife plotting

    returns 1) an array of jn_averages
            2) an array of upper values of the ranges
            3) an array of lower values for the ranges

    :param jn_flipped_matrices: sequence of equally-shaped 2-D PCoA matrices
    :param method: the method by which to calculate the range
        IQR: Interquartile Range
        ideal fourths: Ideal fourths method as implemented in scipy
        sdev: half a standard deviation on each side
    """
    x,y = shape(jn_flipped_matrices[0])
    # Flatten each matrix so per-cell statistics can be taken column-wise.
    all_flat_matrices = [matrix.ravel() for matrix in jn_flipped_matrices]
    summary_matrix = vstack(all_flat_matrices)
    matrix_sum = numpy_sum(summary_matrix, axis=0)
    matrix_average = matrix_sum / float(len(jn_flipped_matrices))
    matrix_average = matrix_average.reshape(x,y)
    if method == 'IQR':
        result = matrix_IQR(summary_matrix)
        matrix_low = result[0].reshape(x,y)
        matrix_high = result[1].reshape(x,y)
    elif method == 'ideal_fourths':
        result = idealfourths(summary_matrix, axis=0)
        matrix_low = result[0].reshape(x,y)
        matrix_high = result[1].reshape(x,y)
    elif method == "sdev":
        # calculate std error for each sample in each dimension
        sdevs = zeros(shape=[x,y])
        # ``range`` replaces the Python-2-only ``xrange`` (works on both).
        for j in range(y):
            for i in range(x):
                vals = array([pcoa[i][j] for pcoa in jn_flipped_matrices])
                sdevs[i,j] = vals.std(ddof=1)
        matrix_low = -sdevs/2
        matrix_high = sdevs/2
    return matrix_average, matrix_low, matrix_high
from typing import List
def get_readmission_label_keys(time_windows: List[int]) -> List[str]:
    """Build one readmission label key per time window.

    Args:
        time_windows: list<int> of the considered time windows (in days) for
            readmission.

    Returns:
        list<str> of labels for readmission within X days.
    """
    labels = []
    for window in time_windows:
        labels.append(f"{READMISSION_LABEL_BASE}_{window}_days")
    return labels
import subprocess
import os
import sys
def runTool (toolFileName):
    """ Call an external application called toolFileName.
    Note that .exe extension may be omitted for windows applications.

    Include any arguments in arguments parameter.

    Example:
    returnString = te.runTool (['myplugin', 'arg1', 'arg2'])

    If the external tool writes to stdout, this will be captured and returned.

    :param toolFileName: argument to external tool
    :returns: String return by external tool, if any.
    """
    try:
        p = os.path.dirname(sys.executable)
        root, waste = os.path.split(p)
        # Tools live in <root>/telluriumTools/<name>/<name>.exe next to the
        # Python installation.  os.path.join replaces the hard-coded '\\'
        # separators so the path is built portably.
        tool = toolFileName[0]
        toolFileName[0] = os.path.join(root, 'telluriumTools', tool, tool + '.exe')
        return subprocess.check_output(toolFileName)
    except (subprocess.CalledProcessError, OSError) as e:
        # OSError covers "executable not found"; chain the cause so the
        # original failure is not lost.
        raise Exception('Tool failed to run correctly or could not be found') from e
def multiplication(image1, image2):
    """ Multiply (pixel-wise) the two input images and return the result.

    <gui>
        <item name="image1" type="Image" label="Image 1"/>
        <item name="image2" type="Image" label="Image 2"/>
        <item name="result" type="Image" role="return"
              initializer="output=True" label="Result"/>
    </gui>
    """
    # Delegates to the generic helper, parameterised with ITK's
    # pixel-wise multiply filter.
    return pixelwise_operation(image1, image2, itk.MultiplyImageFilter)
def ioat_scan_accel_engine(client, pci_whitelist):
    """Scan and enable IOAT accel engine.

    Args:
        client: JSON-RPC client used to issue the call.
        pci_whitelist: Python list of PCI addresses in
                       domain:bus:device.function format or
                       domain.bus.device.function format
    """
    # Only include the whitelist key when a non-empty list was supplied.
    params = {'pci_whitelist': pci_whitelist} if pci_whitelist else {}
    return client.call('ioat_scan_accel_engine', params)
import zipfile
import csv
def load_test(tstfile):
    """Load a test from file.

    This reads a test from a csv file.

    Parameters
    ----------
    tstfile : :class:`str`
        Path to the file

    Raises
    ------
    LoadError
        If the archive cannot be opened or its contents are malformed.
    """
    # default version string
    version_string = "1.0.0"
    try:
        # Test files are zip archives containing an "info.csv" manifest.
        with zipfile.ZipFile(tstfile, "r") as zfile:
            info = TxtIO(zfile.open("info.csv"))
            data = csv.reader(info)
            first_line = _nextr(data)
            # Older files have no "wtp-version" row; keep the default then.
            if first_line[0] == "wtp-version":
                version_string = first_line[1]
                header = _nextr(data)
            else:
                header = first_line
            version = version_parse(version_string)
            _check_version(version)
            if header[0] != "Testtype":
                raise ValueError(
                    f"load_test: expected 'Testtype' but got '{header[0]}'"
                )
            # Dispatch on the test type; only pumping tests are supported.
            if header[1] == "PumpingTest":
                routine = _load_pumping_test
            else:
                raise ValueError(f"load_test: unknown test type '{header[1]}'")
    except Exception as exc:
        raise LoadError(f"load_test: couldn't load test '{tstfile}'") from exc
    return routine(tstfile)
def reorder_instruments(curr_instruments):
    """
    Dialog to remove and add instruments at certain indexes.

    Repeatedly asks the user to move one instrument at a time; the working
    copy is only committed when the user confirms the new order.

    :param curr_instruments: initial list of instruments
    :return: The list of instruments in the new order
    """
    while True:
        instruments_with_indexes(curr_instruments)
        # Work on a copy so an unconfirmed reordering can be discarded.
        tmp_instruments = [instrument for instrument in curr_instruments]
        old_idx = prompt("Enter the index of the instrument to move or [enter] to finish: ",
                         validator=IndexValidator(len(tmp_instruments) - 1)) or None
        if old_idx is None:
            break
        move_instrument = tmp_instruments.pop(int(old_idx))
        instruments_with_indexes(tmp_instruments)
        new_idx = prompt(f"Enter the index to insert {move_instrument.part_name()}: ",
                         validator=IndexValidator(len(tmp_instruments), allow_empty=False))
        tmp_instruments.insert(int(new_idx), move_instrument)
        print("New instrument order: ")
        instruments_with_indexes(tmp_instruments)
        correct = prompt("Is this correct? [Y/n] ", default='Y', validator=YNValidator())
        if answered_yes(correct):
            # Commit the confirmed order before the next round.
            curr_instruments = [instrument for instrument in tmp_instruments]
    return curr_instruments
def compute_propeller_with_normal_position(arg_class, cabin_arr):
    """
    compute propeller array and connected arm array

    :param cabin_arr: numpy array of cabin point cloud; column 1 is y
    :param arg_class: argument class carrying the geometry parameters below
    :return: propeller_arr, arm_arr (both (N, 3) cartesian point arrays)
    """
    l1 = arg_class.l1
    l2 = arg_class.l2
    l3 = arg_class.l3
    # fuselage length
    l = l1 + l2 + l3
    # propeller setting ratio
    txs = arg_class.txs  # the ratio of setting position corresponding to overall length
    angles = arg_class.angles  # angle of arm which is connected with a propeller
    # outer line of the collection of propeller
    radius = arg_class.radius
    # the radius of each propeller
    pr = arg_class.pr
    # the arm length
    lp = arg_class.lp
    # setting shift
    zdiffp = arg_class.zdiffp
    # setting coefficient for arm on z axis
    k = arg_class.k
    # arm radius
    arm_r = arg_class.arm_r
    # propeller number (because of symmetric, get the half number of propellers)
    half_propeller_number = len(txs)
    # coords of joint point
    joint_points = []
    for idx in range(half_propeller_number):
        # joints sit on the fuselage side at the widest y of the cabin
        point = [l * txs[idx], np.max(cabin_arr[:, 1]), idx * zdiffp]
        joint_points.append(point)
    # coords of propellers at left side
    propeller_arr_l = []
    # coords of propellers at right side
    propeller_arr_r = []
    for angle, joint_point in zip(angles, joint_points):
        angle = 180 - angle
        angle = angle * np.pi / 180.0
        # get center coords
        center = np.array([joint_point[0] + (radius + pr) * np.cos(angle),
                           joint_point[1] + (radius + pr) * np.sin(angle),
                           joint_point[2]])
        # z range
        z = np.linspace(-k * lp + joint_point[2], (1 - k) * lp + joint_point[2], 30)
        for zi in z:
            # x range (create circle)
            x = np.linspace(center[0] - pr, center[0] + pr, 30)
            for xi in x:
                # upper/lower y of the propeller disc at this x
                target = np.sqrt(pr ** 2 - (xi - center[0]) ** 2)
                yui = center[1] + target
                yli = center[1] - target
                # left side
                plu = [xi, yui, zi]
                pll = [xi, yli, zi]
                propeller_arr_l.append(plu)
                propeller_arr_l.append(pll)
                # right side (mirror across the x-z plane)
                pru = [xi, -yui, zi]
                prl = [xi, -yli, zi]
                propeller_arr_r.append(pru)
                propeller_arr_r.append(prl)
    propeller_arr_r = np.array(propeller_arr_r)
    propeller_arr_l = np.array(propeller_arr_l)
    # put together propeller arr
    propeller_arr = np.concatenate([propeller_arr_l, propeller_arr_r], axis=0)
    # create arm
    arm_arr = []
    # right part: sample a cylinder of radius arm_r along the arm axis
    x = np.linspace(0, radius + pr, 30)
    for xi in x:
        y = np.linspace(-arm_r, arm_r, 30)
        for yi in y:
            target = np.sqrt(arm_r ** 2 - yi ** 2)
            zui = target
            zli = -target
            pu = [xi, yi, zui]
            pl = [xi, yi, zli]
            for idx in range(half_propeller_number):
                rep_j = joint_points[idx]
                angle = angles[idx]
                # turn over 3d on z axis against upper part
                angle_u = -1 * (180 - angle) * np.pi / 180.0
                t_arr_u = turnover_3d(angle_u, np.array([0, 0, 1]))
                # turn over 3d on z axis against left part
                angle_l = 180 * np.pi / 180.0
                t_arr_l = turnover_3d(angle_l, np.array([0, 0, 1]))
                puu = np.dot(t_arr_u.T, np.array(pu)) + np.array(rep_j)
                pll = np.dot(t_arr_l.T, puu) + np.array([l, 0, -2 * zdiffp * idx + (half_propeller_number - 1) * zdiffp])
                arm_arr.append(puu.tolist())
                arm_arr.append(pll.tolist())
    arm_arr = np.array(arm_arr)
    return propeller_arr, arm_arr
def get_dims_linear(weight_mat_layers, weight_dict):
    """
    Return the layer dimensions of an MLP as a flat list.

    The first entry is the input dimension of the first layer, followed by
    the output dimension of every layer in order.
    """
    dims = []
    for position, layer_name in enumerate(weight_mat_layers):
        out_dim, in_dim = weight_dict[layer_name].shape
        if position == 0:
            dims.append(in_dim)
        dims.append(out_dim)
    return dims
def yices_distinct(n, arg):
    """Returns (distinct arg[0] ... arg[n-1]).

    :param n: number of terms in ``arg``
    :param arg: C array of yices terms
    :return: a yices term asserting pairwise distinctness
    """
    return libyices.yices_distinct(n, arg)
def is_cog_contributor():
    """Check if whoever used the command is in the bots contributors.

    Returns a ``commands.check`` decorator; the predicate raises
    ``NotAContributorError`` when the invoking user's id is not listed in
    ``bot.contributors``.
    """
    async def predicate(ctx):
        # contributors are stored as string ids, so compare as strings
        if str(ctx.author.id) in ctx.bot.contributors:
            return True
        else:
            raise NotAContributorError(f"Command {ctx.command.name} raised an error: {str(ctx.author)} is not a contributor.")
    return commands.check(predicate)
def tuplify2d(x):
    """Convert ``x`` to a tuple of length two.

    It performs the following conversion:

    .. code-block:: python

        x => x if isinstance(x, tuple) and len(x) == 2
        x => (x, x) if not isinstance(x, tuple)

    Args:
        x (any): the object to be converted

    Returns:
        tuple:

    Raises:
        ValueError: if ``x`` is a tuple whose length is not two.
    """
    if isinstance(x, tuple):
        # Explicit check instead of ``assert``: asserts are stripped when
        # Python runs with -O, silently accepting malformed input.
        if len(x) != 2:
            raise ValueError('expected a tuple of length 2, got %r' % (x,))
        return x
    return (x, x)
import os
def get_scene_info(path):
    """Extract information about the landsat scene from the file name.

    The name is expected to be underscore-delimited, with the sensor first,
    the combined path/row in field 3 and the date in field 4.
    """
    fname = os.path.basename(path)
    parts = fname.split('_')
    # Field 3 packs the 3-digit path and 3-digit row together.
    return {
        'sensor': parts[0],
        'lpath': parts[2][0:3],
        'lrow': parts[2][3:6],
        'date': parts[3],
    }
def init_mako(app, **kw):
    """
    Initializes the Mako TemplateLookup based on the application configuration
    and updates the _request_ctx_stack before each request

    :param app: Flask application to configure
    :param kw: extra keyword arguments forwarded to ``TemplateLookup``
    :return: the configured ``app``
    """
    def get_first(dicts, keys, default=None):
        # look in one or more dictionaries returning the first found value
        for d in dicts:
            # A list comprehension (not ``filter``) so the result is
            # subscriptable and correctly truth-testable on Python 3, where
            # ``filter`` returns a lazy iterator that is always truthy.
            found = [key for key in keys if key in d]
            if found:
                return d[found[0]]
        return default

    # Materialise the key names as a list: a lazy ``map`` would be exhausted
    # after the first dictionary scanned by get_first.
    dirs = get_first([kw, app.config],
                     ['MAKO_%s' % x for x in ('DIRS', 'DIRECTORIES', 'DIR', 'DIRECTORY')],
                     default='.')
    if isinstance(dirs, str):
        dirs = dirs.split(' ')
    get = app.config.get
    kw['input_encoding'] = kw.pop('input_encoding', get('MAKO_INPUT_ENCODING', 'utf-8'))
    kw['output_encoding'] = kw.pop('output_encoding', get('MAKO_OUTPUT_ENCODING', 'utf-8'))
    kw['module_directory'] = kw.pop('module_directory', get('MAKO_CACHEDIR', None))
    kw['collection_size'] = kw.pop('collection_size', get('MAKO_CACHESIZE', -1))
    kw['imports'] = kw.pop('imports', get('MAKO_IMPORTS', None))
    lookup = TemplateLookup(directories=dirs, **kw)

    @app.before_request
    def before_request():
        _request_ctx_stack.top._mako_lookup = lookup

    return app
import time
def erase_devices():
    """Erase all the drives on this server.

    This method performs sanitize erase on all the supported physical drives
    in this server. This erase cannot be performed on logical drives.

    :returns: a dictionary of controllers with drives and the erase status.
    :raises exception.HPSSAException, if none of the drives support
        sanitize erase.
    """
    server = objects.Server()
    for controller in server.controllers:
        # Only unassigned drives that report themselves ready for erase.
        drives = [x for x in controller.unassigned_physical_drives
                  if (x.get_physical_drive_dict().get('erase_status', '')
                      == 'OK')]
        if drives:
            controller.erase_devices(drives)
    # Poll every 5 minutes until every controller reports completion.
    while not has_erase_completed():
        time.sleep(300)
    server.refresh()
    status = {}
    for controller in server.controllers:
        drive_status = {x.id: x.erase_status
                        for x in controller.unassigned_physical_drives}
        sanitize_supported = controller.properties.get(
            'Sanitize Erase Supported', 'False')
        if sanitize_supported == 'False':
            msg = ("Drives overwritten with zeros because sanitize erase "
                   "is not supported on the controller.")
        else:
            msg = ("Sanitize Erase performed on the disks attached to "
                   "the controller.")
        drive_status.update({'Summary': msg})
        status[controller.id] = drive_status
    return status
def earlyon(time, duration, *args):
    """
    Some lights have a slight delay before they turn on (capacitors that need
    to be charged up?). This takes the current time and subtracts that delay
    so the code looks like they turn on at the right time, but we really send
    the command a little bit early to give the illusion that they're all in
    sync.

    :param time: nominal event time
    :param duration: lead time in units of ``time``, as a base-10 string
    :param args: command words; the last one is "on" or "off"
    :return: single-element list of (send_time, quoted_command)
    """
    lead_time = int(duration, 10)
    quoted = '" "'.join(args)
    cmd = '"%s"' % quoted
    # Only "on" commands are sent early; "off" goes out at the nominal time.
    send_at = time - lead_time if args[-1] == "on" else time
    return [(send_at, cmd)]
def downsample_seg_to_mip(seg, mip_start, mip_end):
    """
    Downsample a segmentation to the desired mip level.

    Args:
        seg (3darray): A volume segmentation.
        mip_start (int): The MIP level of seg.
        mip_end (int): The desired MIP level.

    Returns:
        3darray: seg downsampled to :param: mip_end
    """
    assert mip_end > mip_start
    # One downsampling step per MIP level of difference.
    for _ in range(mip_end - mip_start):
        seg = downsample_seg(seg)
    return seg
def example(name):
    """Renders a sample page with the name specified in the URL.

    :param name: value captured from the route, interpolated into the page
    :return: rendered HTML greeting
    """
    return template('<b>Hello {{name}}</b>!', name=name)
def tri(N, M=None, k=0, dtype=float):
    """Creates an array with ones at and below the given diagonal.

    Args:
        N (int): Number of rows.
        M (int): Number of columns. ``M == N`` by default.
        k (int): The sub-diagonal at and below which the array is filled. Zero
            is the main diagonal, a positive value is above it, and a negative
            value is below.
        dtype: Data type specifier.

    Returns:
        cupy.ndarray: An array with ones at and below the given diagonal.

    .. seealso:: :func:`numpy.tri`
    """
    if M is None:
        M = N
    # ``empty`` is safe here: the kernel writes every element of ``out``.
    out = cupy.empty((N, M), dtype=dtype)
    return _tri_kernel(M, k, out)
def UpdateGClientBranch(webkit_rev, magic_gclient_branch):
    """Update the magic gclient branch to point at |webkit_rev|.

    Fetches from the remote once if the revision is not yet available
    locally.  (Python 2 module: uses print statements.)

    Returns: true if the branch didn't need changes."""
    target = FindSVNRev(webkit_rev)
    if not target:
        print "r%s not available; fetching." % webkit_rev
        # shell=True is required on Windows for git to be found on PATH.
        subprocess.check_call(['git', 'fetch', GetRemote()],
                              shell=(os.name == 'nt'))
        target = FindSVNRev(webkit_rev)
        if not target:
            print "ERROR: Couldn't map r%s to a git revision." % webkit_rev
            sys.exit(1)
    current = RunGit(['show-ref', '--hash', magic_gclient_branch])
    if current == target:
        return False  # No change necessary.
    subprocess.check_call(['git', 'update-ref', '-m', 'gclient sync',
                           magic_gclient_branch, target],
                          shell=(os.name == 'nt'))
    return True
import os
def get_process_name():
    """Return the main binary we are attached to.

    :return: the bare program name, without directory or extension
    """
    # The return from gdb.objfiles() could include the file extension of the debug symbols.
    main_binary_name = gdb.objfiles()[0].filename
    # Strip the directory and the (possible) extension.
    return os.path.splitext(os.path.basename(main_binary_name))[0]
def calculate_trajectories(particles, daughters, alpha=1.):
    """Calculates the trajectories of the particles.

    Args:
        particles: a dataframe with the particle information.
        daughters: a dataframe where each line represents a daughter for the particles.
        alpha: for how long should stable tracks should be propagated.

    Returns:
        DataFrame of trajectory segments with positive decay length.
    """
    particles_for_lines = particles.copy()
    distances_to_primary_vertex = _distances_to_primary_vertex(particles_for_lines)
    # NOTE(review): this overwrites the ``alpha`` argument, so the parameter
    # documented above is effectively ignored -- confirm whether callers
    # rely on passing their own value.
    alpha = 1.1 * distances_to_primary_vertex.max()
    particles_for_lines['NDaughters'] = daughters.groupby('Id').apply(len)
    particles_for_lines['NDaughters'] = particles_for_lines['NDaughters'].fillna(0.).astype(int)
    # Particles with daughters
    lines_daughters = particles_for_lines.join(daughters, how='inner')
    lines_daughters = lines_daughters.join(particles_for_lines[['Vx', 'Vy', 'Vz']], on='DaughterId', rsuffix='_decay')
    # Particles WITHOUT daughters
    lines_single = _add_line_continuation(particles_for_lines[particles_for_lines['NDaughters'] == 0], alpha, '_decay')
    lines = pd.concat([lines_daughters, lines_single])
    decay_length = _decay_length(lines)
    # Drop degenerate (zero-length) segments.
    return lines[decay_length > 0]
def site_title(request, registry, settings):
    """Expose website name from ``tm.site_title`` config variable to templates.

    This is the default ``<title>`` tag.

    Example:

    .. code-block:: html+jinja

        <meta>
            <title>My page - {{ site_title }}</title>
        </meta>
    """
    # Use .get() for BBB: older configs may lack the key entirely.
    title = settings.get("tm.site_title", "")
    return title
import typing
from pathlib import Path
import importlib
import inspect
import ast
def linkcode_resolve(repo_link: str, domain: str, info: dict[str, str]) -> typing.Optional[str]:
    """
    Function called by linkcode to get the URL for a given resource.

    See for more details:
    https://www.sphinx-doc.org/en/master/usage/extensions/linkcode.html#confval-linkcode_resolve

    :param repo_link: base URL of the repository (``.../blob/<sha>`` is appended)
    :param domain: Sphinx domain; only ``"py"`` is supported
    :param info: mapping with ``module`` and ``fullname`` keys
    :return: URL pointing at the symbol's source lines, or None
    """
    if domain != "py":
        raise Exception("Unknown domain passed to linkcode function.")

    symbol_name = info["fullname"]

    build_root = get_build_root()

    # Import the package to find files
    origin = build_root / info["module"].replace(".", "/")
    search_locations = []
    if origin.is_dir():
        search_locations.append(origin.absolute().as_posix())
        origin = origin / "__init__.py"
    else:
        origin = Path(origin.absolute().as_posix() + ".py")
        if not origin.exists():
            raise Exception(f"Could not find `{info['module']}` as a package or file.")

    # We can't use a normal import (importlib.import_module), because the module can conflict with another copy
    # in multiversion builds. We load the module from the file location instead
    spec = importlib.util.spec_from_file_location(info["module"], origin, submodule_search_locations=search_locations)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    # Walk the dotted path attribute by attribute, remembering each object.
    symbol = [module]
    for name in symbol_name.split("."):
        symbol.append(getattr(symbol[-1], name))
        symbol_name = name

    try:
        lines, start = inspect.getsourcelines(symbol[-1])
        end = start + len(lines)
    except TypeError:
        # Find variables by parsing the ast
        source = ast.parse(inspect.getsource(symbol[-2]))
        while isinstance(source.body[0], ast.ClassDef):
            source = source.body[0]

        for ast_obj in source.body:
            if isinstance(ast_obj, ast.Assign):
                names = []
                for target in ast_obj.targets:
                    if isinstance(target, ast.Tuple):
                        names.extend([name.id for name in target.elts])
                    else:
                        names.append(target.id)

                if symbol_name in names:
                    start, end = ast_obj.lineno, ast_obj.end_lineno
                    break
        else:
            raise Exception(f"Could not find symbol `{symbol_name}` in {module.__name__}.")

        # AST line numbers are relative to the containing object's source.
        _, offset = inspect.getsourcelines(symbol[-2])
        if offset != 0:
            offset -= 1
        start += offset
        end += offset

    file = Path(inspect.getfile(module)).relative_to(build_root).as_posix()

    try:
        sha = git.Repo(build_root).commit().hexsha
    except git.InvalidGitRepositoryError:
        # We are building a historical version, no git data available
        sha = build_root.name

    url = f"{repo_link}/blob/{sha}/{file}#L{start}"
    if end != start:
        url += f"-L{end}"

    return url
import os
def scite(
    genotype_file,
    alpha,
    beta,
    n_iters,
    n_restarts,
    experiment,
    time_limit,
    smooth_rate,
    iters_rate,
):
    """SCITE.

    Tree inference for single-cell data :cite:`SCITE`.

    scphylo scite input.SC 0.0001 0.1 -l 1000000 -r 3 -e -t 86400 -s 2

    :param genotype_file: path to the input genotype matrix
    :param alpha: false positive rate
    :param beta: false negative rate
    :param n_iters: MCMC iterations (ignored in experiment mode)
    :param n_restarts: number of independent restarts
    :param experiment: when True, calibrate iteration count to a time budget
    :param time_limit: wall-clock budget (seconds) for experiment mode
    :param smooth_rate: safety factor applied to the calibrated iteration count
    :param iters_rate: iteration count for the calibration run
    """
    outfile = os.path.splitext(genotype_file)[0]

    scp.settings.verbosity = "info"

    df_in = scp.io.read(genotype_file)
    if not experiment:
        scp.settings.logfile = f"{outfile}.scite.log"
        df_out = scp.tl.scite(
            df_in,
            alpha=alpha,
            beta=beta,
            n_iters=n_iters,
            n_restarts=n_restarts,
        )
        scp.io.write(df_out, f"{outfile}.scite.CFMatrix")
    else:
        scp.settings.logfile = f"{outfile}.scite.log"
        # Calibration run: time a short run, then scale the iteration count
        # so that n_restarts parallel runs fit the time_limit budget.
        df_out, running_time, _, _ = scp.tl.scite(
            df_in,
            alpha=alpha,
            beta=beta,
            n_iters=iters_rate,
            n_restarts=1,
            experiment=True,
        )
        n_iters = int(smooth_rate * iters_rate * time_limit / running_time)

        def run(i):
            do, r, s, b = scp.tl.scite(
                df_in,
                alpha=alpha,
                beta=beta,
                n_iters=n_iters,
                n_restarts=1,
                experiment=True,
            )
            return do, r, s, b

        output = Parallel(n_jobs=n_restarts)(delayed(run)(i) for i in range(n_restarts))
        # Keep the restart with the best score.
        scores = [x[2] for x in output]
        betas = [x[3] for x in output]
        best_i = np.argmax(scores)
        df_out = output[best_i][0]

        scp.ul.stat(df_in, df_out, alpha, beta, output[best_i][1])
        scp.logg.info(f"score: {output[best_i][2]}")
        scp.logg.info(f"beta: {output[best_i][3]}")
        scp.logg.info(f"n_iters: {n_iters}")
        scp.logg.info(f"scores: {','.join(list(map(str, scores)))}")
        scp.logg.info(f"betas: {','.join(list(map(str, betas)))}")
        scp.logg.info(f"picked: {best_i}")
        scp.io.write(df_out, f"{outfile}.scite.CFMatrix")

    return None
def deterministic_hash(items):
    """
    Intermediary hashing function that allows deterministic hashing of a list of items.

    :param items: List of items to hash
    :return: Numeric, deterministic hash, returns 0 if item is none
    """
    h = 0
    for item in items:
        if not item:
            # Falsy items (None, '', 0) contribute nothing; for numeric 0
            # the XOR would be a no-op anyway.
            pass
        elif not isinstance(item, (int, long)):
            # NOTE(review): ``long`` exists only on Python 2 -- on Python 3
            # this line raises NameError. ``bytes2long`` is a project helper
            # converting non-integer items to an integer for XOR folding.
            h ^= bytes2long(item)
        else:
            h ^= item
    return h
def get_unique_licenses(codebase, good_only=True):
    """
    Return a tuple of two sets of license keys found in the codebase:
    - the set license found in key files
    - the set license found in non-key files

    This is only for files in the core facet.

    :param codebase: resource tree exposing ``walk()``
    :param good_only: when True, licenses failing ``is_good_license`` are
        recorded as ``'unknown'`` instead of their own key
    """
    key_license_keys = set()
    other_license_keys = set()
    for resource in codebase.walk():
        # FIXME: consider only text, source-like files for now
        if not resource.is_file:
            continue
        if not (resource.is_key_file or is_core_facet(resource)):
            # we only cover either core code/core facet or top level, key files
            continue
        # Route the detected keys into the proper bucket.
        if resource.is_key_file:
            license_keys = key_license_keys
        else:
            license_keys = other_license_keys
        for detected_license in getattr(resource, 'licenses', []) or []:
            if good_only and not is_good_license(detected_license):
                license_keys.add('unknown')
            else:
                license_keys.add(detected_license['key'])
    return key_license_keys, other_license_keys
def execute_sql_insert(cursor, sql_query):
    """
    Executes SQL INSERT queries.

    :param cursor: Database cursor
    :param sql_query: SQl query to execute
    :return: Database cursor and last inserted row id (``None`` when the
        driver does not expose ``lastrowid``)
    :raises AttributeError: when either argument is None
    """
    if cursor is None:
        raise AttributeError("Provide cursor as parameter")
    if sql_query is None:
        raise AttributeError("Provide sql_query as parameter")
    last_id = ""
    try:
        cursor.execute(sql_query)
    except Exception as exp:
        # Annotate and log, then re-raise the original exception.
        exp = append_description_to_exception(exp=exp, description='Error in SQL INSERT query execution')
        logger.error('Error in SQL query execution: ' + repr(exp))
        raise
    else:
        logger.debug('SQL query executed')
    try:
        last_id = str(cursor.lastrowid)
    except Exception as exp:
        # lastrowid is optional in the DB-API; degrade to None instead of failing.
        exp = append_description_to_exception(exp=exp, description='lastrowid not found')
        logger.error('cursor.lastrowid not found: ' + repr(exp))
        logger.info('cursor.lastrowid not found. Using None instead')
        last_id = None
    else:
        logger.debug('cursor.lastrowid: ' + last_id)
    return cursor, last_id
def fetch_known_transcripts_with_gene_label(cursor, datasets):
    """ Fetch known transcripts along with the gene they belong to """
    dataset_clause = format_for_IN(datasets)
    query = """SELECT DISTINCT gene_ID,transcript_ID FROM observed
                   LEFT JOIN transcript_annotations AS ta ON ta.ID = observed.transcript_ID
                   WHERE (ta.attribute = 'transcript_status' AND ta.value = 'KNOWN')
                   AND observed.dataset IN """ + dataset_clause
    cursor.execute(query)
    # each returned row is (gene_ID, transcript_ID); tag with a label
    return [(gene_id, transcript_id, "FSM_transcript")
            for gene_id, transcript_id in cursor.fetchall()]
def change_data():
    """Edit the current user's profile.

    POST with valid form data: copies the form fields onto the user and
    persists them. GET: pre-populates the form with the user's stored data.
    """
    form = ChangeDataForm()
    if form.validate_on_submit():
        current_user.age = form.age.data
        current_user.country = form.country.data
        current_user.city = form.city.data
        current_user.telegram = form.telegram.data
        current_user.git = form.git.data
        db.session.commit()
        flash('Профиль успешно изменен.', 'success')
        return redirect(
            url_for('user_profile', username=current_user.username))
    elif request.method == 'GET':
        form.age.data = current_user.age
        form.country.data = current_user.country
        form.city.data = current_user.city
        form.telegram.data = current_user.telegram
        # BUG FIX: the assignment was reversed (current_user.git = form.git.data),
        # which silently overwrote the stored git link with empty form data on
        # every GET; the form must be populated from the user instead.
        form.git.data = current_user.git
    return render_template(
        'profile/change_data.html',
        form=form,
    )
from typing import Optional
from pathlib import Path
import os
import subprocess
def simulate_single_image(
    simulation: Simulation, idx: int, zarr_filename: Optional[str] = None
) -> np.ndarray:
    """Generate a single image from a single-particle simulation.

    Runs the parakeet pipeline (sample -> exit wave -> optics -> image ->
    export) in a temporary directory, loads the resulting mrc image and
    inverts its contrast. Optionally saves the image into a zarr store.

    Parameters
    ----------
    simulation : Simulation
        Holds per-image parameters and parakeet config files, indexed by idx.
    idx : int
        Index of the image to simulate.
    zarr_filename : Optional[str]
        When given, the image is also written into this zarr store at idx.

    Returns
    -------
    np.ndarray
        The simulated, contrast-inverted 2D image.
    """
    # get info required for simulation
    image_parameters = simulation.per_image_parameters[idx]
    parakeet_config = simulation.parakeet_config_files[idx]

    # do work in a temporary directory (parakeet makes a bunch of files)
    base_directory = Path('.').absolute()
    with TemporaryDirectory() as tmp_dir:
        os.chdir(tmp_dir)
        try:
            # rotate structure and save
            load_rotate_save(
                structure_file=str(image_parameters.input_structure),
                rotation=image_parameters.rotation,
                output_filename=image_parameters.rotated_structure_filename
            )

            # write parakeet config file
            write_config(parakeet_config, 'parakeet_config.yaml')

            # run parakeet (NOTE(review): return codes are not checked, so a
            # failed step is only noticed when loading image.mrc — confirm
            # whether check=True is wanted)
            subprocess.run(
                ['parakeet.sample.new', '-c', 'parakeet_config.yaml']
            )
            subprocess.run(
                ['parakeet.simulate.exit_wave', '-c', 'parakeet_config.yaml']
            )
            subprocess.run(
                ['parakeet.simulate.optics', '-c', 'parakeet_config.yaml']
            )
            subprocess.run(
                ['parakeet.simulate.image', '-c', 'parakeet_config.yaml']
            )
            subprocess.run(
                ['parakeet.export', 'image.h5', '-o', 'image.mrc']
            )

            # load image file and invert
            with mrcfile.open('image.mrc') as mrc:
                image = np.squeeze(mrc.data) * -1
        finally:
            # BUG FIX: always restore the working directory, even when a step
            # raises; otherwise the process is left inside a directory that
            # TemporaryDirectory is about to delete.
            os.chdir(base_directory)

    # optionally save image into zarr store
    if zarr_filename is not None:
        save_image_into_zarr_store(
            image=image, idx=idx, zarr_filename=zarr_filename
        )
    return image
from io import StringIO
def run_checks(root, parent, cmds, scmds, paths='', opts={}):
    """Run the checks given in 'cmds', expected to have well-known signatures,
    and report results for any which fail.
    Return failure if any of them did.
    NB: the function name of the commands passed in is used to name the NOT
    file which excepts files from them."""
    failed = 0
    # file checks run first over gen_files, then link checks over gen_links
    for checks, generator in ((cmds, gen_files), (scmds, gen_links)):
        for check in checks:
            buf = StringIO()
            excluded = not_check(root, check.__name__)
            status = check(root, parent,
                           generator(root, parent, paths, excluded),
                           output=buf)
            failed |= status
            if status != 0:
                print(buf.getvalue())
    return failed
def numpy_to_rdkit(adj, nf, ef, sanitize=False):
    """
    Converts a molecule from numpy to RDKit format.
    :param adj: binary numpy array of shape (N, N)
    :param nf: numpy array of shape (N, F)
    :param ef: numpy array of shape (N, N, S)
    :param sanitize: whether to sanitize the molecule after conversion
    :return: an RDKit molecule
    """
    if rdc is None:
        raise ImportError('`numpy_to_rdkit` requires RDKit.')
    # Build an editable molecule, one atom per node feature.
    mol = rdc.RWMol()
    for nf_ in nf:
        # node feature is interpreted as an atomic number; non-positive
        # values add no atom
        atomic_num = int(nf_)
        if atomic_num > 0:
            mol.AddAtom(rdc.Atom(atomic_num))
    # Walk the upper triangle of the adjacency matrix; add a bond only for
    # symmetric edges (adj[i,j] == adj[j,i] == 1) not already present.
    for i, j in zip(*np.triu_indices(adj.shape[-1])):
        if i != j and adj[i, j] == adj[j, i] == 1 and not mol.GetBondBetweenAtoms(int(i), int(j)):
            # both directions of the edge feature must agree on the bond type,
            # otherwise the edge is skipped
            bond_type_1 = BOND_MAP[int(ef[i, j, 0])]
            bond_type_2 = BOND_MAP[int(ef[j, i, 0])]
            if bond_type_1 == bond_type_2:
                mol.AddBond(int(i), int(j), bond_type_1)
    # Freeze the editable molecule into a regular Mol.
    mol = mol.GetMol()
    if sanitize:
        rdc.SanitizeMol(mol)
    return mol
from typing import Dict
from typing import Any
from pathlib import Path
def get_path(key: str, **kwargs: Dict[str, Any]) -> Path:
    """Fetch a file-path system variable and wrap it in a pathlib.Path.

    See the signature of get() for parameter details.
    """
    raw = get(key, **kwargs)
    return Path(raw)
import time
def run_competition(builders=[], task=BalanceTask(), Optimizer=HillClimber, rounds=3, max_eval=20, N_hidden=3, verbosity=0):
    """ pybrain buildNetwork builds a subtly different network structure than build_ann... so compete them!

    NOTE(review): the defaults ``builders=[]`` and ``task=BalanceTask()`` are
    evaluated once at import time; ``builders`` is never mutated here, but the
    shared ``task`` instance persists across calls — confirm that is intended.

    Arguments:
        builders (list): extra network-builder callables to compete, in
            addition to buildNetwork and util.build_ann
        task (Task): task to compete at
        Optimizer (class): pybrain.Optimizer class to instantiate for each competitor
        rounds (int): number of times to run the competition
        max_eval (int): number of objective function evaluations that the optimizer is allowed
            in each round
        N_hidden (int): number of hidden nodes in each network being competed
        verbosity (int): >= 0 prints per-round scores; > -1 prints mean results

    Returns:
        (results, means): per-round tuples of (best score, elapsed seconds,
            network) for each builder, and the per-builder means of score
            and elapsed time

    The functional difference that I can see is that:
      buildNetwork connects the bias to the output
      build_ann does not
    The api differences are:
      build_ann allows heterogeneous layer types but the output layer is always linear
      buildNetwork allows specification of the output layer type
    """
    results = []
    # always include the two reference builders alongside any user-supplied ones
    builders = list(builders) + [buildNetwork, util.build_ann]
    for r in range(rounds):
        heat = []
        # FIXME: shuffle the order of the builders to keep things fair
        # (like switching sides of the tennis court)
        for builder in builders:
            # some builders do not accept a verbosity kwarg; retry without it
            try:
                competitor = builder(task.outdim, N_hidden, task.indim, verbosity=verbosity)
            except NetworkError:
                competitor = builder(task.outdim, N_hidden, task.indim)
            # TODO: verify that a full reset is actually happening
            task.reset()
            optimizer = Optimizer(task, competitor, maxEvaluations=max_eval)
            t0 = time.time()
            nn, nn_best = optimizer.learn()
            t1 = time.time()
            # record (best score, wall-clock seconds, trained network)
            heat += [(nn_best, t1-t0, nn)]
        results += [tuple(heat)]
        if verbosity >= 0:
            print([competitor_scores[:2] for competitor_scores in heat])
    # # alternatively:
    # agent = ( pybrain.rl.agents.OptimizationAgent(net, HillClimber())
    # or
    # pybrain.rl.agents.LearningAgent(net, pybrain.rl.learners.ENAC()) )
    # exp = pybrain.rl.experiments.EpisodicExperiment(task, agent).doEpisodes(100)
    # means[0] = mean best score per builder, means[1] = mean elapsed time per builder
    means = [[np.array([r[i][j] for r in results]).mean() for i in range(len(results[0]))] for j in range(2)]
    if verbosity > -1:
        print('Mean Performance:')
        print(means)
    # higher score wins performance; lower elapsed time wins speed
    perfi, speedi = np.argmax(means[0]), np.argmin(means[1])
    print('And the winner for performance is ... Algorithm #{} (0-offset array index [{}])'.format(perfi+1, perfi))
    print('And the winner for speed is ... Algorithm #{} (0-offset array index [{}])'.format(speedi+1, speedi))
    return results, means
def get_reduced_tree(tree, reduce_by):
    """
    Given a tree decomposition in tree and a required
    size of reduction, produces a new tree decomposition
    with treewidth reduced by the requested size and a list
    of eliminated nodes.
    We use a greedy algorithm to find nodes to eliminate.
    This algorithm deletes variable subtrees from the maximal
    node. The variables corresponding to larger subtrees are
    deleted first. If the length of subtrees are equal then
    subtrees passing through more nodes are removed first
    Parameters
    ----------
    tree : networkx.Graph
           tree decomposition we need to reduce
    reduce_by : int
           reduce treewidth by this amount
    Returns
    -------
    new_tree : networkx.Graph()
           reduced tree decomposition
    eliminated_nodes : list
           list of eliminated nodes
    Raises
    ------
    ValueError
           if reduce_by is negative or >= the current treewidth
    """
    # treewidth = size of the largest clique (bag) minus one
    max_clique = find_max_cliques(tree)[0]
    treewidth = len(max_clique) - 1
    current_treewidth = treewidth
    if reduce_by < 0 or reduce_by > treewidth - 1:
        raise ValueError(
            'Requested reduce_by: {}, allowed range: [0, {}]'.format(
                reduce_by, treewidth-1))
    eliminated_nodes = []
    new_tree = tree
    # greedily remove one variable at a time until the treewidth target is met
    while current_treewidth > treewidth - reduce_by:
        # NOTE(review): subtree statistics are computed on the original
        # `tree`, not on `new_tree`, even after removals — confirm intended
        nodes_by_subwidth = get_subtree_by_length_width(
            tree, list(max_clique))
        # get (node, path length, total node's subtree width)
        nodes_in_rmorder = [(node, len(nodes_by_subwidth[node]),
                             sum(nodes_by_subwidth[node]))
                            for node in nodes_by_subwidth]
        # sort by path length, then by total width of subtree
        nodes_in_rmorder = sorted(
            nodes_in_rmorder,
            key=lambda x: (x[1], x[2]))
        # remove the node with the longest path / widest subtree (last after sort)
        rmnode = nodes_in_rmorder[-1][0]
        new_tree = rm_element_in_tree(new_tree, rmnode)
        eliminated_nodes.append(rmnode)
        # re-evaluate the treewidth of the reduced tree
        max_clique = find_max_cliques(new_tree)[0]
        current_treewidth = len(max_clique) - 1
    # sanity check: node removal must not have introduced self loops
    assert len(list(new_tree.selfloop_edges())) == 0
    return new_tree, eliminated_nodes
from pathlib import Path
def temp_path(suffix=""):
    """Create a fresh temporary directory and return it as a pathlib.Path."""
    return Path(mkdtemp(suffix=suffix))
def maybe_utf8(value):
    """Encode to utf-8, only if the value is Unicode.

    Any other value (e.g. already-encoded bytes) is returned unchanged.
    NOTE(review): relies on the Python 2 ``unicode`` builtin; under Python 3
    this raises NameError on every call — confirm the module targets Python 2.
    """
    if isinstance(value, unicode):
        return value.encode("utf-8")
    return value
def parse_id_as_interval(id_string, regex):
    """ The fasta ids contain the locus information. """
    match = regex.match(id_string)
    genome = match.group("genome")
    seqid = match.group("seqid")
    # coordinates may appear in either order; normalise to (low, high)
    low, high = sorted((int(match.group("start")), int(match.group("end"))))
    return (genome, seqid, low, high)
def map_sentences_to_indices_of_vectors(sentences, word_to_index_glove, unknown_token):
    """Map each sentence to a list of glove-vocabulary indices.

    Words missing from the vocabulary are mapped to the index of
    ``unknown_token``.
    """
    # resolve the unknown-word index once, outside the loops
    unknown_index = word_to_index_glove[unknown_token]
    return [
        [word_to_index_glove.get(word, unknown_index) for word in sentence.split()]
        for sentence in sentences
    ]
import subprocess
def ruler(inputdict_unchecked):
"""
This program calculates reduced transition probabilities. (RULER readme)
Parameters
----------
inputdict_unchecked : dictionary
dictionary that must have the following key-pair values:
input_file : string, input ensdf file
output file : string, file for output to be written to (doesn't
have to exist)
Returns
-------
rtn : dictionary
Everything in input dictionary is returned if RULER completes
successfully.
"""
inputdict = {}
input_file = inputdict_unchecked['input_file']
output_report_file = inputdict_unchecked['output_report_file']
mode_of_operation = inputdict_unchecked['mode_of_operation']
assumed_dcc_theory = inputdict_unchecked['assumed_dcc_theory']
exe_path = path_to_exe('ruler')
ruler_output = subprocess.Popen([exe_path],stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
inp = input_file + '\n' + output_report_file + '\n' + mode_of_operation + \
'\n' + assumed_dcc_theory
ruler_output.stdin.write(inp.encode('utf-8'))
ruler_output.communicate()[0]
ruler_output.stdin.close()
return inputdict_unchecked | 7cbc88b9c2bf561d483d12609ff14decf19df7e7 | 29,997 |
import numpy
def jordan_wigner_dual_basis_jellium(grid, spinless=False,
                                     include_constant=False):
    """Return the jellium Hamiltonian as QubitOperator in the dual basis.

    Args:
        grid (Grid): The discretization to use.
        spinless (bool): Whether to use the spinless model or not.
        include_constant (bool): Whether to include the Madelung constant.

    Returns:
        hamiltonian (QubitOperator)
    """
    # Initialize.
    n_orbitals = grid.num_points()
    volume = grid.volume_scale()
    # spinful model doubles the qubit count (one qubit per spin orbital)
    if spinless:
        n_qubits = n_orbitals
    else:
        n_qubits = 2 * n_orbitals
    hamiltonian = QubitOperator()

    # Compute vectors.
    # cache momentum vectors and their squared norms per grid point
    momentum_vectors = {}
    momenta_squared_dict = {}
    for indices in grid.all_points_indices():
        momenta = momentum_vector(indices, grid)
        momentum_vectors[indices] = momenta
        momenta_squared_dict[indices] = momenta.dot(momenta)

    # Compute the identity coefficient and the coefficient of local Z terms.
    # The zero-momentum mode (momenta.any() False) is excluded from both sums.
    identity_coefficient = 0.
    z_coefficient = 0.
    for k_indices in grid.all_points_indices():
        momenta = momentum_vectors[k_indices]
        if momenta.any():
            momenta_squared = momenta.dot(momenta)
            identity_coefficient += momenta_squared / 2.
            identity_coefficient -= (numpy.pi * float(n_orbitals) /
                                     (momenta_squared * volume))
            z_coefficient += numpy.pi / (momenta_squared * volume)
            z_coefficient -= momenta_squared / (4. * float(n_orbitals))
    if spinless:
        identity_coefficient /= 2.

    # Add identity term.
    identity_term = QubitOperator((), identity_coefficient)
    hamiltonian += identity_term

    # Add local Z terms.
    for qubit in range(n_qubits):
        qubit_term = QubitOperator(((qubit, 'Z'),), z_coefficient)
        hamiltonian += qubit_term

    # Add ZZ terms and XZX + YZY terms.
    zz_prefactor = numpy.pi / volume
    xzx_yzy_prefactor = .25 / float(n_orbitals)
    for p in range(n_qubits):
        index_p = grid_indices(p, grid, spinless)
        position_p = position_vector(index_p, grid)
        for q in range(p + 1, n_qubits):
            index_q = grid_indices(q, grid, spinless)
            position_q = position_vector(index_q, grid)
            difference = position_p - position_q
            # in the spinful model, XZX+YZY (hopping) terms only connect
            # qubits of the same spin, i.e. those with even index sum
            skip_xzx_yzy = not spinless and (p + q) % 2

            # Loop through momenta.
            zpzq_coefficient = 0.
            term_coefficient = 0.
            for k_indices in grid.all_points_indices():
                momenta = momentum_vectors[k_indices]
                momenta_squared = momenta_squared_dict[k_indices]
                # skip the zero-momentum mode
                if momenta_squared == 0:
                    continue
                cos_difference = numpy.cos(momenta.dot(difference))
                zpzq_coefficient += (zz_prefactor * cos_difference /
                                     momenta_squared)
                if skip_xzx_yzy:
                    continue
                term_coefficient += (xzx_yzy_prefactor * cos_difference *
                                     momenta_squared)

            # Add ZZ term.
            qubit_term = QubitOperator(((p, 'Z'), (q, 'Z')), zpzq_coefficient)
            hamiltonian += qubit_term

            # Add XZX + YZY term.
            if skip_xzx_yzy:
                continue
            # Z string on the qubits between p and q (Jordan-Wigner string)
            z_string = tuple((i, 'Z') for i in range(p + 1, q))
            xzx_operators = ((p, 'X'),) + z_string + ((q, 'X'),)
            yzy_operators = ((p, 'Y'),) + z_string + ((q, 'Y'),)
            hamiltonian += QubitOperator(xzx_operators, term_coefficient)
            hamiltonian += QubitOperator(yzy_operators, term_coefficient)

    # Include the Madelung constant if requested.
    if include_constant:
        hamiltonian += QubitOperator((),) * (2.8372 / grid.scale)

    # Return Hamiltonian.
    return hamiltonian
def boxcar_decay(tbins, t0, area_box, height_box, area_decay):
    """
    Compute the lightcurve from one or more boxcar-decay functions.

    Parameters
    ----------
    tbins : array
        edges of the time bins used for the lightcurve
    t0 : float or array
        start times of the boxcar-decays
    area_box : float or array
        areas of the boxcar portion of the boxcar-decays
    height_box : float or array
        heights of the boxcar-decays
    area_decay : float or array
        areas of the decay portions of the boxcar-decays

    Returns
    -------
    y : array
        lightcurve values

    Notes
    -----
    This function is a bottleneck when creating a lightcurve from a long
    series of flares. If this code is to be adapted for quick simulation
    of years-long series of flares, this is where the speedup needs to
    happen.
    """
    # politely let user know that, in this instance, astropy Quantities are not wanted
    if any(isinstance(x, u.Quantity) for x in [tbins, t0, area_box, height_box, area_decay]):
        raise ValueError('No astropy Quantity input for this function, please.')

    # this is going to have to be ugly for it to be fast, I think

    # standardize t0, area_box, height_box, and area_decay for array input
    t0, area_box, height_box, area_decay = [np.reshape(a, [-1]) for a in [t0, area_box, height_box, area_decay]]

    # compute end of box, start of decay
    t1 = t0 + area_box/height_box

    # correct for portions hanging over ends of tbins
    # (copy t0 so the caller's array is not mutated by the clipping below)
    t0 = np.copy(t0)
    t0[t0 < tbins[0]] = tbins[0]
    t1[t1 > tbins[-1]] = tbins[-1]

    # initialize y array: one row per flare, one column per time bin
    y = np.zeros((len(t0), len(tbins)-1))
    i_rows = np.arange(y.shape[0])

    # add starting portion of box to first bin that is only partially covered by it
    i0 = np.searchsorted(tbins, t0, side='right')
    frac = (tbins[i0] - t0)/(tbins[i0] - tbins[i0-1])
    y[i_rows, i0-1] += frac*height_box

    # add box to bins fully covered by it
    inbox = (tbins[None, :-1] > t0[:, None]) & (tbins[None, 1:] < t1[:, None])
    y += height_box[:,None]*inbox

    # add ending fraction of box to last bin that is partially covered by it
    i1 = np.searchsorted(tbins, t1, side='left')
    frac = (t1 - tbins[i1-1])/(tbins[i1] - tbins[i1-1])
    y[i_rows, i1-1] += frac*height_box

    # deal with any cases where the box was entirely within a bin
    # (the two partial-bin additions above double-counted; overwrite instead)
    j = i0 == i1
    y[i_rows[j], i0[j]-1] = area_box[j]/(tbins[i0][j] - tbins[i0-1][j])

    # add decay
    # compute cumulative decay integral at all time points; decay height
    # matches the box height, so tau is fixed by the requested decay area
    amp_decay = height_box
    tau_decay = area_decay / amp_decay
    # exp can overflow/NaN for bins before t1; those entries are masked out
    # by the `keep` selection below, so the warnings are suppressed
    with np.errstate(over='ignore', invalid='ignore'):
        Idecay = -amp_decay[:,None]*tau_decay[:,None]*np.exp(-(tbins[None,:] - t1[:,None])/tau_decay[:,None])
    ydecay = np.diff(Idecay, 1)/np.diff(tbins)
    keep = tbins[:-1] > t1[:, None]
    y[keep] += ydecay[keep]

    # add fractional piece of exponential
    i1 = np.searchsorted(tbins, t1, side='right')
    inrange = i1 < len(tbins)
    i_rows, i1 = i_rows[inrange], i1[inrange]
    # integral of the decay from t1 to infinity is -amp*tau
    Idecay1 = -amp_decay*tau_decay
    ydecay1 = (Idecay[i_rows, i1] - Idecay1[i_rows])/(tbins[i1] - tbins[i1-1])
    y[i_rows, i1-1] += ydecay1

    # sum contributions of all flares in each bin
    return np.sum(y, 0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.