content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def updateShaderState(self):
    """Updates all shader program variables. """
    # Nothing to do until this overlay's resources are ready.
    if not self.ready():
        return
    opts = self.opts
    self.shader.load()
    # voxValXform maps texture values back to voxel values; presumably only
    # the scale ([0, 0]) and offset ([0, 3]) of the first row are needed by
    # the fragment program -- packed into a 4-vector as the shader expects.
    voxValXform = self.imageTexture.voxValXform
    voxValXform = [voxValXform[0, 0], voxValXform[0, 3], 0, 0]
    # Normalisation factor over the label range (max label value plus one).
    invNumLabels = 1.0 / (opts.lut.max() + 1)
    self.shader.setFragParam('voxValXform', voxValXform)
    self.shader.setFragParam('invNumLabels', [invNumLabels, 0, 0, 0])
    self.shader.unload()
    return True
import _winreg
from cake.registry import queryString
def _getMinGWInstallDir():
    """Returns the MinGW install directory.

    Typically: 'C:\\MinGW'.

    @return: The path to the MinGW install directory.
    @rtype: string

    @raise WindowsError: If MinGW is not installed.
    """
    possibleSubKeys = [
        r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\MinGW",
        r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\{AC2C1BDB-1E91-4F94-B99C-E716FE2E9C75}_is1",
        ]
    # Try all known registry locations. The original code decided whether to
    # re-raise by comparing `subKey is possibleSubKeys[-1]`, an identity test
    # that breaks if the list is ever built differently. Instead, swallow the
    # lookup failure for every candidate except the last, and let the final
    # query's WindowsError propagate to the caller naturally.
    for subKey in possibleSubKeys[:-1]:
        try:
            return queryString(_winreg.HKEY_LOCAL_MACHINE, subKey, "InstallLocation")
        except WindowsError:
            pass
    return queryString(_winreg.HKEY_LOCAL_MACHINE, possibleSubKeys[-1], "InstallLocation")
import urllib
def nextbus(a, r, c="vehicleLocations", e=0):
    """Returns the most recent latitude and
    longitude of the selected bus line using
    the NextBus API (nbapi)

    Arguments: a=agency, r=route, c=command,
    e=epoch timestamp for start date of track,
    0 = the last 15 minutes

    Returns a (lat, lon) pair of strings, or (False, False) when no
    vehicle is currently reported for the route.
    """
    # Local imports: the original relied on `minidom` (never imported,
    # NameError at runtime) and `urllib.request` via a bare `import urllib`.
    import urllib.request
    from xml.dom import minidom

    nbapi = "http://webservices.nextbus.com"
    nbapi += "/service/publicXMLFeed?"
    nbapi += "command={}&a={}&r={}&t={}".format(c, a, r, e)
    xml = minidom.parse(urllib.request.urlopen(nbapi))
    # Check for emptiness BEFORE indexing: the original indexed [0] first,
    # which raised IndexError instead of returning (False, False) when the
    # feed reported no vehicles. If more than one vehicle, just get the first.
    vehicles = xml.getElementsByTagName("vehicle")
    if vehicles:
        at = vehicles[0].attributes
        return (at["lat"].value, at["lon"].value)
    return (False, False)
def latest_blog_posts(context, num):
    """
    Displays the most recent blog posts. It takes an argument, num
    and displays so many posts depending on the value.
    """
    # select_related() must be applied BEFORE slicing: Django raises
    # "Cannot filter a query once a slice has been taken" when a sliced
    # queryset is modified. Result set is unchanged.
    latest_blog_posts = Post.objects.all().select_related()[:num]
    return {
        'latest_blog_posts': latest_blog_posts
    }
from typing import List
from typing import Dict
import copy
def _expand_array_paths_to_preserve(paths: List[DetailedPath]) -> Dict[str, List[int]]:
    """
    Used by "filter_element_match" - Returns a dictionary of string paths mapped to array indices that we want
    to preserve.

    :param paths: A list of lists of detailed paths (containing strings and array indices) to elements that matched query criteria
    :return: A dict where the keys are a dot-separated path to an array, and the values are a list of indices
    in that array that we want to keep. If there are no indices in the original path, that path will be ignored.
    Some paths may be expanded into multiple paths where there are multiple levels of indices (arrays of arrays).

    :Example:
    _expand_array_paths_to_preserve([["F", 1, 2], ["F", 1, 3], ["G", "H"], ["L", 1, "M"]])
    {'F': [1], 'F.1': [2, 3], 'L': [1]}

    This data will be used to remove all elements from row["F"][1] that are not at index 2, and 3.
    We'll then remove all elements from "F" that are not at index [1], and all elements from "L" that are not at index 1.
    """
    # Break path into multiple paths if array elements in path
    expanded: List[DetailedPath] = []
    for original_path in paths:
        # Bug fix: work on a copy. The original popped elements directly off
        # the caller's lists, silently corrupting the `paths` argument.
        path = list(original_path)
        while path and not isinstance(path[-1], int):
            path.pop()
        new_path: DetailedPath = []
        for elem in path:
            new_path.append(elem)
            if isinstance(elem, int) and new_path not in expanded:
                expanded.append(copy.deepcopy(new_path))
    # Combine paths where the key is a dot-separated path to the array, and the value are the indices
    # of the array we want to preserve
    merge_paths: Dict[str, List[int]] = defaultdict(list)
    for path in expanded:
        merge_paths[join_detailed_path(path[0:-1])].append(path[-1])  # type: ignore
    return merge_paths
def get_request_fixture_names(request):
    """Get the list of fixture names for the given FixtureRequest.

    Returns the internal and mutable list of fixture names in the
    enclosing scope of the given request object.

    Compatibility with pytest 3.0.
    """
    fixture_info = request._pyfuncitem._fixtureinfo
    return fixture_info.names_closure
def _unique(values, *, return_inverse=False):
    """Helper function to find unique values with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : ndarray
        Values to check for unknowns.

    return_inverse : bool, default=False
        If True, also return the indices of the unique values.

    Returns
    -------
    unique : ndarray
        The sorted unique values.

    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.
    """
    if values.dtype == object:
        # Arbitrary python objects are not reliably comparable by np.unique;
        # fall back to the pure-python implementation.
        return _unique_python(values, return_inverse=return_inverse)
    # numerical
    out = np.unique(values, return_inverse=return_inverse)
    if return_inverse:
        uniques, inverse = out
    else:
        uniques = out
    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        # NaNs sort to the end, so the first NaN position marks where the
        # duplicates begin; keep exactly one.
        nan_idx = np.searchsorted(uniques, np.nan)
        uniques = uniques[:nan_idx + 1]
        if return_inverse:
            # Remap inverse entries that pointed at a duplicate NaN to the
            # single NaN that was kept.
            inverse[inverse > nan_idx] = nan_idx
    if return_inverse:
        return uniques, inverse
    return uniques
def _create_range_tool(
    data,
    min_time,
    max_time,
    plot_range,
    width,
    height,
    time_column: str = None,
    y: str = "y_index",
):
    """Create plot bar to act as a range selector.

    Parameters
    ----------
    data : dict or pd.DataFrame
        Either a dict of series definitions (each providing "time_column",
        "color" and "source" entries) or a DataFrame plotted directly.
    min_time, max_time :
        Extents of the data; the selector x-range is padded by 15% each side.
    plot_range :
        The x-range of the main chart that this selector will control.
    width, height : int
        Dimensions of the main plot; the selector is ~20% of its height.
    time_column : str, optional
        Name of the timestamp column (DataFrame input only).
    y : str, optional
        Column/field used for the vertical position of the points.
    """
    # Pad the visible range by 15% of the data extent on both sides.
    ext_min = min_time - ((max_time - min_time) * 0.15)
    ext_max = max_time + ((max_time - min_time) * 0.15)
    # Selector height: 20% of the main plot, but never below 120px.
    plot_height = max(120, int(height * 0.20))
    rng_select = figure(
        x_range=(ext_min, ext_max),
        title="Range Selector",
        plot_height=plot_height,
        plot_width=width,
        x_axis_type="datetime",
        y_axis_type=None,
        tools="",
        toolbar_location=None,
    )
    help_str = (
        "Drag the middle or edges of the selection box to change "
        + "the range in the main chart"
    )
    rng_select.add_layout(
        Title(text=help_str, align="right", text_font_size="10px"), "below"
    )
    rng_select.xaxis[0].formatter = _get_tick_formatter()
    # Mirror the main chart's data as small circles in the selector.
    if isinstance(data, dict):
        for _, series_def in data.items():
            rng_select.circle(
                x=series_def["time_column"],
                y=y,
                color=series_def["color"],
                source=series_def["source"],
            )
    elif isinstance(data, pd.DataFrame):
        rng_select.circle(
            x=time_column, y=y, color="blue", source=ColumnDataSource(data)
        )
    # The RangeTool links the selection box to the main chart's x-range.
    range_tool = RangeTool(x_range=plot_range)
    range_tool.overlay.fill_color = "navy"
    range_tool.overlay.fill_alpha = 0.2
    rng_select.ygrid.grid_line_color = None
    rng_select.add_tools(range_tool)
    rng_select.toolbar.active_multi = range_tool
    return rng_select
def pivot(df, index, column, value):
    """
    Pivot a dataframe. Reverse operation of melting. Useful for configuring evolution
    See pandas' pivot_table documentation for more details

    Args:
        - index (list): indexes argument of pd.pivot_table
        - column (str): column name to pivot on
        - value (str): column name containing the value to fill the pivoted df
    """
    # String-valued columns cannot be averaged (pivot_table's default
    # aggfunc), so they are concatenated with spaces instead.
    is_object_dtype = df.dtypes[value].type == np.object_
    if is_object_dtype:
        pivoted = pd.pivot_table(df, index=index,
                                 columns=column,
                                 values=value,
                                 aggfunc=lambda x: ' '.join(x))
    else:
        pivoted = pd.pivot_table(df, index=index,
                                 columns=column,
                                 values=value)
    return pivoted.reset_index()
def split_pdf_image_into_row_image_block(pdf_image):
    """
    split the whole pdf image into row image block

    :param pdf_image: the whole color pdf image (BGR, as read by OpenCV)
    :return: list of horizontal slices of pdf_image, one per detected row
    """
    gray_image = cv2.cvtColor(pdf_image, cv2.COLOR_BGR2GRAY)
    # Binarize: background becomes the maximum value, ink becomes 0.
    binarized_image = cv2.adaptiveThreshold(
        src=gray_image,
        maxValue=255,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY,
        blockSize=11,
        C=2
    )
    # sum along the row axis
    row_sum = np.sum(binarized_image, axis=1)
    # Rows containing any ink sum to less than the all-background maximum.
    idx_row_sum = np.argwhere(row_sum < row_sum.max())[:, 0]
    split_idx = []
    start_idx = idx_row_sum[0]
    # A gap of more than 5 blank rows separates two consecutive blocks.
    for index, idx in enumerate(idx_row_sum[:-1]):
        if idx_row_sum[index + 1] - idx > 5:
            end_idx = idx
            split_idx.append((start_idx, end_idx))
            start_idx = idx_row_sum[index + 1]
    split_idx.append((start_idx, idx_row_sum[-1]))
    # Slice the original (color) image at each detected row boundary.
    pdf_image_splits = []
    for index in range(len(split_idx)):
        idx = split_idx[index]
        pdf_image_split = pdf_image[idx[0]:idx[1], :, :]
        pdf_image_splits.append(pdf_image_split)
    return pdf_image_splits
def get_tokens():
    """ Get all the active tokens in the database."""
    # query_db is the module-level DB helper; returns the rows of the
    # `token` column from the `token` table.
    return query_db('select token from token')
def setup(app):
    """Setup sphinx-gallery sphinx extension

    Registers config values, the gallery stylesheet, and build-phase
    event handlers on the given Sphinx application.

    :param app: the Sphinx application object
    :returns: extension metadata (parallel-safety flags and version)
    """
    # Give the compatibility shim a handle on the app (used for logging).
    sphinx_compatibility._app = app
    app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html')
    for key in ['plot_gallery', 'abort_on_example_error']:
        app.add_config_value(key, get_default_config_value(key), 'html')
    app.add_css_file('gallery.css')
    # Only hook autodoc when the extension is actually loaded.
    if 'sphinx.ext.autodoc' in app.extensions:
        app.connect('autodoc-process-docstring', touch_empty_backreferences)
    # Gallery pages are generated at builder init; reporting/link-embedding
    # steps run after the build completes.
    app.connect('builder-inited', generate_gallery_rst)
    app.connect('build-finished', copy_binder_files)
    app.connect('build-finished', summarize_failing_examples)
    app.connect('build-finished', embed_code_links)
    metadata = {'parallel_read_safe': True,
                'parallel_write_safe': True,
                'version': _sg_version}
    return metadata
def get_stats(beta0, n, sigma, lam, pen, ntrials=100, maxiter=100):
    """
    run ntrials regression problems

    return mean of the mse, and 95% confidence interval

    :param pen: penalty; when None, plain OLS trials are run instead
    :returns: (mean, quantiles) where quantiles = [2.5%, 97.5%] of the MSEs
    """
    if pen is None:
        mses = run_trials_ols(beta0, n, sigma, ntrials=ntrials)
    else:
        mses = run_trials(beta0, n, sigma, lam, pen, ntrials=ntrials, maxiter=maxiter)
    mmean = np.mean(mses)
    # Bug fix: the upper quantile was 0.875, which does not match the 95%
    # confidence interval promised by the docstring; use [0.025, 0.975].
    qs = np.quantile(mses, [0.025, 0.975])
    return mmean, qs
def squeeze_output_dim_0(initial_ndims, point_types):
    """Determine if the output needs to squeeze a singular dimension 0.

    The dimension 0 is squeezed iff all input parameters:
    - contain one sample,
    - have the corresponding dimension 0 squeezed,
    i.e. if all input parameters have ndim strictly less than the ndim
    corresponding to their vectorized shape.
    """
    squeeze = True
    for ndim, point_type in zip(initial_ndims, point_types):
        vect_ndim = POINT_TYPES_TO_NDIMS[point_type]
        # An input can never exceed its vectorized rank.
        assert ndim <= vect_ndim
        if ndim == vect_ndim:
            # At least one input already carries dimension 0: keep it.
            squeeze = False
            break
    return squeeze
import math
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.

    Returns a 4-vector in (w, x, y, z) order, with non-negative w.
    """
    M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        # Fast path: read the quaternion directly from the matrix trace and
        # off-diagonal differences, branching on the largest diagonal term
        # for numerical stability.
        q = np.empty((4,))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # Pick (i, j, k) as a cyclic permutation starting at the largest
            # diagonal element.
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0],
                      [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                      [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                      [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    # Canonicalize sign: q and -q represent the same rotation; prefer w >= 0.
    if q[0] < 0.0:
        np.negative(q, q)
    return q
def check_version(stdout):
    """Check version of Ensembl-VEP from its help output.

    Scans `stdout` (the text printed by `vep --help`) for the
    "ensembl-vep : <version>" line and returns the major version as an
    integer, e.g. "ensembl-vep : 104.3" -> 104.

    Raises StopIteration when no "ensembl-vep" line is present (matching
    the behaviour of the original next()-based lookup).
    """
    for line in stdout.split("\n"):
        if "ensembl-vep" in line:
            # Line layout: "ensembl-vep : 104.3" -> token 2 is the version.
            return int(float(line.split()[2]))
    raise StopIteration
def tf_config():
    """ Default tensorflow config.

    Returns a TF1-style ConfigProto with gpu_options.allow_growth enabled,
    so GPU memory is allocated on demand instead of all at once.
    """
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return config
import argparse
def parse_cli_args():
    """
    These flags are the ones required by the Stream Deck SDK's registration
    procedure. They'll be set by the Stream Deck desktop software when it
    launches our plugin.
    """
    parser = argparse.ArgumentParser(
        description='Stream Deck Google Meet Plugin')
    # (flag, options) table: every SDK flag is mandatory.
    flag_spec = [
        ('-port', dict(type=int, required=True)),
        ('-pluginUUID', dict(dest='plugin_uuid', type=str, required=True)),
        ('-registerEvent', dict(dest='register_event', type=str, required=True)),
        ('-info', dict(type=str, required=True)),
    ]
    for flag, options in flag_spec:
        parser.add_argument(flag, **options)
    # Ignore unknown args in case a Stream Deck update adds optional flags later.
    known_args, _unknown = parser.parse_known_args()
    return known_args
import tensorflow.compat.v1 as tf
def op(scalars_layout, collections=None):
    """Creates a summary that contains a layout.

    When users navigate to the custom scalars dashboard, they will see a layout
    based on the proto provided to this function.

    Args:
      scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
        layout.
      collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[Graph Keys.SUMMARIES]`.

    Returns:
      A tensor summary op that writes the layout to disk.
    """
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    assert isinstance(scalars_layout, layout_pb2.Layout)
    summary_metadata = metadata.create_summary_metadata()
    # The layout proto is serialized into a scalar string tensor; the
    # dashboard deserializes it from the event file.
    return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG,
                                     tensor=tf.constant(
                                         scalars_layout.SerializeToString(),
                                         dtype=tf.string),
                                     collections=collections,
                                     summary_metadata=summary_metadata)
def customize_response_cros(data):
    """Build a customized cross-origin (CORS) JSON response."""
    response = make_response(jsonify(data))
    # Set the response headers:
    response.headers["Access-Control-Allow-Origin"] = '*'  # Domains allowed to use the response data; could also filter on the request's Host header.
    response.headers["Access-Control-Allow-Methods"] = 'POST'  # Allowed request methods
    response.headers["Access-Control-Allow-Headers"] = "x-requested-with,content-type"  # Allowed request headers
    return response
def shortdateformat(value, default_value=None):
    """
    Example value: datetime.strptime("2018-07-25 10:15:00", "%Y-%m-%d %H:%M:%S")
    Example output: '25 July'

    "shortdateformat" was designed for use in summary tables where space is tight and dates are shown on their own line.
    The original intended use was in conjunction with "timeformat" in admin app summary tables.
    It is now (Jan 2018) also used in briefs-frontend on the "Publish your requirements" page only.

    ** USE OUR STANDARD dateformat RATHER THAN THIS UNLESS THERE IS A GOOD REASON NOT TO **
    """
    # DISPLAY_SHORT_DATE_FORMAT is a module-level constant; localize=False
    # keeps the month name unlocalized.
    return _format_date(value, default_value, DISPLAY_SHORT_DATE_FORMAT, localize=False)
def build_suffix_array(text):
    """
    Build suffix array of the string text and
    return a list result of the same length as the text
    such that the value result[i] is the index (0-based)
    in text where the i-th lexicographically smallest
    suffix of text starts.
    """
    # Start from single-character sort, then repeatedly sort suffixes of
    # doubled length until the whole text length is covered.
    order = sort_characters(text)
    classes = compute_char_classes(text, order)
    text_len = len(text)
    span = 1
    while span < text_len:
        order = sort_doubled(text, span, order, classes)
        classes = update_classes(order, classes, span)
        span *= 2
    return order
import re
import os
import codecs
def load_page(content_path, page_path):
    """Load the page file and return the path, URL and contents"""
    # Derive the URL path from the part of page_path under content_path.
    match = re.match(URL_PATH_REGEX % content_path, page_path)
    if not match:
        raise PageParseException("Unable to parse page path [%s]" %
                                 content_path)
    url_path = match.group('path').rstrip('/').lstrip('/')
    if not os.path.exists(page_path):
        raise ContentException('Could not find file for content page "%s"' %
                               page_path)
    # Read the page file
    with codecs.open(page_path, 'r', encoding='utf8') as page_file:
        file_string = page_file.read()
    return page_path, url_path, file_string
from mne.utils import _time_mask
def compute_auc(dip, tmin=-np.inf, tmax=np.inf):
    """Compute the AUC values for a DipoleFixed object.

    :param dip: a DipoleFixed instance (anything else raises TypeError)
    :param tmin, tmax: time window, defaults to the whole epoch
    :returns: scalar AUC over the selected window
    """
    if not isinstance(dip, DipoleFixed):
        raise TypeError('dip must be a DipoleFixed, got "%s"' % (type(dip),))
    # Exactly one "dipole" channel is expected.
    pick = pick_types(dip.info, meg=False, dipole=True)
    if len(pick) != 1:
        raise RuntimeError('Could not find dipole data')
    time_mask = _time_mask(dip.times, tmin, tmax, dip.info['sfreq'])
    data = dip.data[pick[0], time_mask]
    # NOTE(review): sum(|data|) * dt would be the usual rectangle-rule AUC;
    # the extra len(data) factor here looks intentional but unusual — confirm.
    return np.sum(np.abs(data)) * len(data) * (1. / dip.info['sfreq'])
def left_join(ht1, ht2):
    """
    :param ht1: left hash table
    :param ht2: right hash table
    :return: list of joined values from both hash tables
    """
    joined_rows = []
    # Walk every bucket chain of the left table; every key on the left
    # appears in the output, paired with the right table's value (or None).
    for bucket in ht1.table:
        node = bucket
        while node is not None:
            key = node.val[0]
            joined_rows.append([key, ht1.get(key), ht2.get(key)])
            node = node.next
    return joined_rows
from typing import Tuple
import torch
import re
def load_pretrained_cifar10_model(
    path: str, resnet_size: int = 32,
) -> Tuple[nn.Module, DifferentiableNormalize]:
    """
    Loads a pretrained CIFAR-10 ResNet from the given path along with its
    associated normalizer.

    :param path: filesystem path to the checkpoint (expects a dict with a
        'state_dict' entry)
    :param resnet_size: selects cifar_resnets.resnet<size> as the architecture
    :returns: (model, normalizer) pair
    """
    model: nn.Module = getattr(cifar_resnets, f'resnet{resnet_size}')()
    # Load on CPU regardless of where the checkpoint was saved.
    model_state = torch.load(path, map_location=torch.device('cpu'))
    # Strip any leading 'module.' from parameter names (presumably added by
    # nn.DataParallel when the checkpoint was saved — confirm).
    model.load_state_dict({re.sub(r'^module\.', '', k): v for k, v in
                           model_state['state_dict'].items()})
    normalizer = DifferentiableNormalize(
        mean=config.CIFAR10_MEANS,
        std=config.CIFAR10_STDS,
    )
    return model, normalizer
import os
import glob
def find(path):
    """Find files in the filesystem in order

    Expands and normalizes relative paths.
    Ignores unreadable files and unexplorable directories.

    :param dir_path: Path to a config file or dir containing configs
    :returns: List of full paths of the files in the directory in lex. order
    """
    if path:
        # Normalize "~" and relative components to an absolute path.
        path = os.path.abspath(os.path.expanduser(path))
        # check_access is a module-level helper; unreadable locations yield
        # an empty result rather than an error.
        if not check_access(path):
            return []
        if os.path.isfile(path):
            return [path]
        return sorted(glob.glob(os.path.join(path, '*')))
    # NOTE(review): a falsy `path` falls through and returns None implicitly,
    # not the empty list the docstring suggests — confirm with callers.
import logging
def create_project(**kwargs):  # noqa: E501
    """Creates a project with an original network file.

    Creates a project with an original network file. # noqa: E501

    :param designation:
    :type designation: str
    :param description:
    :type description: str
    :param network_designation:
    :type network_designation: str
    :param network_directed:
    :type network_directed: bool
    :param network_multigraph:
    :type network_multigraph: bool
    :param network_file: Binary object which contains the network file with a standard network format.
    :type network_file: str
    :param additional_network_file: Binary object which contains an additional network file with a standard network format (especailly used for CSV imports).
    :type additional_network_file: str
    :param file_format:
    :type file_format: str

    :rtype: Project
    """
    body = dict(kwargs.items()).get('body')
    file = dict(kwargs.items()).get('network_file')
    additional_file = dict(kwargs.items()).get('additional_network_file')
    # Try to process and safe the file before accessing the Database
    try:
        file_format = body.get('file_format')
        network_file = NetworkFile(file_format, file, additional_file)
        node_list = network_file.parse_nodes()
    except Exception:
        logging.exception("Exception while handling the input file")
        e = http_exceptions.InternalServerError(
            description='Something went wrong! Please check if your network file is correct.')
        raise e
    try:
        # Persist the project plus its original and predicted networks.
        db = DatabaseConnector.get_db_instance()
        project_id = db.add_project(
            designation=body.get('designation'),
            description=body.get('description')
        )
        original_network_id = db.add_original_network_to_project(
            designation=body.get('network_designation'),
            directed=body.get('network_directed'),
            multigraph=body.get('network_multigraph'),
            project_id=project_id
        )
        predicted_network_id = db.add_predicted_network_to_project(
            designation=body.get('network_designation'),
            project_id=project_id
        )
        # Nodes are shared between both networks; edges only go into the
        # original network at creation time.
        nodes = db.add_nodes(node_list, original_network_id, predicted_network_id)
        edge_list = network_file.parse_edges(nodes)
        db.add_edges_to_original_network(edge_list, original_network_id)
        # Attach per-node attributes parsed from the input file, if any.
        for node in nodes:
            attribute_list = network_file.parse_attributes(node[0])
            if attribute_list:
                db.add_node_attributes(attribute_list, node[1])
        # Seed the predicted network with a copy of the original graph.
        graph = build_original_graph('project_id', project_id)
        save_predicted_graph_to_db(graph.copy(), predicted_network_id)
        # Every new project starts with a default evaluation configuration.
        default_evaluation_setup = {
            "random_seed": 42,
            "with_validation": False,
            "train_sampling_ratio": 0.8,
            "test_sampling_ratio": 0.9,
            "ml_preprocessing": False
        }
        db.add_or_update_evaluation_result(project_id, default_evaluation_setup)
        return Project(
            id=project_id,
            designation=body.get('designation'),
            description=body.get('description'),
            original_network_id=original_network_id,
            predicted_network_id=predicted_network_id
        )
    except Exception:
        logging.exception("Exception occured while inserting data in the database")
        e = http_exceptions.InternalServerError(
            description='Something went wrong! The input file seems to be wrong and the data could not be loaded into the database.')
        raise e
def _get_boto_client(cluster, access_key, secret_key):
    """
    Returns a boto client object that can be used to communicate with the Object
    Storage cluster.

    :param cluster: cluster identifier, substituted into BASE_URL_TEMPLATE
    :param access_key: S3 access key id
    :param secret_key: S3 secret access key
    """
    client = boto.connect_s3(aws_access_key_id=access_key,
                             aws_secret_access_key=secret_key,
                             host=BASE_URL_TEMPLATE.format(cluster),
                             calling_format=OrdinaryCallingFormat())
    # set this for later use
    client.obj_cluster = cluster
    return client
def triangleArea(a: Vec3, b: Vec3, c: Vec3) -> float:
    """
    Calculate area of triangle

    :return: area
    """
    # Half the magnitude of the cross product of two edge vectors.
    edge_ab = b - a
    edge_ac = c - a
    return cross3(edge_ab, edge_ac).length() / 2.0
async def read_object_name(app, device_id, addr):
    """
    Execute a single request using `ReadPropertyRequest`.
    This will read the `objectName` property of a remote device.

    :param app: An app instance
    :param device_id: BACnet device id (integer number)
    :param addr: The network address of the remote device
    :return: The object name value
    """
    # Awaits the app's request machinery; the device object identifier plus
    # the 'objectName' property selects the value being read.
    return await app.execute_request(
        ReadPropertyRequest(
            objectIdentifier=('device', device_id),
            propertyIdentifier='objectName',
            destination=Address(addr)
        )
    )
import requests
from bs4 import BeautifulSoup
def jws_omex_dict():
    """ Returns dictionary of available JWS combine archives.

    Scrapes NUM_PAGES listing pages (URL template defined at module level)
    and collects the combine-archive download links.

    :return: { id: download_url } dict
    """
    jws_omex = {}
    num_omex = 0
    for page_iter in range(NUM_PAGES):
        url = URL.format(page_iter+1)  # 1 based counting
        page = requests.get(url)
        if page.status_code == 200:
            soup = BeautifulSoup(page.content, 'html.parser')
            # select all <a> in <td>
            items = soup.select('td a')
            # only interested in the download links
            links = [a.get("href") for a in items if "combinearchive?download=1" in a.get('href')]
            print("N(page={}) = {}".format(page_iter+1, len(links)))
            num_omex += len(links)
            for url in links:
                # The archive id is the fourth path component of the link.
                tokens = url.split('/')
                name = tokens[3]
                jws_omex[name] = "http://jjj.mib.ac.uk" + url
    # pprint.pprint(jws_omex)
    print('---------')
    print(num_omex)
    return jws_omex
def ztrsv(A,
          x,
          Uplo=CblasLower,
          TransA=CblasNoTrans,
          Diag=CblasNonUnit):
    """
    returns x'

    This function computes inv(op(A)) x for x, where op(A) = A, A^T, A^H
    for TransA = CblasNoTrans, CblasTrans, CblasConjTrans. When Uplo is
    CblasUpper then the upper triangle of A is used, and when Uplo is
    CblasLower then the lower triangle of A is used. If Diag is
    CblasNonUnit then the diagonal of the matrix is used, but if Diag
    is CblasUnit then the diagonal elements of the matrix A are taken
    as unity and are not referenced.
    """
    # GSL solves in place, so work on a copy and leave the caller's x intact.
    xn = array_typed_copy(x)
    _gslwrap.gsl_blas_ztrsv(Uplo, TransA, Diag, A, xn)
    return xn
def get_trainable_vars(name=None):
    """Return the trainable variables.

    Parameters
    ----------
    name : str
        the scope

    Returns
    -------
    list of tf.Variable
        trainable variables
    """
    # TF1-style collection lookup via the compat.v1 shim; scope=None returns
    # every trainable variable in the default graph.
    return tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=name)
def player_to_string(game: reversi.ReversiGame, player_colour: str, player: ai_players.Player) \
        -> str:
    """ Returns the string representation of the type of the player.

    Preconditions:
        - player_colour in {'white', 'black'}
    """
    # get_human_player(): 1 when the human plays black, -1 when white
    # (presumably; inferred from the two branches below — confirm).
    if game.get_human_player() == 1 and player_colour == 'black':
        return 'Human'
    elif game.get_human_player() == -1 and player_colour == 'white':
        return 'Human'
    else:
        # the player is one of the AI players
        if isinstance(player, ai_players.RandomPlayer):
            return 'Random Moves'
        elif (isinstance(player, ai_players.MinimaxPlayer)
                or isinstance(player, ai_players.MinimaxABPlayer)):
            return 'Minimax ' + str(player.depth)
        # NOTE(review): any other AI player type falls through and returns
        # None implicitly, contradicting the -> str annotation — confirm.
from typing import Any
def encode_pd_timestamp(v: pd.Timestamp) -> Any:
    """
    Specializes :func:`encode` for invocations where ``v`` is an instance of
    the :class:`~pandas.Timestamp` class.
    """
    # `Timestamp.freq` was deprecated in pandas 1.x and removed in 2.x;
    # getattr keeps this working on both old and new pandas, encoding None
    # when no frequency is attached.
    freq = v.freqstr if getattr(v, "freq", None) else None
    return {
        "__kind__": kind_inst,
        "class": "pandas.Timestamp",
        "args": encode([str(v)]),
        "kwargs": {"freq": freq},
    }
async def getWebUserAmount(cls: "WebIndex", where: str = "1=1", values: tuple = ()) -> int:
    """ simply gives a number of all matched user """
    # COUNT(*) is aliased as `I`; selectQuery returns a list of row dicts.
    # NOTE(review): `where` is interpolated directly into the SQL string —
    # callers must never pass untrusted input here; `values` is parameterized.
    res: list = cls.Web.BASE.PhaazeDB.selectQuery(f"SELECT COUNT(*) AS `I` FROM `user` WHERE {where}", values)
    return res[0]['I']
def randomise_spikes(spiketrain, n_surrogates=1, decimals=None):
    """
    Generates surrogates of a spike train by spike time randomization.

    The surrogates are obtained by keeping the spike count of the original
    `spiketrain`, but placing the spikes randomly in the interval
    `[spiketrain.t_start, spiketrain.t_stop]`. The generated independent
    `neo.SpikeTrain` objects follow Poisson statistics (exponentially
    distributed inter-spike intervals).

    Parameters
    ----------
    spiketrain : neo.SpikeTrain
        The spike train from which to generate the surrogates.
    n_surrogates : int, optional
        Number of surrogates to be generated.
        Default: 1
    decimals : int or None, optional
        Number of decimal points for every spike time in the surrogates.
        If None, machine precision is used.
        Default: None

    Returns
    -------
    list of neo.SpikeTrain
        Each surrogate spike train obtained independently from `spiketrain` by
        randomly distributing its spikes in the interval
        `[spiketrain.t_start, spiketrain.t_stop]`.

    Examples
    --------
    >>> import quantities as pq
    >>> import neo
    ...
    >>> st = neo.SpikeTrain([100, 250, 600, 800] * pq.ms, t_stop=1 * pq.s)
    >>> print(randomise_spikes(st))  # doctest: +SKIP
    [<SpikeTrain(array([ 131.23574603,  262.05062963,  549.84371387,
                        940.80503832]) * ms, [0.0 ms, 1000.0 ms])>]
    >>> print(randomise_spikes(st, n_surrogates=2))  # doctest: +SKIP
    [<SpikeTrain(array([  84.53274955,  431.54011743,  733.09605806,
                         852.32426583]) * ms, [0.0 ms, 1000.0 ms])>,
     <SpikeTrain(array([ 197.74596726,  528.93517359,  567.44599968,
                         775.97843799]) * ms, [0.0 ms, 1000.0 ms])>]
    >>> print(randomise_spikes(st, decimals=0))  # doctest: +SKIP
    [<SpikeTrain(array([  29.,  667.,  720.,  774.]) * ms,
                 [0.0 ms, 1000.0 ms])>]
    """
    # Create surrogate spike trains as rows of a Quantity array: uniform
    # samples in [t_start, t_stop), one row per surrogate.
    sts = ((spiketrain.t_stop - spiketrain.t_start) *
           np.random.random(size=(n_surrogates, len(spiketrain))) +
           spiketrain.t_start).rescale(spiketrain.units)

    # Round the surrogate data to decimal position, if requested
    if decimals is not None:
        sts = sts.round(decimals)

    # Convert the Quantity array to a list of SpikeTrains, and return them.
    # Each row is sorted so spike times are monotonically increasing.
    return [neo.SpikeTrain(np.sort(st), t_start=spiketrain.t_start,
                           t_stop=spiketrain.t_stop,
                           sampling_rate=spiketrain.sampling_rate)
            for st in sts]
def interface_PSO(theta, args):
    """
    Function to interface the PSO with the ANFIS. Each particle has its own
    ANFIS instance.

    theta (nPop, n_var)
    learners (nPop, )
    J (nPop, )
    """
    args_PSO = (args[0], args[1])
    learners = args[2]
    n_pop = theta.shape[0]
    # One cost value per particle, evaluated by that particle's learner.
    costs = np.zeros(n_pop)
    for idx in range(n_pop):
        costs[idx] = learners[idx].create_model(theta[idx, :], args_PSO)
    return costs
def corr2d(X, K):
    """Compute the 2-D cross-correlation of input X with kernel K."""
    kernel_h, kernel_w = K.shape
    out_h = X.shape[0] - kernel_h + 1
    out_w = X.shape[1] - kernel_w + 1
    Y = mnp.zeros((out_h, out_w))
    # Slide the kernel over every valid window and take the elementwise
    # product-sum.
    for row in range(out_h):
        for col in range(out_w):
            window = X[row:row + kernel_h, col:col + kernel_w]
            Y[row, col] = (window * K).sum()
    return Y
from src.praxxis.sqlite import sqlite_scene
from src.praxxis.sqlite import sqlite_notebook
from src.praxxis.display import display_scene


def history(history_db, library_db, current_scene_db):
    """displays the notebook history of the scene

    :param history_db: path to the scene-history database
    :param library_db: path to the notebook-library database
    :param current_scene_db: path to the current scene's database
    :returns: list of notebook records, oldest first
    """
    curr_scene = sqlite_scene.get_current_scene(history_db)
    notebook_history = sqlite_scene.get_notebook_history(current_scene_db)
    display_scene.display_history(curr_scene, notebook_history)
    # get paths and format for writing into notebook list
    notebooks = []
    for notebook_info in notebook_history:
        # pass the library_db, notebook name, notebook library
        notebook_data = sqlite_notebook.get_notebook(library_db, notebook_info[1])[0]
        # insert(0, ...) reverses the history so the oldest entry comes first
        notebooks.insert(0, (notebook_data))
    sqlite_notebook.write_list(current_scene_db, notebooks)
    return notebooks
def get_optimal_parameters_from_dict(selected_dict, num_features):
    """ Find optimal parameters from dictionary of selected features

    Arguments
    ---------
    selected_dict: dictionary
        keys = parameters
        values = dictionary
            keys = task index
            values = list of list of selected features (for each subsample)
    num_features: int
        Total number of features

    Returns
    -------
    opt_params: string
        Optimal parameters, leading to highest consistency index mean
        of features selected for each subsample for each task
        => params leading to the best ci mean.
    """
    opt_params = ''
    opt_ci_mean = -1  # set to -1 because it is the worst case ci value
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3 -- use items() instead (same for the inner loop).
    for (params, selected_dict_p) in selected_dict.items():
        ci_list = []  # list of ci, one per task, computed with current params
        for (task_idx, sel_list) in selected_dict_p.items():
            ci_of_current_task = consistency_index_k(sel_list, num_features)
            ci_list.append(ci_of_current_task)
        ci_mean = np.mean(ci_list)
        # ">=" keeps the last parameter set on ties, matching the original.
        if ci_mean >= opt_ci_mean:
            opt_ci_mean = ci_mean
            opt_params = params
    return opt_params
async def _get_input_dialog(self: 'TelegramClient', dialog):
    """
    Returns a :tl:`InputDialogPeer`. This is a bit tricky because
    it may or not need access to the client to convert what's given
    into an input entity.
    """
    try:
        # SUBCLASS_OF_ID identifies the TL base type of the object.
        if dialog.SUBCLASS_OF_ID == 0xa21c9795:  # crc32(b'InputDialogPeer')
            # Already a dialog peer; just normalise its inner peer.
            dialog.peer = await self.get_input_entity(dialog.peer)
            return dialog
        elif dialog.SUBCLASS_OF_ID == 0xc91c90b6:  # crc32(b'InputPeer')
            return _tl.InputDialogPeer(dialog)
    except AttributeError:
        # Not a TL object at all; fall through and resolve it below.
        pass

    return _tl.InputDialogPeer(await self.get_input_entity(dialog))
from datetime import datetime
def kep_to_sat(kep,epoch,bstar=0.21109E-4,whichconst=wgs72,afspc_mode=False):
    """kep_to_sat(kep,epoch,bstar=0.21109E-4,whichconst=wgs72,afspc_mode=False)
    Converts a set of keplerian elements into a Satellite object.
    Args:
        kep(1x6 numpy array): the osculating keplerian elements at epoch,
            ordered as [semi-major axis (km, inferred from the mu value
            used below -- TODO confirm), eccentricity, inclination (deg),
            argument of perigee (deg), RAAN (deg), true anomaly (deg)]
        epoch(float): the epoch, as a POSIX timestamp (seconds since the
            Unix epoch, UTC)
        bstar(float): bstar drag coefficient
        whichconst(float): gravity model. refer pypi sgp4 documentation
        afspc_mode(boolean): refer pypi sgp4 documentation
    Returns:
        Satellite object: an sgp4 satellite object encapsulating the arguments
    """
    deg2rad = np.pi / 180.0; # 0.0174532925199433
    xpdotp = 1440.0 / (2.0 * np.pi); # 229.1831180523293  (rev/day -> rad/min)
    tumin = whichconst.tumin
    satrec = Satellite()
    satrec.error = 0;
    satrec.whichconst = whichconst # Python extension: remembers its consts
    satrec.satnum = 0
    dt_obj = datetime.utcfromtimestamp(epoch)
    t_obj = dt_obj.timetuple()
    # Fractional day-of-year for the TLE-style epoch field.
    satrec.epochdays = (t_obj.tm_yday +
                        t_obj.tm_hour/24 +
                        t_obj.tm_min/1440 +
                        t_obj.tm_sec/86400)
    satrec.ndot = 0
    satrec.nddot = 0
    satrec.bstar = bstar
    satrec.inclo = kep[2]
    satrec.nodeo = kep[4]
    satrec.ecco = kep[1]
    satrec.argpo = kep[3]
    # Convert true anomaly (kep[5]) to mean anomaly, which sgp4 expects.
    satrec.mo = __true_to_mean(kep[5],kep[1])
    # Mean motion (rev/day) from Kepler's third law; 398600.4405 is Earth's
    # gravitational parameter in km^3/s^2, hence kep[0] must be in km.
    satrec.no = 86400/(2*np.pi*(kep[0]**3/398600.4405)**0.5)
    satrec.no = satrec.no / xpdotp; # rad/min
    satrec.a = pow( satrec.no*tumin , (-2.0/3.0) );
    # ---- find standard orbital elements ----
    satrec.inclo = satrec.inclo * deg2rad;
    satrec.nodeo = satrec.nodeo * deg2rad;
    satrec.argpo = satrec.argpo * deg2rad;
    satrec.mo = satrec.mo * deg2rad;
    # Apogee/perigee altitudes in Earth radii (a is in Earth radii here).
    satrec.alta = satrec.a*(1.0 + satrec.ecco) - 1.0;
    satrec.altp = satrec.a*(1.0 - satrec.ecco) - 1.0;
    satrec.epochyr = dt_obj.year
    # Unix seconds -> Julian date (2440587.5 is the JD of 1970-01-01 00:00 UTC).
    satrec.jdsatepoch = epoch/86400.0 + 2440587.5
    satrec.epoch = dt_obj
    # ---------------- initialize the orbit at sgp4epoch -------------------
    # sgp4init takes the epoch in days since 1949 December 31 (JD 2433281.5).
    sgp4init(whichconst, afspc_mode, satrec.satnum, satrec.jdsatepoch-2433281.5, satrec.bstar,
             satrec.ecco, satrec.argpo, satrec.inclo, satrec.mo, satrec.no,
             satrec.nodeo, satrec)
    return satrec
def enumerate ():
    """ Returns an iterator to the features map.

    NOTE(review): this module-level function shadows the ``enumerate``
    builtin; renaming it would change the public interface, so only the
    Python-2-only ``iteritems`` call is modernized here.
    """
    # .items() works on Python 2 (list) and Python 3 (view); .iteritems()
    # raises AttributeError on Python 3.
    return __all_features.items ()
def _create_forward(out_node):
"""Create a user-friendly forward function.
Ensures that a single value instead of a tuple is returned if the user asked
for the gradient with respect to only one input.
Args:
out_node: The function definition AST.
Returns:
The function definition with potentially changed return statement.
"""
retval = out_node.body[0].body[-1]
if len(retval.value.elts) == 1:
retval.value = retval.value.elts[0]
return out_node | 80cdd1814d62b282c1cde37c783d97a067264e51 | 30,045 |
import os
import glob
import warnings
def get_geos_install_prefix():
    """Return GEOS installation prefix or None if not found.

    The GEOS_DIR environment variable, when set, is the only prefix
    searched; otherwise a list of conventional install locations is
    probed. A prefix qualifies when it contains both the geos_c header
    and at least one geos_c shared library. A RuntimeWarning is emitted
    when nothing is found.
    """
    # An explicit GEOS_DIR overrides the conventional search locations.
    env_candidate = os.environ.get("GEOS_DIR", None)
    if env_candidate is not None:
        candidates = [env_candidate]
    else:
        candidates = [os.path.expanduser("~/local"), os.path.expanduser("~"),
                      "/usr/local", "/usr", "/opt/local", "/opt", "/sw"]
    for prefix in candidates:
        libfiles = []
        libdirs = ["bin", "lib", "lib64"]
        # Windows: geos_c*.dll (no "lib" prefix); elsewhere: libgeos_c*.so*.
        libext = "dll" if os.name == "nt" else "so"
        libcode = "{0}geos_c".format("" if os.name == "nt" else "lib")
        libname = "{0}*.{1}*".format(libcode, libext)
        for libdir in libdirs:
            libfiles.extend(glob.glob(os.path.join(prefix, libdir, libname)))
        hfile = os.path.join(prefix, "include", "geos_c.h")
        # Both the header and at least one library must be present.
        if os.path.isfile(hfile) and libfiles:
            return prefix
    warnings.warn(" ".join([
        "Cannot find GEOS library and/or headers in standard locations",
        "('{0}'). Please install the corresponding packages using your",
        "software management system or set the environment variable",
        "GEOS_DIR to point to the location where GEOS is installed",
        "(for example, if 'geos_c.h' is in '/usr/local/include'",
        "and 'libgeos_c' is in '/usr/local/lib', then you need to",
        "set GEOS_DIR to '/usr/local'",
    ]).format("', '".join(candidates)), RuntimeWarning)
    return None
def _get_trip_from_id(trip_obj_list, trip_id):
""" Get a trip from a list, based on a trip id """
found_trip_obj = None
for trip_obj in trip_obj_list:
if trip_obj.id == trip_id:
found_trip_obj = trip_obj
break
return found_trip_obj | f2bbacfccda1e4ff778ba793ad238f744400f020 | 30,047 |
def density_plot(df, y_column, models, model_names=(), columns_to_exclude=()):
    """This function creates the density plot of predicted positive class probability on actual positive and negative
    data by each model in models in the same plot. It also computes the difference between the distributions on
    positive and negative data using Bhattacharyya distance, KL distance, and cross entropy (a.k.a. log-loss).
    Parameters
    ----------
    df : DataFrame
        Data to be plotted
    y_column : str
        Label of the class column
    models : array-like
        The model objects to be evaluated (must expose ``predict_proba``)
    model_names : array-like
        The name of the models to be shown in the legends
    columns_to_exclude : tuple, optional (default=())
        Labels of unwanted columns
    Returns
    -------
    plot_wrapper : pytalite.plotwrapper.PlotWrapper
        The PlotWrapper object that contains the information and data of the plot
    Raises
    ------
    ValueError
        If models is empty or models and model_names does not have the same length
    """
    # Get X, y array representation of data and predict probability
    X, y = df_to_arrays(df, y_column, columns_to_exclude)
    pos_idx = y == 1
    neg_idx = y == 0
    n_models = len(models)
    if n_models == 0:
        raise ValueError("no models to evaluate")
    if len(model_names) == 0:
        # Auto-generate "model 1", "model 2", ... legend labels.
        model_names = ["model %d" % (i + 1) for i in range(n_models)]
    if len(model_names) != n_models:
        raise ValueError("models and model_names must have the same length")
    # List and array to store data; one row per model, 1000 x-samples each.
    pos_data = np.empty((0, 1000))
    neg_data = np.empty((0, 1000))
    bds = []
    kls = []
    ces = []
    with plt.style.context(style_path):
        fig = plt.figure(figsize=(12, 9))
        # Two stacked panels sharing the x-axis: positives above, negatives below.
        grid = GridSpec(2, 1, height_ratios=[3.5, 3.5], hspace=0)
        ax1 = fig.add_subplot(grid[0])
        ax2 = fig.add_subplot(grid[1])
        scores = []
        # Compute density curve for all models
        for model, model_name in zip(models, model_names):
            y_prob = model.predict_proba(X)[:, 1]
            # Fit gaussian kernels on the data
            kernel_pos = st.gaussian_kde(y_prob[pos_idx])
            kernel_neg = st.gaussian_kde(y_prob[neg_idx])
            # Evaluation grid over [0, 1) in steps of 0.001.
            xs = np.arange(1000) / 1000
            pos_y = kernel_pos(xs)
            neg_y = kernel_neg(xs)
            # Normalize the curve (so the distance metrics compare
            # proper discrete distributions).
            pos_norm = (pos_y / pos_y.sum())[np.newaxis, :]
            neg_norm = (neg_y / neg_y.sum())[np.newaxis, :]
            # Compute all three scores
            bd = _bhattacharyya_distance(pos_norm, neg_norm, normalize=True)
            kl = st.entropy(pos_norm[0], neg_norm[0])
            ce = _cross_entropy(pos_norm, neg_norm, normalize=True)
            # Plot using the kernels
            line_plot(ax1, xs, pos_y, legend=model_name, line_color=None, line_label=False)
            line_plot(ax2, xs, neg_y, line_color=None, line_label=False)
            scores.append("%s: Bhattacharyya Distance: %.4f, KL Distance: %.4f, Cross-Entropy: %.4f"
                          % (model_name, bd, kl, ce))
            # Add data
            pos_data = np.vstack((pos_data, pos_y))
            neg_data = np.vstack((neg_data, neg_y))
            bds.append(bd)
            kls.append(kl)
            ces.append(ce)
        # Shared y-limits across both panels, with 10% headroom.
        ylim_max = max(pos_data.max(), neg_data.max()) * 1.1
        ylim_min = round(-ylim_max * 0.05, 1)
        # Add scores to plot as text
        # ax3.text(0.5, 0.5, "\n".join(scores), va="center", ha="center")
        config_axes(ax1, xticks=[], ylabel="Positive Density", ylim=(ylim_min, ylim_max))
        # The negative panel is y-inverted so the two densities mirror
        # each other around the shared x-axis.
        config_axes(ax2, y_invert=True, xlabel="Probability\n" + "\n".join(scores), ylabel="Negative Density",
                    ylim=(ylim_min, ylim_max))
        plt.show()
    return PlotWrapper(fig, (ax1, ax2), {"probability": xs, "pos_density": pos_data, "neg_density": neg_data,
                                         "Bhattacharyya": np.array(bds), "KL": np.array(kls),
                                         "cross_entropy": np.array(ces)})
def clip_chk(x, lb, ub, allow_nan=False):
    """Clip all element of `x` to be between `lb` and `ub` like :func:`numpy:numpy.clip`, but also check
    :func:`numpy:numpy.isclose`.
    Shapes of all input variables must be broadcast compatible.
    Parameters
    ----------
    x : :class:`numpy:numpy.ndarray`
        Array containing elements to clip.
    lb : :class:`numpy:numpy.ndarray`
        Lower limit in clip.
    ub : :class:`numpy:numpy.ndarray`
        Upper limit in clip.
    allow_nan : bool
        If true, we allow ``nan`` to be present in `x` without out raising an error.
    Returns
    -------
    x : :class:`numpy:numpy.ndarray`
        An array with the elements of `x`, but where values < `lb` are replaced with `lb`, and those > `ub` with `ub`.
    """
    assert np.all(lb <= ub)  # np.clip does not do this check
    x = np.asarray(x)
    # These are asserts not exceptions since clip_chk most used internally.
    # isclose_lte tolerates values that only violate the bound by
    # floating-point error; anything further out is a caller bug.
    if allow_nan:
        assert np.all(isclose_lte(lb, x) | np.isnan(x))
        assert np.all(isclose_lte(x, ub) | np.isnan(x))
    else:
        assert np.all(isclose_lte(lb, x))
        assert np.all(isclose_lte(x, ub))
    # After validation, snap the near-boundary values exactly into range.
    x = np.clip(x, lb, ub)
    return x
def grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """ Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel 1D indexes. Pixel coordinates are \
    returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then \
    downwards.
    For example:
    The pixel at the top-left, whose 2D index is [0,0], corresponds to 1D index 0.
    The fifth pixel on the top row, whose 2D index is [0,5], corresponds to 1D index 4.
    The first pixel on the second row, whose 2D index is [0,1], has 1D index 10 if a row has 10 pixels.
    The arc-second coordinate grid is defined by the class attribute origin, and coordinates are shifted to this \
    origin before computing their 1D grid pixel indexes.
    The input and output grids are both of shape (total_pixels, 2).
    Parameters
    ----------
    grid_arc_seconds_1d: ndarray
        The grid of (y,x) coordinates in arc seconds which is converted to 1D pixel indexes.
    shape : (int, int)
        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.
    pixel_scales : (float, float)
        The (y,x) arc-second to pixel scales of the original 2D array.
    origin : (float, flloat)
        The (y,x) origin of the grid, which the arc-second grid is shifted.
    Returns
    --------
    ndarray
        A grid of 1d pixel indexes with dimensions (total_pixels, 2).
    Examples
    --------
    grid_arc_seconds_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixels_1d = grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds_1d, shape=(2,2),
                                                                  pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """
    # First map arc-second coordinates to integer (y,x) pixel centres.
    grid_pixels = grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds_1d, shape=shape,
                                                               pixel_scales=pixel_scales, origin=origin)
    grid_pixel_indexes = np.zeros(grid_pixels.shape[0])
    # Row-major flattening: 1D index = y_pixel * row_width + x_pixel.
    for i in range(grid_pixels.shape[0]):
        grid_pixel_indexes[i] = int(grid_pixels[i,0] * shape[1] + grid_pixels[i,1])
    return grid_pixel_indexes
import fnmatch
def make_test_run_filter(
        complete: bool = False, failed: bool = False, incomplete: bool = False,
        name: str = None,
        newer_than: dt.datetime = None, older_than: dt.datetime = None,
        passed: bool = False, result_error: bool = False,
        show_skipped: bool = False, sys_name: str = None, user: str = None):
    """Generate a filter function for use by dir_db.select and similar
    functions. This operates on TestAttribute objects, so make sure to
    pass the TestAttribute class as the transform to dir_db functions.
    :param complete: Only accept complete tests
    :param failed: Only accept failed tests
    :param incomplete: Only accept incomplete tests
    :param name: Only accept names that match this glob.
    :param newer_than: Only accept tests that are more recent than this date.
    :param older_than: Only accept tests older than this date.
    :param passed: Only accept passed tests
    :param result_error: Only accept tests with a result error.
    :param show_skipped: Whether to accept skipped tests. NOTE(review):
        despite the ``bool`` annotation, the body compares this against the
        strings 'no' (exclude skipped) and 'only' (keep only skipped) --
        confirm what callers actually pass.
    :param sys_name: Only accept tests with a matching sys_name.
    :param user: Only accept tests started by this user.
    :return:
    """
    if sys_name == LOCAL_SYS_NAME:
        # Resolve the 'local' placeholder to this host's actual sys_name.
        sys_vars = system_variables.get_vars(defer=True)
        sys_name = sys_vars['sys_name']
    # select once so we only make one filter.
    def filter_test_run(test_attrs: TestAttributes) -> bool:
        """Determine whether the test run at the given path should be
        included in the set."""
        # Tri-state skip handling: 'no' drops skipped runs, 'only' keeps
        # nothing but skipped runs; any other value keeps everything.
        if show_skipped == 'no' and test_attrs.skipped:
            return False
        elif show_skipped == 'only' and not test_attrs.skipped:
            return False
        if complete and not test_attrs.complete:
            return False
        if incomplete and test_attrs.complete:
            return False
        if user and test_attrs.user != user:
            return False
        if sys_name and sys_name != test_attrs.sys_name:
            return False
        if passed and test_attrs.result != TestRun.PASS:
            return False
        if failed and test_attrs.result != TestRun.FAIL:
            return False
        if result_error and test_attrs.result != TestRun.ERROR:
            return False
        if older_than is not None and test_attrs.created > older_than:
            return False
        if newer_than is not None and test_attrs.created < newer_than:
            return False
        # Shell-style glob match on the test name (e.g. 'suite.*').
        if name and not fnmatch.fnmatch(test_attrs.name, name):
            return False
        return True
    return filter_test_run
def get_runnable_tasks(graph):
    """Parse a graph and return all runnable tasks.

    Returns a tuple ``(tasks, following_err)`` where ``tasks`` is the list of
    tasks whose predecessors are satisfied, and ``following_err`` maps each
    task removed from the graph to the names of the errored tasks that caused
    its removal.
    """
    tasks = []
    to_remove = []
    # tasks that follow task that raises an error
    following_err = dict()
    for tsk in graph.sorted_nodes:
        # The graph can be mutated mid-iteration (remove_successors_nodes
        # below), so re-check that this node is still present.
        if tsk not in graph.sorted_nodes:
            continue
        # since the list is sorted (breadth-first) we can stop
        # when we find a task that depends on any task that is already in tasks
        if set(graph.predecessors[tsk.name]).intersection(set(tasks)):
            break
        _is_runnable = is_runnable(graph, tsk)
        if _is_runnable is True:
            tasks.append(tsk)
            to_remove.append(tsk)
        elif _is_runnable is False:
            continue
        else:  # a previous task had an error
            # is_runnable returned the errored predecessor task(s) here
            # rather than a boolean.
            errored_task = _is_runnable
            # removing all successors of the errored task
            for task_err in errored_task:
                task_to_remove = graph.remove_successors_nodes(task_err)
                for tsk in task_to_remove:
                    # adding tasks that were removed from the graph
                    # due to the error in the errored_task
                    following_err.setdefault(tsk, [])
                    following_err[tsk].append(task_err.name)
    # removing tasks that are ready to run from the graph
    for nd in to_remove:
        graph.remove_nodes(nd)
    return tasks, following_err
def get_defaults(lang):
    """Get the language-specific defaults, if available in spaCy. This allows
    using lexical attribute getters that depend on static language data, e.g.
    Token.like_num, Token.is_stop, Doc.noun_chunks etc.
    lang (unicode): ISO 639-1 language code.
    RETURNS (Language.Defaults): The language defaults.
    """
    try:
        lang_cls = get_lang_class(lang)
    except ImportError:
        # No language data is installed for this code: fall back to the
        # generic, language-independent defaults.
        return Language.Defaults
    return lang_cls.Defaults
def h2(*text, **kwargs):
    """Return a header 2.

    Convenience wrapper: builds a fresh tydoc document and forwards all
    positional and keyword arguments to its ``h2`` method.
    """
    return tydoc().h2(*text, **kwargs)
def update_comment(comment_id, data):
    """
    update comment using its id.

    :param comment_id: primary key of the comment to update.
    :param data: mapping of attribute-name -> new value; each entry is set
        on the comment via setattr.
    :return: the serialized updated comment (comment_schema dump data).

    NOTE(review): ``Comment.query.get`` returns None for an unknown id,
    which would make setattr raise AttributeError here -- confirm callers
    validate the id first.
    """
    comment = Comment.query.get(comment_id)
    for attribute in data:
        setattr(comment, attribute, data[attribute])
    # Persist all attribute changes in a single commit.
    db.session.commit()
    return comment_schema.dump(comment).data
import os
def countOriginals(subfolderPath):
    """Return count of original vids.

    Counts the entries in *subfolderPath* whose names start with
    "Original_" and end with ".description".
    """
    return sum(
        1
        for entry in os.listdir(subfolderPath)
        if entry.startswith("Original_") and entry.endswith(".description")
    )
def RMSE(A, A_tilde):
    """
    Root mean square error. Gives the standard deviation of the residuals
    (prediction errors).
    Parameters
    ----------
    A : ndarray
        Forecast.
    A_tilde : ndarray
        Observation.
    Returns
    -------
    float
        Root mean square error.
    """
    residuals = A_tilde - A
    return np.sqrt(np.mean(np.abs(residuals) ** 2))
def remove_punct(tokens):
    """
    Remove punctuation marks from lists of tokens
    Parameters
    ----------
    tokens: list
        a nested list containing lists of tokens or a list of spacy docs
    Returns
    -------
    filtered_comments: list
        nested lists of tokens

    NOTE(review): relies on a module-level spaCy pipeline ``nlp`` (defined
    elsewhere in this module) to look up each token's is_punct flag.
    """
    # Keep only tokens whose vocabulary entry is not punctuation.
    filtered_comments = [[token for token in comment if nlp.vocab[token.text].is_punct == False] for comment in tokens]
    return filtered_comments
def to_camel_case(string: str) -> str:
    """
    Converts a ``snake_case`` string to ``camelCase``.
    :param string: A ``snake_case`` string.
    :return: A ``camelCase`` version of the input.
    """
    head, *tail = string.split("_")
    return head + "".join(part.capitalize() for part in tail)
import requests
def reconnect(user_data):
    """
    Attempt to perform a login to the Unistudium website, saving the cookies in user_data.

    :param user_data: per-user state dict; may hold a 'session'
        (requests.Session) and 'credentials' ({'username', 'password'}).
        A session is created here if missing.
    Returns:
        "OK" if the login was performed correctly, else a description with the error that can be used to inform the users.
    """
    # Check if user's session exists
    if 'session' not in user_data:
        user_data['session'] = requests.Session()
    # Getting credentials if available
    try:
        payload = {
            "username": user_data['credentials']['username'],
            "password": user_data['credentials']['password']
        }
    except KeyError:
        # No stored credentials: the user never logged in.
        return "Non hai effettuato ancora il login, effettualo tramite il comando /login."
    # Check if server is alive
    status_code = requests.head(LOGIN_URL).status_code
    if status_code != 200:
        # Fore presumably comes from colorama -- colors the console log line.
        print(Fore.RED + "[CONNECTION] Server irraggiungibile. Status code: " + str(status_code))
        return "Non riesco a contattare il server (CODE: %d), riprova più tardi..." % status_code
    # Check if the login is still valid (without performing a login)
    if user_data['session'].head(MAIN_URL).status_code == 200:
        return "OK"
    # Perform the login
    # A successful login ends up at MAIN_URL; any other final URL means the
    # credentials were rejected.
    if user_data['session'].post(LOGIN_URL, data=payload).url != MAIN_URL:
        return "Le credenziali fornite non sono valide. Riprova."
    return "OK"
from typing import Optional
from typing import Collection
from typing import List
from typing import Tuple
from typing import Set
from typing import Dict
import operator
def yake(
    doc: Doc,
    *,
    normalize: Optional[str] = "lemma",
    ngrams: int | Collection[int] = (1, 2, 3),
    include_pos: Optional[str | Collection[str]] = ("NOUN", "PROPN", "ADJ"),
    window_size: int = 2,
    topn: int | float = 10,
) -> List[Tuple[str, float]]:
    """
    Extract key terms from a document using the YAKE algorithm.
    Args:
        doc: spaCy ``Doc`` from which to extract keyterms.
            Must be sentence-segmented; optionally POS-tagged.
        normalize: If "lemma", lemmatize terms; if "lower", lowercase terms;
            if None, use the form of terms as they appeared in ``doc``.
            .. note:: Unlike the other keyterm extraction functions, this one
               doesn't accept a callable for ``normalize``.
        ngrams: n of which n-grams to consider as keyterm candidates.
            For example, `(1, 2, 3)`` includes all unigrams, bigrams, and trigrams,
            while ``2`` includes bigrams only.
        include_pos: One or more POS tags with which to filter for good candidate keyterms.
            If None, include tokens of all POS tags
            (which also allows keyterm extraction from docs without POS-tagging.)
        window_size: Number of words to the right and left of a given word
            to use as context when computing the "relatedness to context"
            component of its score. Note that the resulting sliding window's
            full width is ``1 + (2 * window_size)``.
        topn: Number of top-ranked terms to return as key terms.
            If an integer, represents the absolute number; if a float, value
            must be in the interval (0.0, 1.0], which is converted to an int by
            ``int(round(len(candidates) * topn))``
    Returns:
        Sorted list of top ``topn`` key terms and their corresponding YAKE scores.
    References:
        Campos, Mangaravite, Pasquali, Jorge, Nunes, and Jatowt. (2018).
        A Text Feature Based Automatic Keyword Extraction Method for Single Documents.
        Advances in Information Retrieval. ECIR 2018.
        Lecture Notes in Computer Science, vol 10772, pp. 684-691.
    """
    # validate / transform args
    ngrams = utils.to_collection(ngrams, int, tuple)
    include_pos = utils.to_collection(include_pos, str, set)
    if isinstance(topn, float):
        if not 0.0 < topn <= 1.0:
            raise ValueError(
                f"topn = {topn} is invalid; "
                "must be an int, or a float between 0.0 and 1.0"
            )
    # bail out on empty docs
    if not doc:
        return []
    # Both sets are filled in-place by the helper calls below.
    stop_words: Set[str] = set()
    seen_candidates: Set[str] = set()
    # compute key values on a per-word basis
    word_occ_vals = _get_per_word_occurrence_values(
        doc, normalize, stop_words, window_size
    )
    # doc doesn't have any words...
    if not word_occ_vals:
        return []
    # Per-word occurrence count, keyed by word id.
    word_freqs = {w_id: len(vals["is_uc"]) for w_id, vals in word_occ_vals.items()}
    word_scores = _compute_word_scores(doc, word_occ_vals, word_freqs, stop_words)
    # compute scores for candidate terms based on scores of constituent words
    term_scores: Dict[str, float] = {}
    # do single-word candidates separately; it's faster and simpler
    if 1 in ngrams:
        candidates = _get_unigram_candidates(doc, include_pos)
        _score_unigram_candidates(
            candidates,
            word_freqs,
            word_scores,
            term_scores,
            stop_words,
            seen_candidates,
            normalize,
        )
    # now compute combined scores for higher-n ngram and candidates
    candidates = list(
        ext_utils.get_ngram_candidates(
            doc, [n for n in ngrams if n > 1], include_pos=include_pos,
        )
    )
    attr_name = _get_attr_name(normalize, True)
    ngram_freqs = itertoolz.frequencies(
        " ".join(getattr(word, attr_name) for word in ngram) for ngram in candidates
    )
    _score_ngram_candidates(
        candidates, ngram_freqs, word_scores, term_scores, seen_candidates, normalize,
    )
    # build up a list of key terms in order of increasing score
    # (in YAKE, lower scores indicate more important terms)
    if isinstance(topn, float):
        topn = int(round(len(seen_candidates) * topn))
    sorted_term_scores = sorted(
        term_scores.items(), key=operator.itemgetter(1), reverse=False,
    )
    # Drop near-duplicate terms before taking the top n.
    return ext_utils.get_filtered_topn_terms(
        sorted_term_scores, topn, match_threshold=0.8
    )
def parse_qsub_defaults(parsed):
    """Unpack QSUB_DEFAULTS.

    Args:
        parsed: Either a whitespace-separated string of qsub arguments, or an
            already-split sequence of argument tokens.

    Returns:
        dict: option name (leading dashes stripped) -> value. Tokens without
        an ``=`` map to an empty string.
    """
    d = parsed.split() if isinstance(parsed, str) else parsed
    options = {}
    for arg in d:
        if "=" in arg:
            # maxsplit=1 keeps values that themselves contain '=' intact
            # (e.g. '-v VAR=a=b'); the unguarded split raised ValueError
            # ("too many values to unpack") on such tokens.
            k, v = arg.split("=", 1)
            options[k.strip("-")] = v.strip()
        else:
            options[arg.strip("-")] = ""
    return options
import torch
def load_embeddings(word_map=None, binary=True):
    """
    Creates an embedding tensor for the specified word map, for loading into the model.
    :param word_map: word map. If None, it will be comprised from the embeddings vocabulary
    :param binary: whether the word2vec/emoji2vec files are in binary format
    :return: a wordmap (the supplied one or the constructed one), embeddings in
        the same order as the words in the word map, dimension of embeddings
    """
    print("Loading embeddings...")
    # PATH_WORD2VEC / PATH_EMOJI2VEC are module-level constants defined elsewhere.
    wv = KeyedVectors.load_word2vec_format(PATH_WORD2VEC, binary=binary)
    ev = KeyedVectors.load_word2vec_format(PATH_EMOJI2VEC, binary=binary)
    # Find embedding dimension
    emb_dim = wv.vector_size
    if word_map == None:
        # Indices start at 1 so index 0 stays free for <pad> below.
        # NOTE(review): enumerate restarts for the emoji vocabulary, so emoji
        # indices collide with word indices -- confirm this is intended.
        word_map = {k: v + 1 for emb in [wv.key_to_index.keys(), ev.key_to_index.keys()] for v, k in enumerate(emb)}
        word_map['<unk>'] = len(word_map) + 1
        word_map['<start>'] = len(word_map) + 1
        word_map['<end>'] = len(word_map) + 1
        word_map['<pad>'] = 0
    vocab = set(word_map.keys())
    # Create tensor to hold embeddings, initialize (random init for tokens
    # that appear in neither embedding file).
    embeddings = torch.FloatTensor(len(vocab), emb_dim)
    _init_embedding(embeddings)
    # Iterate through the vector pairs
    for emb_word in vocab:
        # Word vectors take precedence over emoji vectors for shared tokens.
        if emb_word in wv.key_to_index:
            embeddings[word_map[emb_word]] = torch.FloatTensor(wv.get_vector(emb_word).copy())
        elif emb_word in ev.key_to_index:
            embeddings[word_map[emb_word]] = torch.FloatTensor(ev.get_vector(emb_word).copy())
    return word_map, embeddings, emb_dim
from typing import Dict
import json
import requests
def update_business_profile(business: Business, profile_info: Dict) -> Dict:
    """Create or update the business' contact record (email/phone) in the
    account service.

    NOTE(review): the previous docstring said "Set the legal type of the
    business", but the body only posts/puts contact information.

    :param business: the business whose identifier is used in the contact URL.
    :param profile_info: dict with an 'email' key (required for any update)
        and an optional 'phone' key.
    :return: None on success, otherwise a dict with an 'error' message.
    """
    if not business or not profile_info:
        return {'error': babel('Business and profile_info required.')}
    # contact phone is optional
    phone = profile_info.get('phone', '')
    error = {'error': 'Unknown handling'}
    # Without an email there is nothing to update; the 'Unknown handling'
    # default error is returned unchanged in that case.
    if email := profile_info.get('email'):
        # assume the JSONSchema ensures it is a valid email format
        token = AccountService.get_bearer_token()
        account_svc_entity_url = current_app.config['ACCOUNT_SVC_ENTITY_URL']
        # Create an entity record
        data = json.dumps(
            {'email': email,
             'phone': phone,
             'phoneExtension': ''
             }
        )
        url = ''.join([account_svc_entity_url, '/', business.identifier, '/contacts'])
        # First attempt: POST a new contact record.
        rv = requests.post(
            url=url,
            headers={**AccountService.CONTENT_TYPE_JSON,
                     'Authorization': AccountService.BEARER + token},
            data=data,
            timeout=AccountService.timeout
        )
        if rv.status_code in (HTTPStatus.OK, HTTPStatus.CREATED):
            error = None
        if rv.status_code == HTTPStatus.NOT_FOUND:
            error = {'error': 'No business profile found.'}
        if rv.status_code == HTTPStatus.METHOD_NOT_ALLOWED:
            error = {'error': 'Service account missing privileges to update business profiles'}
        # A contact already exists: fall back to PUT to update it in place.
        if rv.status_code == HTTPStatus.BAD_REQUEST and \
                'DATA_ALREADY_EXISTS' in rv.text:
            put = requests.put(
                url=''.join([account_svc_entity_url, '/', business.identifier]),
                headers={**AccountService.CONTENT_TYPE_JSON,
                         'Authorization': AccountService.BEARER + token},
                data=data,
                timeout=AccountService.timeout
            )
            if put.status_code in (HTTPStatus.OK, HTTPStatus.CREATED):
                error = None
            else:
                error = {'error': 'Unable to update existing business profile.'}
    return error
def nextpow2(i):
    """ Find the next power of 2 for number i.

    Returns the smallest power of two that is >= i (1 for i <= 1).
    """
    power = 1
    while power < i:
        power <<= 1
    return power
from typing import Sequence
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
def _extra_topo_sort(bad_ordering: Sequence[DiscoveredExtension]) -> Sequence[DiscoveredExtension]:
    """
    Simple depth-first search version of a topological sort, but without
    recursion.

    :param bad_ordering: extensions in an order that may violate dependency
        constraints.
    :return: the same extensions, reordered so that each extension's
        ``depends_on`` and ``implements`` targets precede it.
    :raises ValueError: if the dependency graph contains a cycle.
    """
    lookup: Dict[str, DiscoveredExtension] = {}
    for node in bad_ordering:
        lookup[node.name] = node
    ret: List[DiscoveredExtension] = []
    visiting: Set[str] = set()
    visited: Set[str] = set()
    # Explicit DFS stack of (node name, state): state 0 = first visit
    # (push children), state 1 = children done (emit node).
    remaining: List[Tuple[str, int]] = [(node.name, 0) for node in bad_ordering]
    # This isn't really necessary, but makes things dependable for testing
    # and gives a reliable, consistent load order.
    remaining.sort(key=lambda t: t[0])
    log(TRACE, _extra_topo_sort, 'Performing topo sort of {0}', bad_ordering)
    while remaining:
        node_name, state = remaining.pop()
        log(TRACE, _extra_topo_sort, 'Inspecting {0}, {1}', node_name, state)
        node = lookup[node_name]
        if state == 0:
            if node_name in visited:
                continue
            if node_name in visiting:
                # A node re-entered while still on the current DFS path
                # means a cycle.
                # Better exception? This should not happen, based on the previous
                # searching.
                raise ValueError('Not a DAG')
            log(TRACE, _extra_topo_sort, ' - Visiting')
            visiting.add(node_name)
            # Re-push ourselves with state 1 so we're emitted after all
            # children have been processed.
            remaining.append((node_name, 1))
            for child in node.depends_on:
                log(TRACE, _extra_topo_sort, ' -- depends on {0}', child.name)
                remaining.append((child.name, 0))
            for child in node.implements:
                log(TRACE, _extra_topo_sort, ' -- implements {0}', child.name)
                remaining.append((child.name, 0))
            log(TRACE, _extra_topo_sort, 'Remaining to search: {0}', remaining)
        elif state == 1:
            log(TRACE, _extra_topo_sort, ' - Finished visit')
            visiting.remove(node_name)
            visited.add(node_name)
            ret.append(node)
    log(TRACE, _extra_topo_sort, 'Order: {0}', ret)
    return tuple(ret)
def getclasesbyid_(numid):
    """
    Returns all defined classes by id [number].

    Looks up the class identified by ``numid`` on behalf of the authenticated
    user, records the access via insert_general_record, and returns a JSON
    response with ``data`` and ``code`` keys.

    Codes: 200 on success; 403 when the user may not see the class (or has an
    unsupported role); 500 on unexpected errors.
    """
    data = get_info_token()
    user_ = data['User']
    rol_ = data['Rol']
    data_response = ''
    # Professors and students go through the same lookup; the previous code
    # duplicated the branch per role (and the Student copy forgot to set
    # data_response on error, leaving it empty).
    if rol_ in ('Professor', 'Student'):
        try:
            bool_, data_response = getclassbyid_(data['User'], numid)
            if bool_:
                code = 200
            else:
                data_response = 'Forbidden'
                code = 403
        except Exception as e:
            # print(e) is valid on both Python 2 and 3; the former
            # 'print e' is a SyntaxError under Python 3.
            print(e)
            data_response = 'Internal Error'
            code = 500
    else:
        # Unknown role: previously 'code' was left unbound here, so the
        # record/insert below crashed with a NameError. Answer explicitly.
        data_response = 'Forbidden'
        code = 403
    insert_general_record('getclassbyid/[id]',
                          {'data': data_response,
                           'code': code}
                          , user_)
    return jsonify({'data': data_response, 'code': code})
import json
def load_versions():
    """Load Bioversions data.

    Returns:
        The parsed JSON content of ``VERSIONS_PATH``.
    """
    # JSON files are UTF-8 by specification; being explicit avoids depending
    # on the platform's locale encoding (e.g. cp1252 on Windows).
    with open(VERSIONS_PATH, encoding="utf-8") as file:
        return json.load(file)
async def retrieve_document(document_id: str, collection: str) -> dict:
    """
    Fetch a single document by its ObjectId from the given Mongo collection.

    :param document_id: hex string of the document's ObjectId
    :param collection: name of the collection to query
    :return: the document, post-processed by ``document_id_helper``
    :raises ValueError: if no document with that id exists in the collection
    """
    document_filter = {"_id": ObjectId(document_id)}
    # Walrus operator: bind and truth-test the lookup result in one step.
    if document := await greens.app.state.mongo_collection[collection].find_one(document_filter):
        return await document_id_helper(document)
    else:
        raise ValueError(f"No document found for {document_id=} in {collection=}")
import requests
def get_kalliope_poststukken_uit(path, session, from_,
                                 to=None,
                                 dossier_types=None):
    """
    Perform the API-call to get all poststukken-uit that are ready to be processed.
    :param path: url of the api endpoint that we want to fetch
    :param session: a Kalliope session, as returned by open_kalliope_api_session()
    :param from_: start boundary of timerange for which messages are requested
    :param to: end boundary of timerange for which messages are requested
    :param dossier_types: Only return messages associated to these types of dossier
    :returns: tuple of poststukken
    :raises requests.exceptions.HTTPError: when any page request fails
    """
    # Timestamps are sent as ISO-8601 without microseconds.
    params = {
        'vanaf': from_.replace(microsecond=0).isoformat(),
        'aantal': MAX_REQ_CHUNK_SIZE  # page size per request
    }
    if to:
        params['tot'] = to.replace(microsecond=0).isoformat()
    if dossier_types:
        params['dossierTypes'] = ','.join(dossier_types)
    poststukken = []
    # Build the first page's URL, then follow the API's pagination links.
    req_url = requests.Request('GET', path, params=params).prepare().url
    while req_url:
        helpers.log("literally requesting: {}".format(req_url))
        r = session.get(req_url)
        if r.status_code == requests.codes.ok:
            r_content = r.json()
            poststukken += r_content['poststukken']
            # 'volgende' (Dutch: "next") holds the next-page URL, or a
            # falsy value on the last page, ending the loop.
            req_url = r_content['volgende']
        else:
            # Prefer the JSON error body; fall back to the raw response
            # when the body is not JSON.
            try:
                errorDescription = r.json()
            except Exception as e:
                errorDescription = r
            raise requests.exceptions.HTTPError('Failed to get Kalliope poststuk uit (statuscode {}): {}'.format(r.status_code,
                                                                                                                 errorDescription))
    return poststukken
import re
def export_image(filename=None, type='PNG', resolution=None, units=None, height=None, width=None, zoom=None,
                 network=None, base_url=DEFAULT_BASE_URL):
    """ Save the current network view as an image file.
    The image is cropped per the current view in Cytoscape. Consider applying :meth:`fit_content` prior to export.
    Args:
        filename (str): Full path or path relavtive to current working directory, in addition to the name of the file.
            Extension is automatically added based on the ``type`` argument. If blank, the current network name will be used.
        type (str): Type of image to export, e.g., PNG (default), JPEG, PDF, SVG, PS (PostScript).
        resolution (int): The resolution of the exported image, in DPI. Valid only for bitmap formats, when the selected
            width and height 'units' is inches. The possible values are: 72 (default), 100, 150, 300, 600.
        units (str) The units for the 'width' and 'height' values. Valid only for bitmap formats, such as PNG and JPEG.
            The possible values are: pixels (default), inches.
        height (float): The height of the exported image. Valid only for bitmap formats, such as PNG and JPEG.
        width (float): The width of the exported image. Valid only for bitmap formats, such as PNG and JPEG.
        zoom (float): The zoom value to proportionally scale the image. The default value is 100.0. Valid only for bitmap
            formats, such as PNG and JPEG
        network (str or SUID or None): Name or SUID of the network or view. Default is the "current" network active in Cytoscape.
            If a network view SUID is provided, then it is validated and returned.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.
    Returns:
        dict: {'file': name of file} contains absolute path to file that was written
    Raises:
        CyError: if network or view doesn't exist
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
    Examples:
        >>> export_image('output/test', type='JPEG', units='pixels', height=1000, width=2000, zoom=200)
        {'file': 'C:\\Users\\CyDeveloper\\tests\\output\\test.jpeg'}
        >>> export_image('output/test', type='PDF', network='My Network')
        {'file': 'C:\\Users\\CyDeveloper\\tests\\output\\test.pdf'}
        >>> export_image(type='PNG', resolution=600, units='inches', height=1.7, width=3.5, zoom=500, network=13098)
        {'file': 'C:\\Users\\CyDeveloper\\tests\\output\\test.png'}
    """
    cmd_string = 'view export' # a good start
    # filename must be supplied
    if not filename: filename = networks.get_network_name(network, base_url=base_url)
    # view must be supplied
    view_SUID = get_network_view_suid(network, base_url=base_url)
    # optional args
    # Each optional argument is appended to the CyREST command string only
    # when supplied, so Cytoscape falls back to its own defaults otherwise.
    if resolution: cmd_string += ' Resolution="' + str(resolution) + '"'
    if units: cmd_string += ' Units="' + str(units) + '"'
    if height: cmd_string += ' Height="' + str(height) + '"'
    if width: cmd_string += ' Width="' + str(width) + '"'
    if zoom: cmd_string += ' Zoom="' + str(zoom) + '"'
    # Append the extension only when the filename doesn't already end in it.
    # TODO: It looks like the '.' should be escaped ... true?
    # TODO: If a lower case comparison is going to be done, shouldn't filename also be lower-case?
    if re.search('.' + type.lower() + '$', filename) is None: filename += '.' + type.lower()
    file_info = sandbox.sandbox_get_file_info(filename)
    # A non-empty modifiedTime means the file already exists in the sandbox.
    if len(file_info['modifiedTime']) and file_info['isFile']:
        narrate('This file already exists. A Cytoscape popup will be generated to confirm overwrite.')
    full_filename = file_info['filePath']
    # Send the assembled command; 'options' carries the image format.
    res = commands.commands_post(
        '%s OutputFile="%s" options="%s" view="SUID:%s"' % (cmd_string, full_filename, type.upper(), view_SUID),
        base_url=base_url)
    # TODO: Added double quotes to SUID
    return res
import scipy
def lstsq_cholesky(
    coefs: np.ndarray,
    result: np.ndarray,
) -> np.ndarray:
    """Solve the ordinary-least-squares problem via the normal equations.

    Forms the symmetric positive-definite system ``coefs.T @ coefs`` and
    solves it with a solver specialized for such matrices (Cholesky-based,
    via ``assume_a="pos"``).
    """
    normal_matrix = coefs.T @ coefs
    rhs = coefs.T @ result
    return scipy.linalg.solve(normal_matrix, rhs, assume_a="pos")
import os
def get_packages(package):
    """Return the root package directory and all sub-package directories.

    A directory counts as a package when it contains an ``__init__.py`` file.
    """
    package_dirs = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, "__init__.py")):
            package_dirs.append(dirpath)
    return package_dirs
import sys
def load_class_by_alias_or_classname(namespace, name):
    """Load a class using stevedore alias or the class name.
    :param namespace: The namespace where the alias is defined.
    :param name: The alias or class name of the class to be loaded.
    :returns: Class if it can be loaded.
    :raises ImportError: if class cannot be loaded.
    """
    if not name:
        LOG.error("Alias or class name is not set")
        raise ImportError(_("Class not found."))
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(
            namespace, name, warn_on_missing_entrypoint=False)
        class_to_load = mgr.driver
    except RuntimeError:
        # Capture the alias-lookup failure immediately so it can still be
        # logged later if the class-name fallback below also fails.
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            # Log both failures (alias lookup and direct import) before
            # surfacing a single generic ImportError to the caller.
            LOG.error("Error loading class by alias",
                      exc_info=e1_info)
            LOG.error("Error loading class by class name",
                      exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load
import warnings
def _standardize(signals, demean=True, normalize=True, inplace=True,
                 verbose=False):
    """ Center and norm a given signal (time is along first axis)
    Attention: this will not center constant signals
    but will replace these with colums of ones
    Parameters
    ==========
    signals: numpy.ndarray
        Timeseries to standardize
    demean: bool
        if demeaning is required
    normalize: bool
        if True, shift timeseries to zero mean value and scale
        to unit standard deviation (divides by the per-column std).
        NOTE(review): an older commented-out line below suggests this once
        scaled to unit energy (sum of squares) -- the current code divides
        by std; docstring updated to match the code.
    inplace: bool
        if False, operate on (and return) a copy of `signals`;
        if True (default), `signals` is modified in place.
    verbose: bool
        if True, print diagnostic information.
    Returns
    =======
    std_signals: numpy.ndarray
        copy of signals, normalized.
    """
    if not inplace:
        signals = signals.copy()
    # Per-column std computed BEFORE demeaning; demeaning does not change it.
    std = signals.std(axis=0)
    if demean:
        # Columns with (near-)zero variance are not centered; after the
        # subtraction below they are overwritten with columns of ones.
        not_to_demean = std < TINY
        signals -= signals.mean(axis=0)
        shape_constant_signals = (signals.shape[0], not_to_demean.sum())
        signals[:, not_to_demean] = np.ones(shape_constant_signals)
        if verbose: print('not to demean nb of col: ', not_to_demean.sum())
        if verbose: print('signals.mean() ', signals.mean())
    if signals.shape[0] == 1:
        # A single time point would standardize to all zeros; bail out.
        warnings.warn('Standardization of 3D signal has been requested but '
                      'would lead to zero values. Skipping.')
        return signals
    if normalize:
        if not demean:
            # remove mean if not already detrended
            signals -= signals.mean(axis=0)
            if verbose: print(signals.mean())
        #std = np.sqrt((signals ** 2).sum(axis=0))
        std[std < TINY] = 1. # avoid divide by 0
        # np.finfo(np.float).eps or TINY?
        if verbose: print('(std < TINY).sum() = ',(std < TINY).sum())
        signals /= std
    return signals
def bbox_to_poly(bboxes: np.ndarray) -> np.ndarray:
    """Convert axis-aligned bounding boxes to 4-vertex polygons.

    Expects bboxes in xyxy format (x1, y1, x2, y2). Each box becomes a 1D
    array with 8 entries, every consecutive pair being one vertex (starting
    from the top-left corner and going around the box).

    Works with a single bbox (shape ``(4,)``, returns shape ``(8,)``) or
    multiple bboxes (shape ``(N, 4)``, returns shape ``(N, 8)``).
    """
    bboxes = np.asarray(bboxes)
    # BUG FIX: the documented single-bbox case previously raised an
    # IndexError because of the 2D slicing below. Promote a lone box to a
    # batch of one, and squeeze the result back at the end.
    single = bboxes.ndim == 1
    if single:
        bboxes = bboxes[np.newaxis, :]
    polys = np.concatenate([bboxes[:, :2], bboxes[:, 0:1], bboxes[:, 3:4],
                            bboxes[:, 2:], bboxes[:, 2:3], bboxes[:, 1:2]],
                           axis=1)
    return polys[0] if single else polys
def waypts_2_pwsplines(wp_traj, dt, degree=1, plot=False):
    """
    Interpolate sparse multi-dimensional waypoints with one spline per
    state dimension.
    Parameters
    ----------
    wp_traj: horizon * n_s, a sequence of waypoints.
    dt: duration of 1 time step of wp_traj.
    degree: the degree of the spline fit.
    plot: bool, whether to plot or not.
    Returns
    ----------
    fs: list with length = n_s, one spline representation (splrep tck) per
        state dimension.
    dts: time stamps of the waypoints throughout the trajectory.
    """
    # splrep requires 1 <= k <= 5; cubic splines are recommended and even
    # degrees are discouraged, especially with small smoothing values.
    assert 1 <= degree <= 5
    n_dims = wp_traj.shape[1]
    # Waypoints span t = 0 .. end_time with end_time = horizon * dt.
    horizon = wp_traj.shape[0] - 1
    end_time = horizon * dt
    dts, step = np.linspace(0.0, end_time, num=horizon + 1,
                            endpoint=True, retstep=True)
    assert abs(step - dt) < 1e-5, "step={}, dt={}".format(step, dt)
    assert dts.shape[0] == wp_traj.shape[0]
    fs = [interpolate.splrep(x=dts, y=wp_traj[:, dim].T, k=degree)
          for dim in range(n_dims)]
    if plot:
        dense_ts, _ = np.linspace(0.0, end_time, num=1000,
                                  endpoint=True, retstep=True)
        fig, ax = plt.subplots()
        ax.plot(dts, wp_traj, 'o', label='data')
        dense_vals = np.zeros((len(dense_ts), n_dims), dtype=np.float32)
        for dim in range(n_dims):
            # ext=2 makes splev raise a ValueError when evaluated outside
            # the interval defined by the knot sequence.
            dense_vals[:, dim] = interpolate.splev(x=dense_ts, tck=fs[dim], ext=2)
        for dim in range(n_dims):
            ax.plot(dense_ts, dense_vals[:, dim], label="pwspline")
        ax.legend(loc='upper right', ncol=2)
        plt.show()
    return fs, dts
def _bin_data(aa, bb, bins=10, verbose=False):
"""
If unbinned data has come in, do something smart
with it here.
Uses numpy.histogram for binning.
bins can be:
- int: number of bins
- list or array: bin boundaries, from min to max, half open on right,
like numpy, when bins=[1, 2, 3, 4], the bin edges will be [1,2), [2,3)
and [3,4]. Note that min and max of data can fall out of this!
- str: name of binning method recognized by np.histogram_bin_edges, one of:
auto, fd, doane, scott, stone, rice, sturges, sqrt,
see docs of numpy.histogram_bin_edges
- True: binning will be determined by np.hist
The bins will be the same for both populations.
"""
data = np.array(list(aa) + list(bb))
# First determine bin edges on all data if necessary, then bin.
_, bin_edges = np.histogram(data, bins)
bin_a, _ = np.histogram(aa, bin_edges)
bin_b, _ = np.histogram(bb, bin_edges)
if verbose:
print(f"Bin edges that will be used: {np.round(bin_edges, decimals=2)}")
print("Bin values for population1:", bin_a)
print("Bin values for population2:", bin_b)
return bin_a, bin_b | a938cabaa2678a89cb1402d553041d409bfa4967 | 30,078 |
import logging
def initialize_logger(logger, logger_id, progress_bar=None, log_queue=None):
    """
    Initialize logger for the :class:`pyro.infer.mcmc` module.
    :param logger: logger instance.
    :param str logger_id: identifier for the log record,
        e.g. chain id in case of multiple samplers.
    :param progress_bar: a :class:`tqdm.tqdm` instance.
    :param log_queue: if given, log records are routed through a
        :class:`QueueHandler` (and `progress_bar` is ignored).
    :returns: the configured logger.
    """
    # Reset handler with new `progress_bar`.
    logger.handlers = []
    logger.propagate = False
    if log_queue:
        # Queue-based logging takes precedence over the progress bar.
        handler = QueueHandler(log_queue)
        format = "[%(levelname)s %(msg_type)s %(logger_id)s]%(message)s"
        progress_bar = None
    elif progress_bar:
        format = "%(levelname).1s \t %(message)s"
        handler = TqdmHandler()
    else:
        raise ValueError("Logger cannot be initialized without a "
                         "valid handler.")
    handler.setFormatter(logging.Formatter(format))
    # Wrap the handler so records can be interleaved with the progress bar,
    # and stamp every record with this sampler's logger_id.
    logging_handler = MCMCLoggingHandler(handler, progress_bar)
    logging_handler.addFilter(MetadataFilter(logger_id))
    logger.addHandler(logging_handler)
    return logger
def test_meta_plus_classmethod(namespaceable, namespace):
    """Test using a classmethod in a Namespace, while messing with metaclasses.
    This might have been purely for coverage of some kind? I forget.
    """
    # A metaclass that is itself namespaceable, carrying an (empty) namespace.
    class Meta(namespaceable, type(namespaceable)):
        """A throwaway test metaclass."""
        with namespace() as namespace_:
            pass
    class Test(namespaceable, metaclass=Meta):
        """A throwaway test class, for testing classmethods."""
        with namespace() as namespace_:
            @classmethod
            def cls_mthd(cls):
                """Return that a call occurred."""
                return 'called'
    # The classmethod must be reachable through the namespace on both an
    # instance and the class itself.
    assert Test().namespace_.cls_mthd() == 'called'
    assert Test.namespace_.cls_mthd() == 'called'
def EncodeConstants(const_dict):
    """the NPU requires that weights are compressed and bias/scales are 'encoded', both
    of which are performed by this pass.
    This pass modifies both the constant dict to contain the post-encoding values of the
    constants and the IR to adjust buffer types/sizes/accesses so they align with the
    encoded constants. Calls to the Vela API are made to perform the actual compression/
    encoding.
    """
    # Filled in by _ftransform; maps parameter index -> encoded constant data.
    new_const_dict = {}
    def collect_encoding_definitions(stmt, old_buffer_to_const):
        """Walk `stmt` and record which buffers must be replaced by encoded
        versions, plus any buffer-to-buffer copies encountered."""
        # Map from copy destination to copy source.
        copy_map = {}
        # List of buffer copies that occurred
        copied_buffers = []
        # List of encoded buffer information
        constant_buffer_replacements = []
        def _align_scale_bias(tir_extern_call, bias):
            """Align the scale_bias to 16 bytes."""
            value_bytes = bytearray()
            value_bytes.extend(bias.tobytes())
            # Align to 16
            remainder = (len(value_bytes)) % 16
            if remainder > 0:
                value_bytes.extend(bytearray(16 - remainder))
            value = np.frombuffer(value_bytes, dtype="uint8")
            return value
        accel_config = vela_api.get_accelerator_config()
        def _encode_weights(tir_extern_call, weights):
            """Encode the weights for a TIR extern call."""
            value_bytes = vela_api.encode_weights(tir_extern_call, weights, accel_config)
            value = np.frombuffer(value_bytes, dtype="uint8")
            return value
        def _declare_constant_buffer(old_buffer, encoded_constants, split_idx):
            """Create a new buffer and add the old buffer and its pointer to the
            rewriting maps."""
            new_buffer = tvm.tir.decl_buffer(
                shape=[len(encoded_constants)],
                dtype=str(encoded_constants.dtype),
                name=old_buffer.name + "_encoded",
                scope=old_buffer.scope(),
            )
            constant_buffer_replacements.append(
                {
                    "old_buffer": old_buffer,
                    "new_buffer": new_buffer,
                    "encoded_constants": encoded_constants,
                    "split_idx": split_idx,
                }
            )
        def _encode_weights_or_bias(buffer1, buffer2, stmt, encode_func):
            """Encode the weights or align the bias either for one or two cores,
            depending on the variant."""
            constant = old_buffer_to_const[buffer1]
            # If we have just one core, encode the whole constant
            if buffer2 is None:
                new_const = encode_func(stmt, constant)
                return new_const, None
            # Assume that the constant tensor has not been flattened yet
            assert len(constant.shape) != 1
            channels = constant.shape[0]
            split_const = np.split(constant, channels, axis=0)
            # Even output channels go to the first core, odd to the second.
            const_list = [split_const[i] for i in range(channels) if i % 2 == 0]
            const_to_encode = np.concatenate(const_list, axis=0)
            new_const = encode_func(stmt, const_to_encode)
            split_idx = len(new_const)
            # Encode half of the constant separately for the other core if it exists
            # NOTE(review): this asserts both cores reference the same source
            # buffer -- confirm that is the intended invariant.
            assert buffer1.same_as(buffer2)
            const2_list = [split_const[i] for i in range(channels) if i % 2 == 1]
            const2_to_encode = np.concatenate(const2_list, axis=0)
            new_const2 = encode_func(stmt, const2_to_encode)
            new_const = np.append(new_const, new_const2).astype("uint8")
            return new_const, split_idx
        def _visit(stmt):
            if isinstance(stmt, tvm.tir.Call):
                op = str(stmt.args[0].value)
                # Handle copies as a special-case by propagating the buffer information
                # from the read to the write pointer.
                if op == "ethosu_copy":
                    read_buffer = stmt.args[1].buffer
                    write_buffer = stmt.args[3].buffer
                    # Assert writing to the base of the write_var (pre-StorageRewrite)
                    assert list(stmt.args[3].indices) == [0]
                    assert list(stmt.args[1].indices) == [0]
                    copied_buffers.append({"source": read_buffer, "dest": write_buffer})
                    copy_map[write_buffer] = read_buffer
                ops_with_weights = {
                    "ethosu_conv2d": tirtocs.translate_ethosu_conv2d,
                    "ethosu_depthwise_conv2d": tirtocs.translate_ethosu_depthwise_conv2d,
                }
                if op in ops_with_weights:
                    npu_op, _ = ops_with_weights[op](stmt)
                    # Encode the weights
                    weights_buffer = npu_op.weights[0].address.buffer
                    if weights_buffer in copy_map:
                        weights_buffer = copy_map[weights_buffer]
                    # In case of U65 512 mac variant the weights are split across two cores
                    # and need to be encoded separately
                    weights2_buffer = (
                        npu_op.weights[1].address.buffer
                        if accel_config == vapi.NpuAccelerator.Ethos_U65_512
                        else None
                    )
                    if weights2_buffer in copy_map:
                        weights2_buffer = copy_map[weights2_buffer]
                    new_weights, split_idx = _encode_weights_or_bias(
                        weights_buffer, weights2_buffer, stmt, _encode_weights
                    )
                    _declare_constant_buffer(weights_buffer, new_weights, split_idx)
                    # Align the scale_bias to 16 bytes
                    scale_bias_buffer = npu_op.biases[0].address.buffer
                    if scale_bias_buffer in copy_map:
                        scale_bias_buffer = copy_map[scale_bias_buffer]
                    scale_bias2_buffer = (
                        npu_op.biases[1].address.buffer
                        if accel_config == vapi.NpuAccelerator.Ethos_U65_512
                        else None
                    )
                    if scale_bias2_buffer in copy_map:
                        scale_bias2_buffer = copy_map[scale_bias2_buffer]
                    new_scale_bias, split_idx = _encode_weights_or_bias(
                        scale_bias_buffer, scale_bias2_buffer, stmt, _align_scale_bias
                    )
                    _declare_constant_buffer(scale_bias_buffer, new_scale_bias, split_idx)
        tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
        return {
            "copied_buffers": copied_buffers,
            "constant_buffer_replacements": constant_buffer_replacements,
        }
    def transform_stmt(
        stmt, buf_remap, var_remap, pointer_to_buffer, new_buffer_to_const, new_buffer_to_split_idx
    ):
        """Rewrite `stmt` so all accesses target the remapped (encoded) buffers."""
        def _visit_rewrite(stmt):
            if isinstance(stmt, tvm.tir.Call):
                # For extern calls, we need to rewrite pairs of arguments corresponding to
                # base address load and the length of the load.
                old_args = list(stmt.args)
                new_args = [stmt.args[0]]
                for prev_arg, arg in zip(old_args[:-1], old_args[1:]):
                    # If the previous argument was a load from an
                    # encoded buffer, the current should be a length.
                    if (
                        isinstance(prev_arg, tvm.tir.BufferLoad)
                        and prev_arg.buffer in new_buffer_to_const
                    ):
                        buffer_size = np.prod(list(prev_arg.buffer.shape))
                        arg = buffer_size
                        # We have to check for split weights/bias for conv2d and depthwise_conv2d
                        # NOTE(review): "depthwise_conv2d" here does not match the
                        # "ethosu_depthwise_conv2d" op name used in ops_with_weights
                        # above -- confirm whether this branch can trigger for depthwise.
                        if old_args[0] in ("ethosu_conv2d", "depthwise_conv2d"):
                            # We have split weights/bias
                            if prev_arg.buffer in new_buffer_to_split_idx:
                                split_idx = new_buffer_to_split_idx[prev_arg.buffer]
                                # The first half of the split buffer
                                if prev_arg.indices[0] == 0:
                                    arg = split_idx
                                # the second half of the split buffer
                                else:
                                    arg = buffer_size - split_idx
                    new_args.append(arg)
                return tvm.tir.Call(stmt.dtype, stmt.op, new_args, stmt.span)
            if isinstance(stmt, tvm.tir.Allocate):
                # Where a pointer needs rewriting, the allocate for it must be rewritten
                allocate_pointer = stmt.buffer_var
                if allocate_pointer in var_remap:
                    new_allocate_pointer = var_remap[allocate_pointer]
                    new_buffer = pointer_to_buffer[new_allocate_pointer]
                    return tvm.tir.Allocate(
                        new_buffer.data,
                        new_buffer.dtype,
                        new_buffer.shape,
                        stmt.condition,
                        stmt.body,
                        stmt.span,
                    )
            # The following rewrites would be better expressed by just
            # rewriting the Buffers. However ir_transform doesn't
            # visit Buffers, so instead we do the next best thing and
            # rewrite the nodes which contain the Buffers.
            if isinstance(stmt, tvm.tir.BufferLoad):
                if stmt.buffer in buf_remap:
                    new_buffer = buf_remap[stmt.buffer]
                    new_indices = stmt.indices
                    offset = new_indices[0]
                    if offset != 0 and new_buffer in new_buffer_to_split_idx:
                        offset = new_buffer_to_split_idx[new_buffer]
                    return tvm.tir.BufferLoad(buf_remap[stmt.buffer], [offset], stmt.span)
            if isinstance(stmt, tvm.tir.AttrStmt):
                node_pointer = stmt.node
                if node_pointer in var_remap:
                    return tvm.tir.AttrStmt(
                        var_remap[node_pointer],
                        stmt.attr_key,
                        stmt.value,
                        stmt.body,
                        stmt.span,
                    )
            return None
        return tvm.tir.stmt_functor.ir_transform(
            stmt,
            None,
            _visit_rewrite,
            ["tir.Call", "tir.Allocate", "tir.BufferLoad", "tir.AttrStmt"],
        )
    def _ftransform(f, mod, ctx):
        """PrimFunc-level pass body: encode constants and rewrite all accesses."""
        # Step 0: Unpack the constant dictionary in terms of the
        # functions buffers.
        old_buffer_to_const = {}
        for i, param in enumerate(f.params):
            if i in const_dict:
                old_buffer_to_const[f.buffer_map[param]] = const_dict[i]
        # Step 1: Collect information on the buffers that will be
        # replaced by encodings.
        buffer_information = collect_encoding_definitions(f.body, old_buffer_to_const)
        # Step 2: Generate variable/buffer remaps, based on the
        # collected information.
        buf_remap = {}
        new_buffer_to_const = {}
        new_buffer_to_split_idx = {}
        # Any encoded buffers must be replaced
        for info in buffer_information["constant_buffer_replacements"]:
            buf_remap[info["old_buffer"]] = info["new_buffer"]
            new_buffer_to_const[info["new_buffer"]] = info["encoded_constants"]
            if info["split_idx"]:
                new_buffer_to_split_idx[info["new_buffer"]] = info["split_idx"]
        # Any buffers that are copied into from an encoded buffer must
        # be replaced.
        for info in buffer_information["copied_buffers"]:
            copy_source = info["source"]
            # Follow remap chains so the copy source reflects the final buffer.
            while copy_source in buf_remap:
                copy_source = buf_remap[copy_source]
            copy_dest = info["dest"]
            if copy_source.shape != copy_dest.shape or copy_source.dtype != copy_dest.dtype:
                new_dest = tvm.tir.decl_buffer(
                    shape=copy_source.shape,
                    dtype=copy_source.dtype,
                    name=copy_dest.name,
                    scope=copy_dest.scope(),
                )
                buf_remap[copy_dest] = new_dest
                if copy_source in new_buffer_to_const:
                    new_buffer_to_const[new_dest] = new_buffer_to_const[copy_source]
                if copy_source in new_buffer_to_split_idx:
                    new_buffer_to_split_idx[new_dest] = new_buffer_to_split_idx[copy_source]
        # Define additional dependent lookup tables.
        var_remap = {old.data: new.data for (old, new) in buf_remap.items()}
        pointer_to_buffer = {
            buf.data: buf for (old, new) in buf_remap.items() for buf in [old, new]
        }
        # Step 3: Then perform the rewrites
        new_body = transform_stmt(
            f.body,
            buf_remap,
            var_remap,
            pointer_to_buffer,
            new_buffer_to_const,
            new_buffer_to_split_idx,
        )
        # Step 4: Rewrite the buffer map and const dict to instead use the encoded versions
        new_buffer_map = {}
        for i, param in enumerate(f.params):
            buffer = f.buffer_map[param]
            if buffer in buf_remap:
                buffer = buf_remap[buffer]
            if buffer in new_buffer_to_const:
                new_const_dict[i] = new_buffer_to_const[buffer].flatten()
            elif buffer in old_buffer_to_const:
                new_const_dict[i] = old_buffer_to_const[buffer].flatten()
            new_buffer_map[param] = buffer
        new_f = tvm.tir.PrimFunc(
            f.params,
            new_body,
            f.ret_type,
            new_buffer_map,
            f.preflattened_buffer_map,
            f.attrs,
            f.span,
        )
        return new_f
    def _encode_constants(mod):
        """Entry point: split constants per core, then run the encoding pass."""
        mod, divided_const_dict = DivideConstants(const_dict)(mod)
        # Refresh const_dict in place so _ftransform sees the divided constants.
        const_dict.clear()
        for key, value in divided_const_dict.items():
            const_dict[key] = value
        transform_func = tvm.tir.transform.prim_func_pass(
            _ftransform, opt_level=0, name="tir.contrib.ethos-u.encode_constants"
        )
        new_func = transform_func(mod)
        return new_func, new_const_dict
    return _encode_constants
import collections
def count_tweet_shed_words_freq(tweet_text, ind_shed_word_dict, shed_word_ind_dict, shed_words_set):
    """
    Count the frequency of selected Hedonometer words in tweet text.
    :param tweet_text: String of text field of tweet
    :param ind_shed_word_dict: dict mapping shed_word_ind -> shed_word;
        unused here, kept for interface compatibility with callers
    :param shed_word_ind_dict: dict mapping shed_word -> shed_word_ind
    :param shed_words_set: set of Hedonometer words to count
    :return: dict of shed_word_ind to shed_word_freq mapping
    """
    # Tokenization follows the Hedonometer methodology:
    # - a word is any contiguous set of characters bounded by white space
    #   (so misspellings, any language, hyperlinks, etc. all count)
    # - matching is case-insensitive and no stemming is performed
    tweet_text_words = tweet_text.lower().split()
    counter = collections.Counter(tweet_text_words)
    # Iterate the Counter directly instead of materializing list(items()).
    return {int(shed_word_ind_dict[word]): int(freq)
            for word, freq in counter.items() if word in shed_words_set}
def parse_date(text):
    """Parse a date/time string and return its POSIX timestamp.

    Returns None when no text is given.
    """
    if not text:
        return None
    return dateparser.parse(text).timestamp()
def format(table, field, fmt, **kwargs):
    """
    Convenience function to format all values in the given `field` using the
    `fmt` format string.
    The ``where`` keyword argument can be given with a callable or expression
    which is evaluated on each row and which should return True if the
    conversion should be applied on that row, else False.
    """
    def apply_format(value):
        return fmt.format(value)
    return convert(table, field, apply_format, **kwargs)
def colorbias(img, refcolor=np.array([1.,0,0])):
    """ Compute Color Bias: per-pixel similarity of `img` to `refcolor`.

    Both the image and the reference color are converted to HSV and embedded
    in a cylinder-like (x, y, value) space; the result is 1 minus the
    Euclidean distance in that space, so higher values mean closer to the
    reference color.

    NOTE(review): assumes `img` is an RGB float image in [0, 1] (as expected
    by skimage.color.rgb2hsv) -- confirm with callers. Returned array keeps a
    trailing singleton channel axis.
    """
    img_hsv = skimage.color.rgb2hsv(img)
    refcolor = skimage.color.rgb2hsv(refcolor.reshape(1,1,3)) # to make it compatible
    #dH = np.abs(np.sin((img_hsv[...,0] - refcolor[...,0])))
    #dS = np.abs(img_hsv[...,1] - refcolor[...,1])
    #dV = np.abs(img_hsv[...,2] - refcolor[...,2])
    # Map (h, s, v) to Cartesian coordinates: hue becomes an angle, saturation
    # a radius, value the height -- avoids the hue wrap-around problem.
    hsv2xyz = lambda h,s,v : np.stack([s*np.sin(h*2*np.pi), s*np.cos(h*2*np.pi), v], axis=-1)
    xyz_ref = hsv2xyz(*refcolor.transpose((2,0,1)))
    xyz_img = hsv2xyz(*img_hsv.transpose((2,0,1)))
    return 1 - ((xyz_ref - xyz_img)**2).sum(axis=-1, keepdims=True)**.5
import six
import numbers
import collections
def walk_json(e, dict_fct=i, list_fct=i, num_fct=i, str_fct=i, bool_fct=i, null_fct=i, not_found=not_found_default):
    """
    Go throught a json and call each function accordingly of the element type
    for each element, the value returned is used for the json output
    This doesn't change the input json, but re-create a new json object.
    (calling it without any function return a copy of a json for example)
    The calling is deep-first.
    ex : ['a', {'b':3}] will call :
    - str_fct('a')
    - num_fct(3)
    - dict_fct({'b':3})
    - list_fct(['a', {'b':3}])
    and if every function is set to return None
    ex : ['a', {'b':3}] will call :
    - str_fct('a')
    - num_fct(3)
    - dict_fct({'b':None})
    - list_fct([None, None])
    :param e:
    :param dict_fct:
    :param list_fct:
    :param num_fct:
    :param str_fct:
    :param bool_fct:
    :param null_fct:
    :param not_found:
    :return:
    """
    if e is None:
        return null_fct(e)
    # BUG FIX: bool must be tested before numbers.Number -- bool is a
    # subclass of int (a registered Number), so the numeric branch would
    # otherwise swallow booleans and bool_fct would never be called.
    if isinstance(e, bool):
        return bool_fct(e)
    if isinstance(e, six.string_types):
        return str_fct(e)
    if isinstance(e, numbers.Number):
        return num_fct(e)
    param = {  # only create it when needed
        'dict_fct': dict_fct, 'list_fct': list_fct, 'num_fct': num_fct,
        'str_fct': str_fct, 'bool_fct': bool_fct, 'null_fct': null_fct,
        'not_found': not_found,
    }
    # collections.Mapping/Iterable aliases were removed in Python 3.10;
    # use the collections.abc module (available via the collections import).
    if isinstance(e, collections.abc.Mapping):
        return dict_fct({k: walk_json(v, **param) for k, v in e.items()})
    if isinstance(e, collections.abc.Iterable):
        return list_fct([walk_json(v, **param) for v in e])
    return not_found(e)
def colIm(z):
    """Returns a colour where log(Im(z)) is represented by hue.
    This makes it easy to see where Im(z) converges to 0"""
    # NOTE(review): np.log yields -inf for Im(z) == 0 and nan for Im(z) < 0,
    # which propagates into the hue channel -- confirm callers only pass
    # values with positive imaginary part.
    h = np.log(z.imag)*pi
    # Lightness follows the real part, clamped away from pure black/white.
    l = np.clip(0.5+0.05*z.real,0.1,0.9)
    s = 1
    # hsl2rgb is a module-level helper (defined elsewhere in this file).
    c = hsl2rgb(h,s,l)
    return c
def gsl_blas_zdotc(*args, **kwargs):
    """gsl_blas_zdotc(gsl_vector_complex const * X, gsl_vector_complex const * Y, gsl_complex * dotc) -> int"""
    # SWIG-generated thin wrapper around GSL's BLAS level-1 conjugated dot
    # product: writes the result into *dotc* and returns an int status code
    # (per GSL convention, 0 on success).
    return _gslwrap.gsl_blas_zdotc(*args, **kwargs)
def random_neighbour(vec, myid, n):
    """Flip one random bit inside agent ``myid``'s slice of a binary vector.

    The result is a unit Hamming distance away from the input, with the
    flipped position drawn uniformly from ``[myid*n, (myid+1)*n)``.
    Note: ``vec`` is modified in place and also returned.

    Args:
        vec (list or numpy.ndarray): An input vector
        myid (int): An id of an agent of interest
        n (int): Number of tasks allocated to a single agent

    Returns:
        list: A vector with one bit flipped for agent myid
    """
    flip_idx = np.random.choice(range(myid * n, (myid + 1) * n))
    vec[flip_idx] = 1 - vec[flip_idx]
    return vec
import torch
def _degree_of_endstopping(model, block, image, weight_id0, weight_id1, weight_id2):
    """Passes image to model and records the activations of block. The
    activations are normalized to be in [0, 1] and then summed over using
    different weighted masks.
    Parameters
    ----------
    model : nn.Module
        [description]
    block : nn.Module
        [description]
    image : np.array
        test image to compute the degree of endstopping
        (single-channel; replicated to 3 channels below)
    weight_id0 : np.array
        mask for intrinsic dimension 0
    weight_id1 : np.array
        mask for intrinsic dimension 1
    weight_id2 : np.array
        mask for intrinsic dimension 2
    Returns
    -------
    id0 : list of float
        For each feature map: the intrinsic dimension 0 value
    id1 : list of float
        For each feature map: the intrinsic dimension 1 value
    id2 : list of float
        For each feature map: the intrinsic dimension 2 value
    activations : np.array
        actual activations of block when using image as input
    """
    # Hook that captures `block`'s output during the forward pass.
    act_getter = ActivationGetter(block)
    # Grayscale image -> 3-channel batch of one on the compute device.
    image = torch.Tensor(image[np.newaxis, :, :])
    image = torch.cat([image] * 3, 0).unsqueeze(0).to(get_device())
    # zero mean and standard deviation of one
    # this is the easiest way to have a proper normalization
    image = (image - image.mean()) / image.std()
    model(image)
    activations = act_getter.out
    activations = activations.detach().cpu().numpy()
    activations = normalize_act(activations)
    id0 = []
    id1 = []
    id2 = []
    # One weighted sum per feature map and per intrinsic-dimension mask.
    for i in range(activations.shape[1]):
        tmp = activations[0, i, ...]
        id0.append((tmp.copy() * weight_id0).sum())
        id1.append((tmp.copy() * weight_id1).sum())
        id2.append((tmp.copy() * weight_id2).sum())
    return id0, id1, id2, activations
def posts(parsed):
    """Tally every type of fence post required by the parsed plan.

    Removal entries (posts and fences) are ignored. Returns a tuple of
    (t, corner, line, end, gate, total steel) post counts.
    """
    t_posts = 0
    corner_posts = 0
    end_posts = 0
    gate_posts = 0
    line_posts = 0
    for post in parsed.posts():
        if post.isRemoval:
            continue
        if post.postType == 'tPost':
            t_posts += 1
        elif post.postType == 'cornerPost':
            corner_posts += 1
        elif post.postType == 'endPost':
            end_posts += 1
        elif post.postType == 'gatePost':
            gate_posts += 1
    for fence in parsed.fences:
        if fence.isRemoval:
            continue
        feet = fence.length / 12
        # One line post per started 8-ft span; exact multiples of 8 ft get
        # one fewer interior line post.
        if feet % 8 == 0:
            line_posts += feet // 8 - 1
        else:
            line_posts += feet // 8
    steel_posts = t_posts + corner_posts + line_posts + end_posts + gate_posts
    return t_posts, corner_posts, line_posts, end_posts, gate_posts, steel_posts
def _compute_populations(mvts: pd.DataFrame, label_col_name) -> dict:
    """
    A private method that computes the population corresponding to each class label.
    :param mvts: The dataframe whose class population is of interest.
    :param label_col_name: The column-name corresponding to the class labels in `mvts`.
    :return: A dictionary of class labels (as keys) and class populations (as values).
    """
    labels = _extract_labels(mvts, label_col_name)
    per_class = _decompose_mvts(mvts, labels, label_col_name)
    return {label: len(subset) for label, subset in per_class.items()}
def plot_avg_sum_capacity_comparison(
    df: pd.DataFrame, port1: str, port2: str, vessel_type: str
) -> go.Figure:
    """
    Returns a figure for the first chart on the Compare tab. It shows per day comparison between
    average sum of capacity by applied conditions.
    :param df: Pandas DataFrame, input data
    :param port1: str, a port to compare
    :param port2: str, a port to compare
    :param vessel_type: str, vessel type of interest
    :return: Plotly figure
    """
    data = helpers.filter_by_vessel_and_port(
        df=df, port1=port1, port2=port2, vessel_type=vessel_type
    )
    # Per-day total deadweight tonnage (sum_dwt) for each port separately.
    data_port_1 = (
        data[data["port"] == port1]
        .groupby(by="date")
        .sum()
        .reset_index()[["date", "sum_dwt"]]
    )
    data_port_2 = (
        data[data["port"] == port2]
        .groupby(by="date")
        .sum()
        .reset_index()[["date", "sum_dwt"]]
    )
    # One bar series per port, grouped side-by-side per day.
    fig_data = [
        go.Bar(
            x=data_port_1["date"].tolist(),
            y=data_port_1["sum_dwt"].tolist(),
            name=port1,
            marker_color=styles.COLOR_APPSILON_1,
        ),
        go.Bar(
            x=data_port_2["date"].tolist(),
            y=data_port_2["sum_dwt"].tolist(),
            name=port2,
            marker_color=styles.COLOR_APPSILON_8,
        ),
    ]
    return go.Figure(
        data=fig_data,
        layout=styles.generate_plot_layout(
            x_title=strings.CHART_COMPARE_CAPACITY_X,
            y_title=strings.CHART_COMPARE_CAPACITY_Y,
            bar_mode="group",
        ),
    )
def fetchUrlPages(url: str, cacheDir: str = None) -> dict:
    """
    Request data from the API, yielding the paginated 'data' payloads one
    page at a time (each page dict gets an added 'endPoint' key).
    https://openbanking-brasil.github.io/areadesenvolvedor-fase1/#introducao-paginacao
    """
    root = fetchUrl(url, cacheDir = cacheDir)
    if root is None or not isinstance(root, dict):
        return None
    if not 'data' in root:
        return None
    data = root['data']
    data['endPoint'] = url
    yield data
    # Knuppe: We could use the total page count from the 'meta' block, but
    # since buggy provider APIs are expected, we only follow the 'next'
    # pagination link and stop when it is absent.
    nextPage = get(root, 'links.next', valueType=str, required = False)
    if nextPage is None:
        return
    if nextPage.startswith('http://') or nextPage.startswith('https://'):
        # BUG FIX: delegate to the recursive generator with `yield from`;
        # a bare `yield` here produced a generator object as a "page".
        yield from fetchUrlPages(nextPage, cacheDir = cacheDir)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the rinnaitouch integration from a config entry.

    Creates (or reuses) the RinnaiSystem controller for the configured IP,
    performs an initial status poll, stores the per-entry data, and forwards
    setup to the entity platforms.

    :raises ConfigEntryNotReady: if the unit cannot be reached, so Home
        Assistant retries setup later.
    """
    ip_address = entry.data.get(CONF_HOST)
    _LOGGER.debug("Get controller with IP: %s", ip_address)
    try:
        system = RinnaiSystem.get_instance(ip_address)
        # Scene support not wired up yet:
        # scenes = await system.getSupportedScenes()
        scenes = []
        await system.get_status()
    except Exception as err:
        # Exception already covers ConnectionError/ConnectionRefusedError;
        # any failure talking to the unit just defers setup for a retry.
        raise ConfigEntryNotReady from err
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = RinnaiData(system=system, scenes=scenes)
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
def num_neighbours(lag=1):
    """
    Calculate number of neighbour pixels for a given lag.

    Counts the cells on the ring at distance ``lag`` around a centre pixel:
    the full (2*lag+1)^2 window minus the inner (2*(lag-1)+1)^2 window.

    Parameters
    ----------
    lag : int
        Lag distance, defaults to 1.

    Returns
    -------
    int
        Number of neighbours
    """
    outer = (2 * lag + 1) ** 2
    inner = (2 * (lag - 1) + 1) ** 2
    return outer - inner
import requests
def get_solr_data_recommend(function, reader, rows=5, sort='entry_date', cutoff_days=5, top_n_reads=10):
    """
    Query solr for records recommended to a reader.

    :param function: solr function to apply over the reader's top reads
    :param reader: reader identifier used in the topn() clause
    :param rows: maximum number of rows to return
    :param sort: sort field for selecting the reader's top reads
    :param cutoff_days: only include records entered within this many days
    :param top_n_reads: how many of the reader's reads to consider
    :return: (result, query, status_code) tuple; on HTTP error the result
        carries the solr error description instead of bibcodes
    """
    query = (
        f'({function}(topn({top_n_reads}, reader:{reader}, {sort} desc)) '
        f'entdate:[NOW-{cutoff_days}DAYS TO *])'
    )
    try:
        result, status_code = get_solr_data(rows, query, fl='bibcode')
    except requests.exceptions.HTTPError as e:
        current_app.logger.error(e)
        status_code = e.response.status_code
        result = {'error from solr': '%d: %s' % (status_code, e.response.reason)}
    return result, query, status_code
def normalize_key(key):
    """Format a scraped field name for use as a JSON key.

    Strips surrounding spaces/colons, snake_cases and lowercases the name,
    drops the '_-_sigla' suffix, and strips accents via NFKD decomposition.

    Args:
        key (string): Field collected while scraping the MEC data.

    Returns:
        ASCII-encoded bytes suitable for use as a JSON key.
    """
    cleaned = key.strip(' :').replace(' ', '_').lower().replace('_-_sigla', '')
    # NFKD splits accented characters into base + combining marks, which the
    # ASCII 'ignore' encoding then drops.
    return normalize('NFKD', cleaned).encode('ASCII', 'ignore')
def line(p0=(0, 0), p1=(1, 0)):
    """
    Straight-line NURBS curve from p0 to p1.

    p0          p1
    o-----------o
      +--> u
    """
    start = np.asarray(p0, dtype='d')
    end = np.asarray(p1, dtype='d')
    # Control points live in 3D; unspecified trailing coordinates stay zero.
    points = np.zeros((2, 3), dtype='d')
    points[0, :start.size] = start
    points[1, :end.size] = end
    # Open knot vector for a degree-1 curve with two control points.
    return NURBS([[0, 0, 1, 1]], points)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.