max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
sp_api/api/orders/models/order_items_list.py | lionsdigitalsolutions/python-amazon-sp-api | 0 | 12761151 | <gh_stars>0
# coding: utf-8
"""
Selling Partner API for Orders
The Selling Partner API for Orders helps you programmatically retrieve order information. These APIs let you develop fast, flexible, custom applications in areas like order synchronization, order research, and demand-based decision support tools. # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OrderItemsList(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each python attribute name to its swagger model type.
    swagger_types = {
        'order_items': 'OrderItemList',
        'next_token': 'str',
        'amazon_order_id': 'str'
    }
    # Maps each python attribute name to its JSON key in the API payload.
    attribute_map = {
        'order_items': 'OrderItems',
        'next_token': 'NextToken',
        'amazon_order_id': 'AmazonOrderId'
    }
    def __init__(self, order_items=None, next_token=None, amazon_order_id=None):  # noqa: E501
        """OrderItemsList - a model defined in Swagger"""  # noqa: E501
        self._order_items = None
        self._next_token = None
        self._amazon_order_id = None
        self.discriminator = None
        # order_items and amazon_order_id are required: their setters raise
        # on None, so they are assigned unconditionally; next_token is
        # optional and only assigned when provided.
        self.order_items = order_items
        if next_token is not None:
            self.next_token = next_token
        self.amazon_order_id = amazon_order_id
    @property
    def order_items(self):
        """Gets the order_items of this OrderItemsList.  # noqa: E501
        :return: The order_items of this OrderItemsList.  # noqa: E501
        :rtype: OrderItemList
        """
        return self._order_items
    @order_items.setter
    def order_items(self, order_items):
        """Sets the order_items of this OrderItemsList.
        :param order_items: The order_items of this OrderItemsList.  # noqa: E501
        :type: OrderItemList
        """
        # Required field: reject None explicitly.
        if order_items is None:
            raise ValueError("Invalid value for `order_items`, must not be `None`")  # noqa: E501
        self._order_items = order_items
    @property
    def next_token(self):
        """Gets the next_token of this OrderItemsList.  # noqa: E501
        When present and not empty, pass this string token in the next request to return the next response page.  # noqa: E501
        :return: The next_token of this OrderItemsList.  # noqa: E501
        :rtype: str
        """
        return self._next_token
    @next_token.setter
    def next_token(self, next_token):
        """Sets the next_token of this OrderItemsList.
        When present and not empty, pass this string token in the next request to return the next response page.  # noqa: E501
        :param next_token: The next_token of this OrderItemsList.  # noqa: E501
        :type: str
        """
        self._next_token = next_token
    @property
    def amazon_order_id(self):
        """Gets the amazon_order_id of this OrderItemsList.  # noqa: E501
        An Amazon-defined order identifier, in 3-7-7 format.  # noqa: E501
        :return: The amazon_order_id of this OrderItemsList.  # noqa: E501
        :rtype: str
        """
        return self._amazon_order_id
    @amazon_order_id.setter
    def amazon_order_id(self, amazon_order_id):
        """Sets the amazon_order_id of this OrderItemsList.
        An Amazon-defined order identifier, in 3-7-7 format.  # noqa: E501
        :param amazon_order_id: The amazon_order_id of this OrderItemsList.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if amazon_order_id is None:
            raise ValueError("Invalid value for `amazon_order_id`, must not be `None`")  # noqa: E501
        self._amazon_order_id = amazon_order_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything exposing to_dict),
        # lists of models, and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated safeguard: if the model ever subclasses dict, include
        # its mapping entries too (always False for this class as written).
        if issubclass(OrderItemsList, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OrderItemsList):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 2.453125 | 2 |
leetcode/easy/rotate-image.py | vtemian/interviews-prep | 8 | 12761152 | <filename>leetcode/easy/rotate-image.py
class Solution:
    def rotate(self, matrix: list[list[int]]) -> None:
        """Rotate the square *matrix* 90 degrees clockwise, in place.

        Do not return anything, modify matrix in-place instead.

        Fixed: the original annotated with ``List`` without importing it,
        raising NameError at class-definition time outside the LeetCode
        harness; the builtin generic ``list`` is used instead.

        The rotation is implemented as a vertical flip followed by a
        transpose, which equals a clockwise quarter turn.
        """
        # Flip vertically (reverse the row order) in place.
        matrix.reverse()
        # Transpose in place: swap elements across the main diagonal.
        n = len(matrix)
        for i in range(n):
            for j in range(i + 1, n):
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
| 3.625 | 4 |
trik/ya/network.py | hellotrik/trik | 0 | 12761153 | <filename>trik/ya/network.py
import collections
import collections.abc

from ..sheng import Operator,numpy,Optimizer,VCategory,V, constant, placeholder,Plus,SGD,Adam
from .loss import LossLayer, Loss
from .regularization import RegularizationLayer, Regularization
from .connection import ConnectionLayer, Connection
from .activation import ActivationLayer, Activation
from .cnnlayer import Convolution, Pooling, Unpooling
from .plugin import Plugin, default_plugin
# Registry of optimizer factories addressable by lower-case name.
optimizer_map = {'sgd': SGD, 'adam': Adam}


def register_optimizer(name: str, optimizer: Optimizer):
    """Make *optimizer* available under the case-insensitive *name*."""
    key = name.lower()
    optimizer_map[key] = optimizer
class Model:
    """Sequential neural-network model.

    Layers are stacked with :meth:`add`, a loss/optimizer (and optionally a
    regularization term) are selected, and the model is driven through the
    symbolic engine by :meth:`train` and :meth:`predict`.  Plugins are
    notified at well-known stages of training/prediction.

    Fixed relative to the original:
    * ``collections.Iterable`` (removed in Python 3.10) is replaced by
      ``collections.abc.Iterable`` in :meth:`add` and :meth:`regularization`.
    * :meth:`regularization` built a ``ValueError`` for an invalid *name*
      type without raising it; it is now raised.
    """

    def __init__(self):
        # Training progress counters, updated by train() and read by plugins.
        self.epoch = None
        self.epochs = None
        self.iteration = None
        self.iterations = None
        self.batch_size = None
        self.engine = V()
        self.predict_engine = V()
        self.__layer = []
        self.__layer_name_map = collections.OrderedDict()
        self.__layer_stack = []
        # Symbolic entry point of the computation graph; grows as layers are added.
        self.__input_symbol = placeholder(name='InputSymbol')
        self.__current_symbol = self.__input_symbol
        self.__current_output = None
        self.__variables = []
        self.__layer_weights = {}
        self.__data = None
        self.__optimizer = None
        self.__loss = None
        self.__regularization_term = None
        self.__plugin = collections.OrderedDict()
        self.load_default_plugin()

    def __valid_current_output(self):
        """Return the current output shape, failing loudly when undefined."""
        if self.__current_output is None:
            raise ValueError('Current output is None.')
        else:
            return self.__current_output

    def get_layer(self, name: str):
        """Return the layer registered under *name*."""
        if name in self.__layer_name_map:
            return self.__layer_name_map[name]
        else:
            raise ValueError('No such layer in Model named: {}'.format(name))

    def layer_name_map(self):
        """Return the ordered mapping of layer name -> layer."""
        return self.__layer_name_map

    def layer_stack(self):
        """Return the layers in insertion order."""
        return self.__layer_stack

    def add(self, layer, name=None):
        """Append a layer (or an iterable of layers) to the model.

        When *layer* is iterable, *name* may be a sequence of names that is
        matched positionally; missing names are auto-generated.
        """
        # collections.Iterable was removed in Python 3.10; use collections.abc.
        if isinstance(layer, collections.abc.Iterable):
            for i, l in enumerate(layer):
                if name is not None and i < len(name):
                    self.__add(l, name[i])
                else:
                    self.__add(l)
        else:
            self.__add(layer, name)

    def __add(self, layer, name: str=None):
        """Register a single layer under *name* and wire it into the graph."""
        self.__layer.append(layer)
        if name is None:
            name = 'layer_{}'.format(len(self.__layer_stack))
        if name in self.__layer_name_map:
            raise ValueError('Layer name has contained in Model: {}'.format(name))
        else:
            self.__layer_name_map[name] = layer
        self.__layer_stack.append(layer)
        # Dispatch on the concrete layer kind; wrapper objects are unwrapped
        # to their underlying layer first.
        if isinstance(layer, Operator):
            self.__add_operator(layer, name)
        elif isinstance(layer, ConnectionLayer):
            self.__add_connection(layer, name)
        elif isinstance(layer, Connection):
            self.__add_connection(layer.connection_layer(), name)
        elif isinstance(layer, Convolution):
            self.__add_connection(layer.convolution_layer(), name)
        elif isinstance(layer, Pooling):
            self.__add_connection(layer.pooling_layer(), name)
        elif isinstance(layer, Unpooling):
            self.__add_connection(layer.unpooling_layer(), name)
        elif isinstance(layer, ActivationLayer):
            self.__add_activation(layer, name)
        elif isinstance(layer, Activation):
            self.__add_activation(layer.activation_layer(), name)
        else:
            raise ValueError('Invalid get_layer type: {}'. format(type(layer)))

    def __add_operator(self, layer: Operator, name: str=None):
        """Wire a raw Operator into the symbol graph; operators carry no weights."""
        self.__current_symbol = V(operator=layer, inputs=[self.__current_symbol], category=VCategory.operator)
        self.__current_output = layer.shape(self.__current_output)
        self.__layer_weights[name] = []

    def __add_connection(self, layer: ConnectionLayer, name: str=None):
        """Wire a weighted connection layer, inferring its input shape if absent."""
        if layer.input_shape is None:
            layer.input_shape = self.__valid_current_output()
        self.__current_symbol = layer.connection(self.__current_symbol)
        self.__current_output = layer.output_shape
        for v in layer.variables():
            self.__variables.append(v)
        self.__layer_weights[name] = layer.weights()

    def __add_activation(self, layer: ActivationLayer, name: str=None):
        """Apply an activation and (re)initialise the preceding layer's parameters."""
        self.__current_symbol = layer.activation_function(self.__current_symbol)
        previous_layer = self.__layer_stack[-2]
        if isinstance(previous_layer, Connection) or isinstance(previous_layer, ConnectionLayer):
            previous_layer = previous_layer.connection_layer()
            # Re-initialise weights/biases with the activation-specific scheme.
            for weight in previous_layer.weights():
                weight.value = layer.weight_initialization(weight.value.shape)
            for bias in previous_layer.biases():
                bias.value = layer.bias_initialization(bias.value.shape)
        self.__layer_weights[name] = []

    def get_symbol(self):
        """Return the current output symbol of the stacked network."""
        return self.__current_symbol

    def optimizer(self, optimizer_object, *args, **kwargs):
        """Select the optimizer, by registry name or as an Optimizer instance."""
        if isinstance(optimizer_object, str):
            name = optimizer_object.lower()
            if name in optimizer_map:
                self.__optimizer = optimizer_map[name](*args, **kwargs)
            else:
                raise ValueError('No such optimizer: {}'.format(name))
        elif isinstance(optimizer_object, Optimizer):
            self.__optimizer = optimizer_object
        else:
            raise ValueError('Invalid optimizer type: {}'.format(type(optimizer_object)))

    def loss(self, loss_object, *args, **kwargs):
        """Select the loss, by name, as a LossLayer, or as a Loss wrapper."""
        if isinstance(loss_object, str):
            self.__loss = Loss(loss_object, *args, **kwargs).loss_layer()
        elif isinstance(loss_object, LossLayer):
            self.__loss = loss_object
        elif isinstance(loss_object, Loss):
            self.__loss = loss_object.loss_layer()
        else:
            raise ValueError('Invalid loss type: {}'.format(type(loss_object)))

    def regularization(self, regularization_object, decay: float, name=None, *args, **kwargs):
        """Attach a regularization term (weight decay *decay*) to the weights
        of the named layer(s); when *name* is None all layers are included."""
        regularization_weights = set()
        if name is None:
            for _, weights in self.__layer_weights.items():
                regularization_weights |= set(weights)
        else:
            if isinstance(name, str):
                name = [name]
            # collections.Iterable was removed in Python 3.10; use collections.abc.
            if isinstance(name, collections.abc.Iterable):
                for each_name in name:
                    if each_name in self.__layer_weights:
                        regularization_weights |= set(self.__layer_weights[each_name])
                    else:
                        raise ValueError('No such layer in Model named: {}'.format(each_name))
            else:
                # Fixed: the original constructed this ValueError without raising it.
                raise ValueError('Invalid name type: {}'.format(type(name)))
        if isinstance(regularization_object, str):
            regularization_function = Regularization(regularization_object, *args, **kwargs).regularization_layer().regularization_term
        elif isinstance(regularization_object, RegularizationLayer):
            regularization_function = regularization_object.regularization_term
        elif isinstance(regularization_object, Regularization):
            regularization_function = regularization_object.regularization_layer().regularization_term
        else:
            raise ValueError('Invalid regularization type: {}'.format(type(regularization_object)))
        for weight in regularization_weights:
            self.__add_regularization_term(regularization_function(weight, decay))

    def __add_regularization_term(self, regularization_term):
        """Accumulate regularization terms into a single summed symbol."""
        if self.__regularization_term is None:
            self.__regularization_term = regularization_term
        else:
            self.__regularization_term = self.__regularization_term(Plus(), regularization_term)

    def train(self, data, target, epochs: int=10000, batch_size: int=0):
        """Fit the model on *data*/*target*.

        ``batch_size == 0`` means full-batch training (the target is baked
        into the engine as a constant); otherwise mini-batches are bound to
        the engine on every iteration.  KeyboardInterrupt stops training
        gracefully.
        """
        data = numpy.array(data)
        target = numpy.array(target)
        self.epochs = epochs
        if data.shape[0] != target.shape[0]:
            raise ValueError('Data dimension not match target dimension: {} {}'.format(data.shape[0], target.shape[0]))
        data_scale = data.shape[0]
        target_symbol = None
        if batch_size != 0:
            target_symbol = placeholder()
            self.engine = self.__loss.loss_function(self.__current_symbol, target_symbol)
        else:
            self.engine = self.__loss.loss_function(self.__current_symbol, constant(target))
            self.engine.set_bind({self.__input_symbol: data})
        if self.__regularization_term is not None:
            self.engine = self.engine(Plus(), self.__regularization_term)
        self.engine.variables = self.__variables
        try:
            self.iteration = 0
            iterations = [0] if batch_size == 0 else range(0, data_scale, batch_size)
            self.iterations = self.epochs * len(iterations)
            self.run_plugin('begin_training')
            for epoch in range(self.epochs):
                self.epoch = epoch + 1
                self.run_plugin('begin_epoch')
                for i in iterations:
                    if batch_size != 0:
                        self.engine.set_bind({
                            self.__input_symbol: data[i: min([i + batch_size, data_scale])],
                            target_symbol: target[i: min([i + batch_size, data_scale])]
                        })
                    self.run_plugin('begin_iteration')
                    self.__optimizer.minimize(self.engine)
                    self.iteration += 1
                    self.run_plugin('end_iteration')
                self.run_plugin('end_epoch')
        except KeyboardInterrupt:
            print('<Keyboard Interrupt>')
        self.run_plugin('end_training')

    def predict(self, data):
        """Evaluate the network on *data* and return the raw output."""
        self.predict_engine = self.__current_symbol
        self.predict_engine.set_bind({self.__input_symbol: data})
        self.run_plugin('begin_predict')
        predict_data = self.predict_engine.val()
        self.run_plugin('end_predict')
        return predict_data

    def load_default_plugin(self):
        """Install the plugins shipped as defaults, honouring their enable flags."""
        for name, plugin, enable in default_plugin:
            plugin.enable = enable
            self.add_plugin(name, plugin)

    def add_plugin(self, name: str, plugin: Plugin):
        """Register *plugin* under a case-insensitive *name* and bind it to self."""
        self.__plugin[name.lower()] = plugin
        plugin.bind_network(self)

    def run_plugin(self, stage: str):
        """Invoke the *stage* hook on every enabled plugin, in insertion order."""
        for _, plugin in self.__plugin.items():
            if plugin.enable:
                getattr(plugin, stage)()

    def plugin(self, name: str):
        """Return the plugin registered under *name* (case-insensitive)."""
        if name.lower() in self.__plugin:
            return self.__plugin[name.lower()]
        else:
            raise ValueError('No such plugin: {}'.format(name))
| 2.015625 | 2 |
src/UQpy/surrogates/polynomial_chaos/regressions/RidgeRegression.py | SURGroup/UncertaintyQuantification | 0 | 12761154 | <gh_stars>0
import logging
import numpy as np
from UQpy.surrogates.polynomial_chaos.polynomials import PolynomialBasis
from UQpy.surrogates.polynomial_chaos.regressions.baseclass.Regression import Regression
class RidgeRegression(Regression):
    """Gradient-descent Ridge (L2-penalised) regression for polynomial chaos coefficients."""
    def __init__(self, learning_rate: float = 0.01, iterations: int = 1000,
                 penalty: float = 1):
        """
        Class to calculate the polynomial_chaos coefficients with the Ridge regression method.

        :param learning_rate: Size of steps for the gradient descent.
        :param iterations: Number of iterations of the optimization algorithm.
        :param penalty: Penalty parameter controls the strength of regularization. When it
                        is close to zero, then the ridge regression converges to the linear
                        regression, while when it goes to infinity, polynomial_chaos coefficients
                        converge to zero.
        """
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.penalty = penalty
        self.logger = logging.getLogger(__name__)
    def run(self, x: np.ndarray, y: np.ndarray, design_matrix: np.ndarray):
        """
        Implements the Ridge regression method to compute the polynomial_chaos
        coefficients.  (The previous docstring said "LASSO"; the
        ``2 * penalty * w`` gradient term below is the L2/Ridge penalty.)

        :param x: :class:`numpy.ndarray` containing the training points (samples).
        :param y: :class:`numpy.ndarray` containing the model evaluations (labels) at the training points.
        :param design_matrix: matrix containing the evaluation of the polynomials at the input points **x**.
        :return: Weights (polynomial_chaos coefficients), Bias of the regressor,
                 and the number of output dimensions.
        """
        m, n = design_matrix.shape
        if y.ndim == 1 or y.shape[1] == 1:
            # Single-output case: one coefficient column, scalar bias.
            y = y.reshape(-1, 1)
            w = np.zeros(n).reshape(-1, 1)
            b = 0
            for _ in range(self.iterations):
                y_pred = (design_matrix.dot(w) + b).reshape(-1, 1)
                # Gradient of the L2-regularised least-squares objective.
                dw = (-(2 * design_matrix.T.dot(y - y_pred)) + (2 * self.penalty * w)) / m
                db = -2 * np.sum(y - y_pred) / m
                w = w - self.learning_rate * dw
                b = b - self.learning_rate * db
        else:
            # Multi-output case: one coefficient column (and bias entry) per output.
            n_out_dim = y.shape[1]
            w = np.zeros((n, n_out_dim))
            b = np.zeros(n_out_dim).reshape(1, -1)
            for _ in range(self.iterations):
                y_pred = design_matrix.dot(w) + b
                dw = (-(2 * design_matrix.T.dot(y - y_pred)) + (2 * self.penalty * w)) / m
                db = -2 * np.sum((y - y_pred), axis=0).reshape(1, -1) / m
                w = w - self.learning_rate * dw
                b = b - self.learning_rate * db
        return w, b, np.shape(w)[1]
| 2.9375 | 3 |
tally_ho/apps/tally/views/archive.py | crononauta/tally-ho | 0 | 12761155 | from django.db import transaction
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext as _
from django.views.generic import FormView, TemplateView
from guardian.mixins import LoginRequiredMixin
from tally_ho.apps.tally.models.audit import Audit
from tally_ho.apps.tally.models.result_form import ResultForm
from tally_ho.libs.models.enums.form_state import FormState
from tally_ho.libs.permissions import groups
from tally_ho.libs.views.session import session_matches_post_result_form
from tally_ho.libs.verify.quarantine_checks import quarantine_checks
from tally_ho.libs.views import mixins
from tally_ho.libs.views.form_state import form_in_state
def check_quarantine(result_form, user):
    """Run quarantine checks against *result_form*.

    Deactivates any previous audits, then creates a single audit linked to
    every failed check (unless the form is flagged to skip the checks).

    :param result_form: The result form to run quarantine checks on.
    :param user: The user to associate with an audit if any checks fail.
    """
    result_form.audit_set.update(active=False)
    if result_form.skip_quarantine_checks:
        return
    audit = None
    for passed_check, check in quarantine_checks():
        if passed_check(result_form):
            continue
        # Lazily create one audit on the first failure and attach every
        # subsequent failed check to it.
        if audit is None:
            audit = Audit.objects.create(user=user,
                                         result_form=result_form)
        audit.quarantine_checks.add(check)
    if audit is not None:
        result_form.audited_count += 1
        result_form.save()
def states_for_form(user, result_form, states=None):
    """Get the possible states for this result_form.

    Archive supervisors can modify archived forms: check the user and add
    the ARCHIVED state when appropriate.

    Fixed: *states* used to be a mutable default argument that the function
    appended to, so ``FormState.ARCHIVED`` accumulated in the shared default
    list across calls.  A ``None`` sentinel preserves the interface while
    giving every call a fresh default list.

    :param user: The user to determine form states for.
    :param result_form: The form to check the state of.
    :param states: The initial states a form can be in; defaults to
        ``[FormState.ARCHIVING]``.
    :returns: A list of states that a form may be in.
    """
    if states is None:
        states = [FormState.ARCHIVING]
    if groups.QUALITY_CONTROL_ARCHIVE_SUPERVISOR in groups.user_groups(user)\
            and result_form.form_state == FormState.ARCHIVED:
        states.append(FormState.ARCHIVED)
    return states
class ArchivePrintView(LoginRequiredMixin,
                       mixins.GroupRequiredMixin,
                       mixins.TallyAccessMixin,
                       mixins.ReverseSuccessURLMixin,
                       FormView):
    """Print the cover sheet for a result form and move it out of archiving.

    GET runs the quarantine checks and renders the cover page; POST
    transitions the form to AUDIT (when a quarantine check failed) or to
    ARCHIVED, then redirects to the success page.
    """
    group_required = [groups.QUALITY_CONTROL_ARCHIVE_CLERK,
                      groups.QUALITY_CONTROL_ARCHIVE_SUPERVISOR]
    template_name = "archive/print_cover.html"
    success_url = 'archive-success'

    def get(self, *args, **kwargs):
        """Render the cover sheet after validating state and running checks."""
        # (Removed an unused `tally_id` local that was never referenced here.)
        pk = self.request.session.get('result_form')
        result_form = get_object_or_404(ResultForm, pk=pk)
        possible_states = states_for_form(self.request.user, result_form)
        form_in_state(result_form, possible_states)
        # Run the quarantine checks now so the rendered cover reflects them.
        check_quarantine(result_form, self.request.user)
        return self.render_to_response(
            self.get_context_data(result_form=result_form))

    @transaction.atomic
    def post(self, *args, **kwargs):
        """Finalize the form's state and redirect to the success page."""
        tally_id = kwargs.get('tally_id')
        post_data = self.request.POST
        pk = session_matches_post_result_form(post_data, self.request)
        result_form = get_object_or_404(ResultForm, pk=pk)
        possible_states = states_for_form(self.request.user, result_form)
        form_in_state(result_form, possible_states)
        # A form with an active audit goes to AUDIT, otherwise it is archived.
        result_form.form_state = FormState.AUDIT if result_form.audit else\
            FormState.ARCHIVED
        result_form.save()
        return redirect(self.success_url, tally_id=tally_id)
class ConfirmationView(LoginRequiredMixin,
                       mixins.GroupRequiredMixin,
                       TemplateView):
    """Show the success page after a form left quality control / archiving."""
    template_name = "success.html"
    group_required = [groups.QUALITY_CONTROL_ARCHIVE_CLERK,
                      groups.QUALITY_CONTROL_ARCHIVE_SUPERVISOR]

    def get(self, *args, **kwargs):
        """Render the confirmation page and clear the session's form key."""
        tally_id = kwargs.get('tally_id')
        form_pk = self.request.session.get('result_form')
        result_form = get_object_or_404(ResultForm, pk=form_pk)
        if result_form.audit:
            next_step = _('Quarantine')
        else:
            next_step = _('Archive')
        # Only drop the session key once the form was successfully fetched.
        del self.request.session['result_form']
        context = self.get_context_data(
            result_form=result_form,
            header_text=_('Quality Control & Archiving'),
            next_step=next_step,
            start_url='quality-control',
            tally_id=tally_id)
        return self.render_to_response(context)
| 1.84375 | 2 |
utils/utils_HD.py | lmkoch/subgroup-shift-detection | 0 | 12761156 | """
Helper functions for calculating MMD and performing MMD test
This module contains original code from: https://github.com/fengliu90/DK-for-TST
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import torch
def get_item(x):
    """Return the numpy value of a torch tensor (detached, moved to CPU)."""
    return x.detach().cpu().numpy()
def Pdist2(x, y):
    """Compute the matrix of pairwise squared distances between rows of x and y.

    When *y* is None the distances of *x* against itself are returned.
    """
    sq_x = x.pow(2).sum(dim=1).view(-1, 1)
    if y is None:
        y = x
        sq_y = sq_x.view(1, -1)
    else:
        sq_y = y.pow(2).sum(dim=1).view(1, -1)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    dist = sq_x + sq_y - 2.0 * x.mm(y.t())
    # Numerical noise can make exact-zero distances slightly negative.
    return torch.clamp(dist, min=0)
def h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U=True):
    """Compute the MMD^2 estimate and its variance from kernel matrices.

    Fixed: the original carried a biased-estimator branch behind a
    hard-coded ``is_unbiased = True`` flag, making it unreachable; the dead
    branch was removed without changing behavior.

    :param Kx: kernel matrix of sample 1 with itself, shape (nx, nx)
    :param Ky: kernel matrix of sample 2 with itself, shape (ny, ny)
    :param Kxy: cross kernel matrix between the samples, shape (nx, ny)
    :param is_var_computed: when True, also return the variance estimate
    :param use_1sample_U: use the one-sample U-statistic for the cross term
    :return: (mmd2, varEst or None, pooled kernel matrix Kxyxy)
    """
    # Pooled (nx+ny) x (nx+ny) kernel matrix, reused by the permutation test.
    Kxxy = torch.cat((Kx, Kxy), 1)
    Kyxy = torch.cat((Kxy.transpose(0, 1), Ky), 1)
    Kxyxy = torch.cat((Kxxy, Kyxy), 0)
    nx = Kx.shape[0]
    ny = Ky.shape[0]
    # Unbiased U-statistic estimate: exclude the diagonal terms.
    xx = torch.div((torch.sum(Kx) - torch.sum(torch.diag(Kx))), (nx * (nx - 1)))
    yy = torch.div((torch.sum(Ky) - torch.sum(torch.diag(Ky))), (ny * (ny - 1)))
    if use_1sample_U:
        # one-sample U-statistic.
        xy = torch.div((torch.sum(Kxy) - torch.sum(torch.diag(Kxy))), (nx * (ny - 1)))
    else:
        xy = torch.div(torch.sum(Kxy), (nx * ny))
    mmd2 = xx - 2 * xy + yy
    if not is_var_computed:
        return mmd2, None, Kxyxy
    # Variance estimate of the U-statistic from the h-gram matrix.
    hh = Kx + Ky - Kxy - Kxy.transpose(0, 1)
    V1 = torch.dot(hh.sum(1) / ny, hh.sum(1) / ny) / ny
    V2 = (hh).sum() / (nx) / nx
    varEst = 4 * (V1 - V2 ** 2)
    return mmd2, varEst, Kxyxy
def MMDu(Fea, len_s, Fea_org, sigma, sigma0=0.1, epsilon = 10**(-10), is_smooth=True, is_var_computed=True, use_1sample_U=True):
    """Compute the deep-kernel MMD value (and std) from the merged data.

    *Fea* holds deep-network features of both samples stacked; *Fea_org*
    holds the corresponding raw inputs; rows ``[:len_s]`` are sample 1.
    """
    X, Y = Fea[0:len_s, :], Fea[len_s:, :]              # deep-feature halves
    X_org, Y_org = Fea_org[0:len_s, :], Fea_org[len_s:, :]  # raw-input halves
    L = 1  # exponent of the generalized Gaussian kernel (L > 1 generalizes)

    def kernel(D_feat, D_org):
        # Deep kernel: feature-space Gaussian modulated by the raw-space
        # Gaussian, plus a small raw-space safeguard term.
        if is_smooth:
            return ((1 - epsilon) * torch.exp(-(D_feat / sigma0) ** L - D_org / sigma)
                    + epsilon * torch.exp(-D_org / sigma))
        return torch.exp(-D_feat / sigma0)

    Kx = kernel(Pdist2(X, X), Pdist2(X_org, X_org))
    Ky = kernel(Pdist2(Y, Y), Pdist2(Y_org, Y_org))
    Kxy = kernel(Pdist2(X, Y), Pdist2(X_org, Y_org))
    return h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U)
def TST_MMD_u(Fea, N_per, N1, Fea_org, sigma, sigma0, ep, alpha, device, dtype, is_smooth=True):
    """Run the two-sample permutation test (TST) with the deep kernel.

    Fixed: ``np.int`` (removed in NumPy 1.24) replaced by the builtin ``int``.

    :param Fea: stacked deep features of both samples; rows ``[:N1]`` are sample 1
    :param N_per: number of permutations for the null distribution
    :param alpha: significance level
    :return: (h, threshold, mmd_value) where h is 1 when H0 is rejected
    """
    mmd_vector = np.zeros(N_per)
    TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, ep, is_smooth)
    mmd_value = get_item(TEMP[0])
    Kxyxy = TEMP[2]
    count = 0
    nxy = Fea.shape[0]
    nx = N1
    for r in range(N_per):
        # Randomly permute the pooled sample and split into two new samples.
        ind = np.random.choice(nxy, nxy, replace=False)
        indx = ind[:nx]
        indy = ind[nx:]
        Kx = Kxyxy[np.ix_(indx, indx)]
        Ky = Kxyxy[np.ix_(indy, indy)]
        Kxy = Kxyxy[np.ix_(indx, indy)]
        TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)
        mmd_vector[r] = TEMP[0]
        if mmd_vector[r] > mmd_value:
            count = count + 1
        if count > np.ceil(N_per * alpha):
            # Early exit: the observed statistic can no longer be significant.
            h = 0
            threshold = "NaN"
            break
        else:
            h = 1
    if h == 1:
        S_mmd_vector = np.sort(mmd_vector)
        # np.int was removed in NumPy 1.24; the builtin int behaves identically here.
        threshold = S_mmd_vector[int(np.ceil(N_per * (1 - alpha)))]
    return h, threshold, mmd_value.item()
| 2.875 | 3 |
Data Collection/ResourceCollection.py | prabhatmalhan/Hand-Gesture-Detection | 0 | 12761157 | <reponame>prabhatmalhan/Hand-Gesture-Detection
from EditJson import EditConfig
import cv2 as cv
def capture(ind=''):
    """Capture 240 cropped hand-gesture images from the default webcam.

    Shows the live frame, the cropped capture region, and a reference
    sample image; pressing 'z' starts saving crops, ESC aborts.

    Fixed: the bare ``except:`` (which swallowed every error, including
    Ctrl-C, behind a typo'd generic message) is narrowed to ``Exception``
    and now reports the actual error.

    :param ind: sub-directory name under ``..\\Resources`` for this gesture.
    :return: True when all 240 frames were saved, False otherwise.
    """
    file_path = '..\\Resources\\' + ind
    config = EditConfig(file_path + '\\info.json').readConfig()
    total_count = int(config['count'])   # images already stored for this gesture
    session_count = 0                    # images captured in this session
    video = cv.VideoCapture(0)
    recording = False
    status_text = 'Press \'z\' when hand is in frame : ' + config['name']
    success = False
    # Load the reference image once instead of re-reading it every frame.
    sample_img = cv.imread(file_path + '\\sample.jpg')
    while True:
        try:
            _, frame = video.read()
            frame = cv.flip(frame, 1)
            crop_region = frame[50:350, 250:550]
            cv.rectangle(frame, (250, 50), (550, 350), (0, 255, 0), 2)
            key = cv.waitKey(5) & 0xFF
            if key == 27:  # ESC: abort capture
                break
            if key == ord('z'):  # 'z': start saving cropped frames
                status_text = 'Activated'
                recording = True
            cv.putText(frame, status_text, (0, 470), cv.FONT_HERSHEY_COMPLEX_SMALL,
                       1, (0, 0, 255), 2, cv.LINE_8)
            cv.imshow('sample', sample_img)
            cv.moveWindow('sample', 40, 30)
            cv.imshow('frame', frame)
            cv.moveWindow('frame', 500, 150)
            cv.imshow('cropped', crop_region)
            cv.moveWindow('cropped', 40, 400)
            if recording:
                cv.imwrite(file_path + '\\' + str(total_count) + '.jpg', crop_region)
                total_count += 1
                session_count += 1
                if session_count == 240:
                    # Persist the updated image count before finishing.
                    config['count'] = str(total_count)
                    EditConfig(file_path + '\\info.json').writeConfig(config)
                    success = True
                    break
        except Exception as exc:
            print("Error occurred: {}".format(exc))
            break
    cv.destroyAllWindows()
    video.release()
    return success
| 2.953125 | 3 |
scripts/rs-download-fastq-files-from-ena-via-ascp.py | SamBryce-Smith/polyAsite_workflow | 10 | 12761158 | #!/usr/bin/env python
"""
My template:
"""
__date__ = "2016-07-07"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "GPL"
# imports
import sys
import os
import time
from argparse import ArgumentParser, RawTextHelpFormatter
import shutil
import subprocess
from subprocess import run
# Command-line interface definition (evaluated at module import time).
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("-v",
                    "--verbose",
                    dest="verbose",
                    action="store_true",
                    default=False,
                    help="Be loud!")
parser.add_argument("-s",
                    "--srr_id",
                    dest="srr_id",
                    help="SRR number (from SRA) for the current sample")
parser.add_argument("--outdir",
                    dest="outdir",
                    help="directory to which the fastq files are written")
parser.add_argument("--paired",
                    dest="paired",
                    action="store_true",
                    help="indicate if two samples (paired-end sequencing) belong to that sample id")
# Shorthand aliases for writing to stderr and stdout.
syserr = sys.stderr.write
sysout = sys.stdout.write
def main(options):
    """Download the fastq file(s) for the given accession id via Aspera's ascp.

    Fixed: the original had two ``elif len(srr_number) == 8`` branches, so
    9-digit accessions were rejected (the comment promised 6 to 9 digits).
    The 7-9 digit cases are unified: ENA inserts an extra sub-directory made
    of the digits past the first six, left-padded with zeros to width 3.
    """
    # Derive the aspera installation root from the ascp binary location:
    # the ssh key ships under <root>/etc/.
    starts_with_sep = shutil.which("ascp").startswith(os.sep)
    aspera_path_list = shutil.which("ascp").split(os.sep)
    bin_dir = len(aspera_path_list)
    for i in range(len(aspera_path_list) - 1, -1, -1):
        if aspera_path_list[i] == "bin":
            bin_dir = i
            break
    aspera_path = os.path.join(*aspera_path_list[0:bin_dir])
    # os.path.join drops the leading separator; restore it if needed.
    if starts_with_sep and not aspera_path.startswith(os.sep):
        aspera_path = os.sep + aspera_path
    command_list = ["ascp", "-QT", "-l", "300m", "-P33001", "-d", "-i",
                    aspera_path + "/etc/asperaweb_id_dsa.openssh"]
    base_address = "<EMAIL>:/vol1/fastq"
    # SRR, ERR or DRR accession: strip the 3-letter prefix, keep the digits.
    prefix = options.srr_id[0:3]
    srr_number = options.srr_id.replace(prefix, "")
    n_digits = len(srr_number)
    if n_digits == 6:
        address = os.path.join(base_address,
                               options.srr_id[:6],
                               options.srr_id,
                               options.srr_id
                               )
    elif 7 <= n_digits <= 9:
        # Extra sub-directory: digits beyond the first six, zero-padded to 3
        # (e.g. 7 digits -> "00X", 8 -> "0XY", 9 -> "XYZ").
        address = os.path.join(base_address,
                               options.srr_id[:6],
                               srr_number[6:].zfill(3),
                               options.srr_id,
                               options.srr_id
                               )
    else:
        syserr("[ERROR] SRR id %s has unexpected format. Expected is the form: SRR\\d+ with \\d+ being 6 to 9 digits\n" % srr_number)
        sys.exit(2)

    def _download(fulladdress):
        """Run one ascp transfer; exit with an error message on failure."""
        command = command_list + [fulladdress, options.outdir]
        result = run(command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
        if result.returncode != 0:
            syserr("[ERROR] command failed\n")
            syserr("[ERROR] command: %s\n" % command)
            sys.exit(2)

    if options.paired:
        # Paired-end: two files with _1/_2 read suffixes.
        for read in [1, 2]:
            _download(address + "_" + str(read) + ".fastq.gz")
    else:
        _download(address + ".fastq.gz")
if __name__ == '__main__':
    # Record the start time unconditionally: the KeyboardInterrupt handler
    # below references it even when --verbose was not given (previously this
    # raised a NameError in that case).
    start_time = time.time()
    try:
        # Fail fast when Aspera's ascp is not on the PATH.
        if not shutil.which("ascp"):
            syserr("[ERROR] Could not find Aspera's ascp\n")
            syserr("[ERROR] Ensure that ascp is available and rerun the script\n")
            sys.exit(2)
        try:
            options = parser.parse_args()
        except Exception:
            parser.print_help()
            sys.exit()
        if options.verbose:
            start_date = time.strftime("%d-%m-%Y at %H:%M:%S")
            syserr("############## Started script on %s ##############\n" %
                   start_date)
        main(options)
        if options.verbose:
            syserr("### Successfully finished in %i seconds, on %s ###\n" %
                   (time.time() - start_time,
                    time.strftime("%d-%m-%Y at %H:%M:%S")))
    except KeyboardInterrupt:
        syserr("Interrupted by user after %i seconds!\n" %
               (time.time() - start_time))
        sys.exit(-1)
| 2.234375 | 2 |
rotv_apps/navigations/admin.py | ivellios/django-rotv-apps | 1 | 12761159 | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Nav
class NavAdmin(admin.ModelAdmin):
    """Django admin configuration for navigation (Nav) entries."""
    # NOTE(review): '__unicode__' in list_display suggests Python 2 era code;
    # on Python 3 this column would need '__str__' instead — confirm.
    list_display = ['__unicode__', 'parent', 'subnav_type', 'slug', ]
    search_fields = ['name', ]
    list_filter = ['parent', ]
    # Auto-fill the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug': ('name',)}
admin.site.register(Nav, NavAdmin)
| 1.859375 | 2 |
sensors/signalr_hub_sensor.py | cognifloyd/stackstorm-signalr | 0 | 12761160 | from st2reactor.sensor.base import Sensor
from signalr import Connection
__all__ = [
    'SignalRHubSensor'
]


class SignalRHubSensor(Sensor):
    """StackStorm sensor dispatching a trigger for each SignalR hub message.

    Fixed relative to the original:
    * ``message_received`` formatted a log line with ``%`` on a string that
      had no placeholder (TypeError at runtime).
    * The dispatched payload keyed on the message object itself instead of
      the string ``'message'``.
    * ``run`` registered the unbound class attribute as the handler, so the
      incoming message would have been passed as ``self``; the bound method
      is registered now, which also makes ``cleanup``'s ``off`` call
      actually match the registered handler.
    """

    def __init__(self, sensor_service, config=None):
        super(SignalRHubSensor, self).__init__(sensor_service=sensor_service,
                                               config=config)
        self._logger = self._sensor_service.get_logger(__name__)
        self.url = config['hub_url']
        self.hub_name = config['hub_name']
        self._trigger_ref = 'signalr.message_received'
        self._hub = None
        self.connection = None
        self.session = None

    def setup(self):
        """Open the SignalR connection and attach to the configured hub."""
        self.connection = Connection(self.url, self.session)
        # start a connection
        self.connection.start()

        # add a handler to log every raw notification on the connection
        def _log_notifications(data):
            self._logger.debug('Connection: new notification - {}'.format(data))
        self.connection.handlers += _log_notifications  # noqa pylint: disable=no-member
        # get hub
        self._hub = self.connection.hub(self.hub_name)

    def message_received(self, message):
        """Dispatch the StackStorm trigger for a received hub message."""
        self._logger.debug('Connection: new notification: %s', message)
        self._sensor_service.dispatch(trigger=self._trigger_ref,
                                      payload={'message': message})

    def run(self):
        """Subscribe the bound handler to the hub's message_received event."""
        self._hub.client.on('message_received', self.message_received)

    def cleanup(self):
        """Unsubscribe the handler and close the connection."""
        # do not receive new messages
        self._hub.client.off('message_received', self.message_received)
        self.connection.close()
| 2.578125 | 3 |
src/test/blocked_bad_ip.py | jalapenopuzzle/rr | 5,156 | 12761161 | from util import *
# Run the recorded program to completion and confirm it both exits
# successfully and raised the expected SIGSEGV along the way.
send_gdb('c')
expect_rr('EXIT-SUCCESS')
expect_gdb('SIGSEGV')
# Step backwards: the first reverse step should land back on the faulting
# instruction (still reported as SIGSEGV) ...
send_gdb('reverse-stepi')
expect_gdb('SIGSEGV')
# ... and the second should reach the thread entry point before the fault.
send_gdb('reverse-stepi')
expect_gdb('start_thread')
ok()
| 1.390625 | 1 |
dpctl/tests/_helper.py | reazulhoque/dpctl | 1 | 12761162 | import dpctl
def has_gpu(backend="opencl"):
    """Return True when *backend* exposes at least one GPU device."""
    gpu_count = dpctl.get_num_devices(backend=backend, device_type="gpu")
    return bool(gpu_count)
def has_cpu(backend="opencl"):
    """Return True when *backend* exposes at least one CPU device."""
    cpu_count = dpctl.get_num_devices(backend=backend, device_type="cpu")
    return bool(cpu_count)
def has_sycl_platforms():
    """Return True when at least one SYCL platform is available."""
    platforms = dpctl.get_platforms()
    return len(platforms) > 0
| 2.40625 | 2 |
02_influence_of_minimum_reads_per_locus/rad-seq-stacks_with_dedup/scripts/minimal_read_length.py | HenningTimm/rad-seq-stacks_evaluation | 1 | 12761163 | """
"""
lengths = []
for len_file in snakemake.input:
with open(len_file, 'r') as lf:
lengths.append(int(lf.readline().strip()))
with open(snakemake.output[0], "w") as out_file:
print(min(lengths), file=out_file)
| 2.953125 | 3 |
deploy/deploy_tookit/fedvision_deploy_toolkit/_generate_template.py | owlet42/FedVision | 80 | 12761164 | # Copyright (c) 2020 The FedVision Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import typer
from fedvision_deploy_toolkit import __template__
app = typer.Typer(help="template tools")
@app.command()
def generate():
    """
    generate template
    """
    # Copy the packaged template into the caller's working directory.
    template_file = os.path.join(__template__, "template.yaml")
    shutil.copy(template_file, os.getcwd())
@app.command(name="standalone")
def standalone_template():
"""
generate template for standalone deploy
"""
shutil.copy(os.path.join(__template__, "standalone_template.yaml"), os.getcwd())
| 1.8125 | 2 |
rxrx/main.py | henrique/rxrx1-utils | 0 | 12761165 | <reponame>henrique/rxrx1-utils
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on RxRx1 on TPU.
Original file:
https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_main.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import os
import time
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.estimator import estimator
from .model import model_fn
from rxrx import input as rxinput
DEFAULT_INPUT_FN_PARAMS = {
'tfrecord_dataset_buffer_size': 256,
'tfrecord_dataset_num_parallel_reads': None,
'parallel_interleave_cycle_length': 32,
'parallel_interleave_block_length': 1,
'parallel_interleave_buffer_output_elements': None,
'parallel_interleave_prefetch_input_elements': None,
'map_and_batch_num_parallel_calls': 128,
'transpose_num_parallel_calls': 128,
'prefetch_buffer_size': tf.contrib.data.AUTOTUNE,
}
# The mean and stds for each of the channels
GLOBAL_PIXEL_STATS = (np.array([6.74696984, 14.74640167, 10.51260864,
10.45369445, 5.49959796, 9.81545561]),
np.array([7.95876312, 12.17305868, 5.86172946,
7.83451711, 4.701167, 5.43130431]))
def dummy_pad_files(real, dummy, batch_size, images_per_file=277):
    """Pad a list of tfrecord files so prediction batches divide evenly.

    TPU prediction requires fixed batch sizes, so filler ("dummy") files are
    appended after the real ones; the caller discards predictions produced
    for the padding.

    Args:
        real: np.ndarray of tfrecord filenames whose predictions are wanted.
        dummy: sequence of filler filenames to draw the padding from.
        batch_size: prediction batch size that must be fully covered.
        images_per_file: number of examples stored per tfrecord shard.
            Defaults to 277, the value previously hard-coded for the RxRx1
            shards; exposed as a parameter so other shard sizes work too.

    Returns:
        np.ndarray of filenames: ``real`` followed by the padding files.
    """
    # ceil() guarantees at least one full batch worth of padding examples.
    num_pad_files = math.ceil(batch_size / images_per_file)
    return np.concatenate([real.tolist(), dummy[:num_pad_files]])
def main(use_tpu,
         tpu,
         gcp_project,
         tpu_zone,
         url_base_path,
         use_cache,
         model_dir,
         train_epochs,
         train_batch_size,
         num_train_images,
         epochs_per_loop,
         log_step_count_epochs,
         num_cores,
         data_format,
         transpose_input,
         tf_precision,
         n_classes,
         momentum,
         weight_decay,
         base_learning_rate,
         warmup_epochs,
         input_fn_params=DEFAULT_INPUT_FN_PARAMS,
         train_df=None,
         test_df=None,
         valid_pct=.2,
         model='resnet',
         model_depth=50,
         valid_steps=16,
         pred_batch_size=64,
         dim=512,
         pred_on_tpu=False):
    """Train, evaluate and export a model, then return prediction iterators.

    Builds a TPUEstimator from ``model_fn``, trains it on the tfrecords under
    ``url_base_path``, evaluates on a held-out split, exports a SavedModel to
    ``model_dir``, and finally returns a pair of prediction generators:
    (test-set predictions, predictions over all training files).

    NOTE(review): ``use_cache``, ``momentum`` (via the unused
    ``momentum_optimizer``), ``warmup_epochs`` and several other parameters are
    forwarded but not all consumed here — confirm against ``model_fn``.
    """
    # Fall back to environment variables for TPU name / GCP project.
    if use_tpu & (tpu is None):
        # NOTE(review): bitwise '&' works here since both operands are bool,
        # but logical 'and' is the conventional spelling.
        tpu = os.getenv('TPU_NAME')
        tf.logging.info('tpu: {}'.format(tpu))
    if gcp_project is None:
        gcp_project = os.getenv('TPU_PROJECT')
        tf.logging.info('gcp_project: {}'.format(gcp_project))

    # Derive step counts from the epoch-based configuration.
    steps_per_epoch = (num_train_images // train_batch_size)
    train_steps = steps_per_epoch * train_epochs
    current_step = estimator._load_global_step_from_checkpoint_dir(
        model_dir)  # pylint: disable=protected-access,line-too-long

    iterations_per_loop = steps_per_epoch * epochs_per_loop
    log_step_count_steps = steps_per_epoch * log_step_count_epochs

    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        tpu if (tpu or use_tpu) else '', zone=tpu_zone, project=gcp_project)

    config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        save_summary_steps=iterations_per_loop,
        save_checkpoints_steps=iterations_per_loop,
        log_step_count_steps=log_step_count_steps,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=iterations_per_loop,
            num_shards=num_cores,
            per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.
            PER_HOST_V2))  # pylint: disable=line-too-long

    # NOTE(review): momentum_optimizer is built but adam_optimizer is the one
    # actually passed to model_fn below — confirm which is intended.
    momentum_optimizer = tf.train.MomentumOptimizer(learning_rate=base_learning_rate,
                                                    momentum=momentum,
                                                    use_nesterov=True)
    adam_optimizer = tf.train.AdamOptimizer(base_learning_rate)

    # Bind all static configuration into the model function.
    train_model_fn = functools.partial(
        model_fn,
        n_classes=n_classes,
        num_train_images=num_train_images,
        data_format=data_format,
        transpose_input=transpose_input,
        train_batch_size=train_batch_size,
        iterations_per_loop=iterations_per_loop,
        tf_precision=tf_precision,
        weight_decay=weight_decay,
        base_learning_rate=base_learning_rate,
        warmup_epochs=warmup_epochs,
        model_dir=model_dir,
        use_tpu=use_tpu,
        model_depth=model_depth,
        optimizer=adam_optimizer,
        model=model,
        pred_on_tpu=pred_on_tpu)

    # Estimator used for training and evaluation.
    classifier = tf.contrib.tpu.TPUEstimator(
        use_tpu=use_tpu,
        model_fn=train_model_fn,
        config=config,
        train_batch_size=train_batch_size,
        eval_batch_size=train_batch_size,
        predict_batch_size=train_batch_size,
        eval_on_tpu=True,
        export_to_cpu=True)

    use_bfloat16 = (tf_precision == 'bfloat16')

    tfrecord_glob = os.path.join(url_base_path, '*.tfrecord')
    tf.logging.info("Train glob: {}".format(tfrecord_glob))
    # Split the shards into train/validation file lists.
    train_files, valid_files = rxinput.get_tfrecord_names(url_base_path, train_df, True, valid_pct=valid_pct)
    train_input_fn = functools.partial(rxinput.input_fn,
                                       train_files,
                                       input_fn_params=input_fn_params,
                                       pixel_stats=GLOBAL_PIXEL_STATS,
                                       transpose_input=transpose_input,
                                       use_bfloat16=use_bfloat16,
                                       dim=dim)
    valid_input_fn = functools.partial(rxinput.input_fn,
                                       valid_files,
                                       input_fn_params=input_fn_params,
                                       pixel_stats=GLOBAL_PIXEL_STATS,
                                       transpose_input=transpose_input,
                                       use_bfloat16=use_bfloat16,
                                       dim=dim)

    tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
                    ' step %d.', train_steps, train_steps / steps_per_epoch,
                    current_step)

    start_timestamp = time.time()  # This time will include compilation time

    classifier.train(input_fn=train_input_fn, max_steps=train_steps)
    classifier.evaluate(input_fn=valid_input_fn, steps=valid_steps)

    # NOTE(review): the "Finished training" message is logged twice below.
    tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
                    train_steps, int(time.time() - start_timestamp))
    elapsed_time = int(time.time() - start_timestamp)
    tf.logging.info('Finished training up to step %d. Elapsed seconds %d.',
                    train_steps, elapsed_time)

    tf.logging.info('Exporting SavedModel.')

    def serving_input_receiver_fn():
        # Serving signature: raw float32 6-channel images of size dim x dim.
        features = {
            'feature': tf.placeholder(dtype=tf.float32, shape=[None, dim, dim, 6]),
        }
        receiver_tensors = features
        return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

    classifier.export_saved_model(os.path.join(model_dir, 'saved_model'), serving_input_receiver_fn)

    test_files = rxinput.get_tfrecord_names(url_base_path, test_df)
    all_files = rxinput.get_tfrecord_names(url_base_path, train_df)

    # Separate estimator for prediction, possibly on TPU with its own batch size.
    classifier_pred = tf.contrib.tpu.TPUEstimator(
        use_tpu=pred_on_tpu,
        model_fn=train_model_fn,
        config=config,
        train_batch_size=train_batch_size,
        eval_batch_size=train_batch_size,
        predict_batch_size=pred_batch_size,
        eval_on_tpu=pred_on_tpu,
        export_to_cpu=True)

    """
    Kind of hacky, but append on some junk files so we use `drop_remainder` in the dataset to get fixed batch sizes for TPU,
    then we can just ignore any beyond the real amount.
    Not sure if I have something configured wrong or what, but TPU prediction is like 100x faster than CPU prediction,
    so I guess a bit of hackiness is worth it?
    """
    if pred_on_tpu:
        test_files = dummy_pad_files(test_files, all_files, pred_batch_size)

    test_input_fn = functools.partial(rxinput.input_fn,
                                      test_files,
                                      input_fn_params=input_fn_params,
                                      pixel_stats=GLOBAL_PIXEL_STATS,
                                      transpose_input=pred_on_tpu,
                                      use_bfloat16=use_bfloat16,
                                      test=True,
                                      dim=dim)

    if pred_on_tpu:
        # Also predict for all the training ones too (with garbage added on the end too) so that we can use that in other models
        all_files = dummy_pad_files(all_files, test_files, pred_batch_size)

    all_input_fn = functools.partial(rxinput.input_fn,
                                     all_files,
                                     input_fn_params=input_fn_params,
                                     pixel_stats=GLOBAL_PIXEL_STATS,
                                     transpose_input=pred_on_tpu,
                                     use_bfloat16=use_bfloat16,
                                     test=True,
                                     dim=dim)

    return classifier_pred.predict(input_fn=test_input_fn), classifier_pred.predict(input_fn=all_input_fn)
if __name__ == '__main__':
    # Command-line entry point: parse flags, then delegate to main().
    p = argparse.ArgumentParser(description='Train ResNet on rxrx1')

    # TPU Parameters
    p.add_argument(
        '--use-tpu',
        type=bool,
        default=True,
        help=('Use TPU to execute the model for training and evaluation. If'
              ' --use_tpu=false, will use whatever devices are available to'
              ' TensorFlow by default (e.g. CPU and GPU)'))
    p.add_argument(
        '--tpu',
        type=str,
        default=None,
        help=(
            'The Cloud TPU to use for training.'
            ' This should be either the name used when creating the Cloud TPU, '
            'or a grpc://ip.address.of.tpu:8470 url.'))
    p.add_argument(
        '--gcp-project',
        type=str,
        default=None,
        help=('Project name for the Cloud TPU-enabled project. '
              'If not specified, we will attempt to automatically '
              'detect the GCE project from metadata.'))
    p.add_argument(
        '--tpu-zone',
        type=str,
        default=None,
        help=('GCE zone where the Cloud TPU is located in. '
              'If not specified, we will attempt to automatically '
              'detect the GCE project from metadata.'))
    p.add_argument('--use-cache', type=bool, default=None)

    # Dataset Parameters
    p.add_argument(
        '--url-base-path',
        type=str,
        default='gs://rxrx1-us-central1/tfrecords/random-42',
        help=('Base path for tfrecord storage bucket url.'))

    # Training parameters
    p.add_argument(
        '--model-dir',
        type=str,
        default=None,
        help=(
            'The Google Cloud Storage bucket where the model and training summaries are'
            ' stored.'))
    p.add_argument(
        '--train-epochs',
        type=int,
        default=1,
        help=(
            'Defining an epoch as one pass through every training example, '
            'the number of total passes through all examples during training. '
            'Implicitly sets the total train steps.'))
    p.add_argument(
        '--num-train-images',
        type=int,
        default=73000
    )
    p.add_argument(
        '--train-batch-size',
        type=int,
        default=512,
        help=('Batch size to use during training.'))
    p.add_argument(
        '--n-classes',
        type=int,
        default=1108,
        help=('The number of label classes - typically will be 1108 '
              'since there are 1108 experimental siRNA classes.'))
    p.add_argument(
        '--epochs-per-loop',
        type=int,
        default=1,
        help=('The number of steps to run on TPU before outfeeding metrics '
              'to the CPU. Larger values will speed up training.'))
    p.add_argument(
        '--log-step-count-epochs',
        type=int,
        default=64,
        help=('The number of epochs at '
              'which global step information is logged .'))
    p.add_argument(
        '--num-cores',
        type=int,
        default=8,
        help=('Number of TPU cores. For a single TPU device, this is 8 because '
              'each TPU has 4 chips each with 2 cores.'))
    p.add_argument(
        '--data-format',
        type=str,
        default='channels_last',
        choices=[
            'channels_first',
            'channels_last',
        ],
        help=('A flag to override the data format used in the model. '
              'To run on CPU or TPU, channels_last should be used. '
              'For GPU, channels_first will improve performance.'))
    p.add_argument(
        '--transpose-input',
        type=bool,
        default=True,
        help=('Use TPU double transpose optimization.'))
    p.add_argument(
        '--tf-precision',
        type=str,
        default='bfloat16',
        choices=['bfloat16', 'float32'],
        help=('Tensorflow precision type used when defining the network.'))

    # Optimizer Parameters
    p.add_argument('--momentum', type=float, default=0.9)
    p.add_argument('--weight-decay', type=float, default=1e-4)
    p.add_argument(
        '--base-learning-rate',
        type=float,
        default=0.2,
        help=('Base learning rate when train batch size is 512. '
              'Chosen to match the resnet paper.'))
    p.add_argument(
        '--warmup-epochs',
        type=int,
        default=5,
    )

    args = p.parse_args()
    # Convert the Namespace to a dict so it can be splatted into main().
    args = vars(args)

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Parsed args: ')
    for k, v in args.items():
        tf.logging.info('{} : {}'.format(k, v))

    main(**args)
| 1.867188 | 2 |
tests/bugs/core_3737_test.py | FirebirdSQL/firebird-qa | 1 | 12761166 | <filename>tests/bugs/core_3737_test.py
#coding:utf-8
#
# id: bugs.core_3737
# title: EXECUTE BLOCK parameters definitions are not respected and may cause wrong behavior related to character sets
# decription:
# tracker_id: CORE-3737
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
set term ^;
execute block returns(len_1252 int, len_utf8 int) as
declare s varchar(16) character set utf8 = 'ÃÂÁÀÄÅÇØßÐÑÞÆŠŒŽ'; -- http://en.wikipedia.org/wiki/Windows-1252
begin
execute statement (
'execute block (c varchar(16) character set win1252 = ?) returns (n int) as '
|| 'begin '
|| ' n = octet_length(c); '
|| ' suspend; '
|| 'end') (s)
into len_1252;
execute statement (
'execute block (c varchar(16) character set utf8 = ?) returns (n int) as '
|| 'begin '
|| ' n = octet_length(c); '
|| ' suspend; '
|| 'end') (s)
into len_utf8;
suspend;
end
^
set term ;^
commit;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
LEN_1252 16
LEN_UTF8 32
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    """Run the EXECUTE BLOCK script and compare its cleaned isql output
    against the expected octet lengths for win1252 vs utf8 parameters."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 1.84375 | 2 |
django/engagementmanager/rest/checklist_set_state.py | onap/vvp-engagementmgr | 0 | 12761167 | <reponame>onap/vvp-engagementmgr
#
# ============LICENSE_START==========================================
# org.onap.vvp/engagementmgr
# ===================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the “License”);
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import json
from rest_framework.response import Response
from engagementmanager.decorator.auth import auth
from engagementmanager.decorator.class_decorator import classDecorator
from engagementmanager.decorator.log_func_entry import logFuncEntry
from engagementmanager.rest.vvp_api_view import VvpApiView
from engagementmanager.serializers import SuperThinChecklistModelSerializer
from engagementmanager.service.authorization_service import Permissions
from engagementmanager.service.checklist_state_service import set_state
from engagementmanager.utils.request_data_mgr import request_data_mgr
@classDecorator([logFuncEntry])
class ChecklistState(VvpApiView):
    """REST endpoint that approves or declines a checklist state change."""

    @auth(Permissions.update_checklist_state)
    def put(self, request, checklistUuid):
        payload = request.data
        declined = payload['decline']
        if declined == "True":
            # A decline resets the checklist without moving it to automation.
            checklist = set_state(True, request_data_mgr.get_cl_uuid(),
                                  isMoveToAutomation=False,
                                  description=payload['description'])
        else:
            checklist = set_state(
                False, request_data_mgr.get_cl_uuid(),
                description=payload['description'])
        serialized = json.dumps(
            SuperThinChecklistModelSerializer(checklist).data,
            ensure_ascii=False)
        return Response(serialized)
| 1.226563 | 1 |
Code/s4/1.2/download_posters.py | PacktPublishing/Advanced-Deep-Learning-with-Keras-V | 11 | 12761168 | import os
import random
import pandas as pd
import requests
import wget
from bs4 import BeautifulSoup
def get_poster(movie_id):
    """Scrape and return the poster image URL for an IMDb title id."""
    page_url = 'http://www.imdb.com/title/tt{}/'.format(movie_id)
    print(page_url)
    page = BeautifulSoup(requests.get(page_url).content, 'lxml')
    poster_div = page.find('div', {'class': 'poster'})
    return poster_div.find('img').attrs['src']
# Map each 0-based MovieLens row index to its IMDb id.
df_id = pd.read_csv('ml-latest-small/links.csv', sep=',')
idx_to_movie = {}
for row in df_id.itertuples():
    idx_to_movie[row[1] - 1] = row[2]
# Keep only movies whose IMDb id prints as exactly six digits, matching the
# 'tt{6 digits}' URL scheme used by get_poster().
total_movies = 9000
movies = [0] * total_movies
for i in range(len(movies)):
    if i in idx_to_movie.keys() and len(str(idx_to_movie[i])) == 6:
        movies[i] = (idx_to_movie[i])
movies = list(filter(lambda imdb: imdb != 0, movies))
total_movies = len(movies)
# NOTE(review): URL, IMDB and URL_IMDB are initialised but never used below.
URL = [0] * total_movies
IMDB = [0] * total_movies
URL_IMDB = {'url': [], 'imdb': []}
poster_path = 'posters'
# Shuffle so interrupted runs resume from random positions in the list.
random.shuffle(movies)
for movie_id in movies:
    out = os.path.join(poster_path, str(movie_id) + '.jpg')
    # Skip posters that were already downloaded on a previous run.
    if not os.path.exists(out):
        try:
            target = get_poster(movie_id)
            print('Download img from [{0}] to [{1}].'.format(target, out))
            wget.download(url=target, out=out, bar=None)
        except AttributeError:
            pass  # IMDB does not have picture for this movie.
    else:
        print('Image already exists {0}'.format(out))
| 3.078125 | 3 |
tests/controllers/test_collection_exercise_controller.py | ONSdigital/response-operations-ui | 3 | 12761169 | import datetime
import json
import os
import unittest
import responses
from config import TestingConfig
from response_operations_ui import create_app
from response_operations_ui.controllers import collection_exercise_controllers
from response_operations_ui.exceptions.exceptions import ApiError
ce_id = "4a084bc0-130f-4aee-ae48-1a9f9e50178f"
ce_events_by_id_url = f"{TestingConfig.COLLECTION_EXERCISE_URL}/collectionexercises/{ce_id}/events"
ce_nudge_events_by_id_url = f"{TestingConfig.COLLECTION_EXERCISE_URL}/collectionexercises/{ce_id}/events/nudge"
project_root = os.path.dirname(os.path.dirname(__file__))
with open(f"{project_root}/test_data/collection_exercise/ce_events_by_id.json") as fp:
ce_events = json.load(fp)
class TestCollectionExerciseController(unittest.TestCase):
    """Unit tests for the collection-exercise controller, with the
    collection-exercise service HTTP API stubbed via `responses`."""

    def setUp(self):
        # Fresh Flask test app/client per test.
        self.app = create_app("TestingConfig")
        self.client = self.app.test_client()

    def test_get_ce_events_by_id_all_events(self):
        """All four event tags come back in order from the stubbed service."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, ce_events_by_id_url, json=ce_events, status=200, content_type="applicaton/json")
            with self.app.app_context():
                collection_exercise = collection_exercise_controllers.get_collection_exercise_events_by_id(ce_id)

                self.assertIn("mps", collection_exercise[0]["tag"], "MPS not in collection exercise events")
                self.assertIn("go_live", collection_exercise[1]["tag"], "Go live not in collection exercise events")
                self.assertIn("return_by", collection_exercise[2]["tag"], "Return by not in collection exercise events")
                self.assertIn(
                    "exercise_end", collection_exercise[3]["tag"], "Exercise end not in collection exercise events"
                )

    def test_get_ce_events_by_id_no_events(self):
        """An empty event list from the service yields an empty result."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, ce_events_by_id_url, json=[], status=200, content_type="applicaton/json")
            with self.app.app_context():
                collection_exercise = collection_exercise_controllers.get_collection_exercise_events_by_id(ce_id)

                self.assertEqual(len(collection_exercise), 0, "Unexpected collection exercise event returned.")

    def test_get_ce_events_by_id_http_error(self):
        """A 400 from the service surfaces as ApiError."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, ce_events_by_id_url, status=400)
            with self.app.app_context():
                self.assertRaises(ApiError, collection_exercise_controllers.get_collection_exercise_events_by_id, ce_id)

    def test_create_ce_event_success(self):
        """A 200 on event creation returns a falsy (no-error) result."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.POST, ce_events_by_id_url, status=200)
            timestamp = datetime.datetime.strptime(
                "".join("2020-01-27 07:00:00+00:00".rsplit(":", 1)), "%Y-%m-%d %H:%M:%S%z"
            )
            with self.app.app_context():
                self.assertFalse(
                    collection_exercise_controllers.create_collection_exercise_event(ce_id, "mps", timestamp)
                )

    def test_delete_ce_event_accepted(self):
        """A 200 on nudge-event deletion returns a falsy (no-error) result."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.POST, ce_nudge_events_by_id_url, status=200)
            with self.app.app_context():
                self.assertFalse(collection_exercise_controllers.delete_event(ce_id, "nudge"))

    def test_create_ce_event_bad_request_return_false(self):
        """A 400 with an error body returns a truthy (error) result."""
        with responses.RequestsMock() as rsps:
            rsps.add(rsps.POST, ce_events_by_id_url, body='{"error":{"message": "some message"}}', status=400)
            timestamp = datetime.datetime.strptime(
                "".join("2020-01-27 07:00:00+00:00".rsplit(":", 1)), "%Y-%m-%d %H:%M:%S%z"
            )
            with self.app.app_context():
                self.assertTrue(
                    collection_exercise_controllers.create_collection_exercise_event(ce_id, "mps", timestamp)
                )
| 2.265625 | 2 |
hbase_kernel/kernel.py | f-cg/hbase_kernel | 0 | 12761170 | from .images import (
extract_image_filenames, display_data_for_image, image_setup_cmd
)
from ipykernel.kernelbase import Kernel
import logging
from pexpect import replwrap, EOF
import pexpect
from subprocess import check_output
import os.path
import re
import signal
__version__ = '0.7.2'
version_pat = re.compile(r'version (\d+(\.\d+)+)')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class IREPLWrapper(replwrap.REPLWrapper):
    """A subclass of REPLWrapper that gives incremental output
    specifically for hbase_kernel.

    The parameters are the same as for REPLWrapper, except for one
    extra parameter:

    :param line_output_callback: a callback method to receive each batch
    of incremental output. It takes one string parameter.
    """
    def __init__(self, cmd_or_spawn, orig_prompt, prompt_change=None,
                 extra_init_cmd=None, line_output_callback=None):
        # Stored so _expect_prompt can stream output back to the kernel.
        self.line_output_callback = line_output_callback
        replwrap.REPLWrapper.__init__(self, cmd_or_spawn, orig_prompt,
                                      prompt_change, extra_init_cmd=extra_init_cmd)

    def _expect_prompt(self, timeout=None):
        if timeout == None:
            # "None" means we are executing code from a Jupyter cell by way of the run_command
            # in the do_execute() code below, so do incremental output.
            while True:
                # NOTE(review): the pattern list has a single entry, so
                # expect() can only return 0 — the pos == 2 branch below is
                # unreachable; confirm whether end-of-line patterns were
                # meant to be in the list (as in the bash kernel).
                pos = self.child.expect([r'hbase:\d+:\d+>'], timeout=None)
                if pos == 2:
                    # End of line received
                    self.line_output_callback(self.child.before + '\n')
                else:
                    if len(self.child.before) != 0:
                        # prompt received, but partial line precedes it
                        self.line_output_callback(self.child.before)
                    break
        else:
            # Otherwise, use existing non-incremental code
            pos = replwrap.REPLWrapper._expect_prompt(self, timeout=timeout)

        # Prompt received, so return normally
        return pos
class HBaseKernel(Kernel):
    """Jupyter kernel that proxies cell code to an interactive
    ``hbase shell`` child process via pexpect."""
    implementation = 'hbase_kernel'
    implementation_version = __version__

    @property
    def language_version(self):
        # Extracted from the banner; assumes it matches version_pat.
        m = version_pat.search(self.banner)
        return m.group(1)

    # Cached banner string, computed lazily by the property below.
    _banner = None

    @property
    def banner(self):
        if self._banner is None:
            self._banner = 'hbase banner'
        return self._banner

    language_info = {'name': 'hbase',
                     'codemirror_mode': 'shell',
                     'mimetype': 'text/x-sh',
                     'file_extension': '.sh'}

    def __init__(self, **kwargs):
        self.silent = False
        logger.debug('init')
        Kernel.__init__(self, **kwargs)
        # Spawn the hbase shell child immediately at kernel start-up.
        self._start_bash()

    def _start_bash(self):
        # NOTE(review): name is inherited from the bash kernel this was
        # derived from; it actually starts `hbase shell`, not bash.
        print('_start_bash')
        # Signal handlers are inherited by forked processes, and we can't easily
        # reset it from the subprocess. Since kernelapp ignores SIGINT except in
        # message handlers, we need to temporarily reset the SIGINT handler here
        # so that bash and its children are interruptible.
        sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
        try:
            # Note: the next few lines mirror functionality in the
            # bash() function of pexpect/replwrap.py. Look at the
            # source code there for comments and context for
            # understanding the code here.
            child = pexpect.spawn(
                "/usr/local/hbase/bin/hbase", ['shell'], echo=False, encoding='utf-8', codec_errors='replace')
            ps1 = replwrap.PEXPECT_PROMPT[:5] + \
                u'\[\]' + replwrap.PEXPECT_PROMPT[5:]
            ps2 = replwrap.PEXPECT_CONTINUATION_PROMPT[:5] + \
                u'\[\]' + replwrap.PEXPECT_CONTINUATION_PROMPT[5:]
            prompt_change = u"PS1='{0}' PS2='{1}' PROMPT_COMMAND=''".format(
                ps1, ps2)
            # NOTE(review): ps1/ps2/prompt_change are printed but the wrapper
            # below matches the hbase shell's own prompt instead — these look
            # like leftovers from the bash kernel.
            print(ps1)
            print(ps2)
            print(prompt_change)

            # Using IREPLWrapper to get incremental output
            # self.bashwrapper = IREPLWrapper(child, 'hbase:',
            self.bashwrapper = IREPLWrapper(child, 'hbase:\d+:\d+>',
                                            line_output_callback=self.process_output)
        finally:
            signal.signal(signal.SIGINT, sig)

        # Register Bash function to write image data to temporary file
        # self.bashwrapper.run_command(image_setup_cmd)

    def process_output(self, output):
        # Forward a chunk of child-process output to the Jupyter frontend,
        # extracting any embedded image filenames first.
        print('process line')
        if not self.silent:
            image_filenames, output = extract_image_filenames(output)

            # Send standard output
            stream_content = {'name': 'stdout', 'text': output}
            self.send_response(self.iopub_socket, 'stream', stream_content)

            # Send images, if any
            for filename in image_filenames:
                try:
                    data = display_data_for_image(filename)
                except ValueError as e:
                    message = {'name': 'stdout', 'text': str(e)}
                    self.send_response(self.iopub_socket, 'stream', message)
                else:
                    self.send_response(self.iopub_socket, 'display_data', data)

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        # Execute one cell's code in the hbase shell; output is streamed
        # incrementally by IREPLWrapper through process_output().
        self.silent = silent
        if not code.strip():
            return {'status': 'ok', 'execution_count': self.execution_count,
                    'payload': [], 'user_expressions': {}}

        interrupted = False
        try:
            # Note: timeout=None tells IREPLWrapper to do incremental
            # output. Also note that the return value from
            # run_command is not needed, because the output was
            # already sent by IREPLWrapper.
            self.bashwrapper.run_command(code.rstrip(), timeout=None)
        except KeyboardInterrupt:
            self.bashwrapper.child.sendintr()
            interrupted = True
            self.bashwrapper._expect_prompt()
            output = self.bashwrapper.child.before
            self.process_output(output)
        except EOF:
            # Child died: restart it and show what it printed before exiting.
            output = self.bashwrapper.child.before + 'Restarting Bash'
            self._start_bash()
            self.process_output(output)

        if interrupted:
            return {'status': 'abort', 'execution_count': self.execution_count}

        # NOTE(review): exitcode is unconditionally 2 here (a plain
        # assignment cannot raise), so every execution is reported as an
        # error below. This looks like leftover scaffolding from the bash
        # kernel's 'echo $?' exit-code check — confirm intended behavior.
        try:
            exitcode = 2
        except Exception:
            exitcode = 1

        if exitcode:
            error_content = {
                'ename': '',
                'evalue': str(exitcode),
                'traceback': []
            }
            self.send_response(self.iopub_socket, 'error', error_content)

            error_content['execution_count'] = self.execution_count
            error_content['status'] = 'error'
            return error_content
        else:
            return {'status': 'ok', 'execution_count': self.execution_count,
                    'payload': [], 'user_expressions': {}}
| 2.4375 | 2 |
blind_walking/envs/env_modifiers/train_course.py | mcx-lab/rl-baselines3-zoo | 0 | 12761171 | import numpy as np
from blind_walking.envs.env_modifiers.env_modifier import EnvModifier
from blind_walking.envs.env_modifiers.heightfield import HeightField
from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth
""" Train robot to walk up stairs curriculum.
Equal chances for the robot to encounter going up and going down the stairs.
"""
class TrainStairs(EnvModifier):
    """Curriculum of stair flights with increasing step rise; the robot is
    promoted/demoted between levels based on how far it travels."""

    def __init__(self):
        super().__init__()
        # Step rise (height) per curriculum level, easiest to hardest.
        self.step_rise_levels = [0.02, 0.05, 0.075, 0.10]
        self.num_levels = len(self.step_rise_levels)
        self.num_steps = 10
        # Flat gap (x distance) before/between stair flights.
        self.stair_gap = 1.5
        # Step depth (run) of each stair.
        self.step_run = 0.3
        # Total x extent of one flight (up and down, including the top box).
        self.stair_length = (self.num_steps - 1) * self.step_run * 2 + boxHalfLength * 2 * 2
        self._level = 0

        self.stairs = []
        for _ in range(self.num_levels):
            self.stairs.append(Stairs())

    def _generate(self, env):
        # Lay out one flight per level, back to back along +x.
        start_x = self.stair_gap
        for i in range(self.num_levels):
            self.stairs[i]._generate(
                env, start_x=start_x, num_steps=self.num_steps, step_rise=self.step_rise_levels[i], step_run=self.step_run
            )
            start_x += self.stair_length + self.stair_gap

    def _reset(self, env):
        # Adjust the curriculum level based on the previous episode.
        if self._level > 0 and self.down_level(env):
            # robot down-levels
            self._level -= 1
            print(f"DOWNGRADE TO LEVEL {self._level}")
        elif self._level < self.num_levels and self.up_level(env):
            # robot up-levels
            self._level += 1
            print(f"LEVEL UP TO LEVEL {self._level}!")

        level = self._level
        if level >= self.num_levels:
            # Loop back to randomly selected level, weighted toward harder ones.
            level_list = np.arange(self.num_levels) + 1
            level_probs = level_list / sum(level_list)
            level = np.random.choice(self.num_levels, p=level_probs)
            print(f"LOOP TO LEVEL {level}")
        x_pos = level * (self.stair_length + self.stair_gap)
        z_pos = 0
        # Equal chances to encouter going up and down the stair level
        if np.random.uniform() < 0.4:
            # Spawn at the top of the flight so the robot must descend.
            x_pos += self.stair_gap + self.stair_length / 2 - 1
            z_pos = self.step_rise_levels[level] * self.num_steps
        self.adjust_position = (x_pos, 0, z_pos)

    def up_level(self, env):
        """To succeed the current level, robot needs to climb over the current stair level
        and reach the start of next stair level (starting from ground level,
        without straying off the stairs sideways)."""
        base_pos = env._robot.GetBasePosition()
        target_x = (self._level + 1) * (self.stair_length + self.stair_gap) + 0.5
        return (
            self.adjust_position[2] == 0
            and base_pos[0] > target_x
            and base_pos[1] > -boxHalfWidth
            and base_pos[1] < boxHalfWidth
        )

    def down_level(self, env):
        """Downgrade to the previous level if robot was unable to travel a
        fifth of the stair length (``stair_length / 5`` below)."""
        start_pos = self.adjust_position
        base_pos = env._robot.GetBasePosition()
        x_dist_travelled = base_pos[0] - start_pos[0]
        return x_dist_travelled < self.stair_length / 5
class TrainUneven(EnvModifier):
    """Adds a randomly perturbed heightfield so the robot trains on
    uneven terrain."""

    def __init__(self):
        super().__init__()
        self.hf = HeightField()

    def _generate(self, env):
        # Heightfield starts 10 units along +x with +/-0.08 perturbation.
        self.hf._generate(env, start_x=10, heightPerturbationRange=0.08)
class TrainMultiple(EnvModifier):
    """Curriculum terrain that combines a rough height field followed by several
    stair sections of increasing step rise.

    On each episode reset the robot is placed either before the height field or
    before one of the stair levels; stair levels are promoted/looped based on
    whether the previous attempt succeeded (see succeed_level)."""
    def __init__(self):
        super().__init__()
        self.hf_length = 20          # length (x) of the height-field section
        self.hf_perturb = 0.08       # max height perturbation of the height field
        self.hf = HeightField()
        self.step_rise_levels = [0.02, 0.05]  # step rise per stair difficulty level
        self.num_levels = len(self.step_rise_levels)
        self.num_steps = 10
        self.stair_gap = 1.5         # flat gap between consecutive stair sections
        self.step_run = 0.3
        # Up-and-down staircase: (num_steps-1) runs each way plus the top platform.
        self.stair_length = (self.num_steps - 1) * self.step_run * 2 + boxHalfLength * 2 * 2
        self._stair_level = 0        # current curriculum level (0-based)
        self.stairs = []
        for _ in range(self.num_levels):
            self.stairs.append(Stairs())
        self._reset_manual_override = None  # optional forced reset target (see _override_reset)
    def _generate(self, env):
        """Build the height field followed by one stair section per level."""
        self.hf._generate(env, start_x=10, heightPerturbationRange=self.hf_perturb)
        start_x = self.stair_gap + self.hf_length
        for i in range(self.num_levels):
            self.stairs[i]._generate(
                env, start_x=start_x, num_steps=self.num_steps, step_rise=self.step_rise_levels[i], step_run=self.step_run
            )
            start_x += self.stair_length + self.stair_gap
    def _reset_to_heightfield(self):
        """Reset position to before the heightfield"""
        self.adjust_position = (0, 0, 0)
    def _select_stairs_level(self, env):
        """Pick which stair level to spawn at, promoting on success."""
        # Check if robot has succeeded current level
        if self._stair_level < self.num_levels and self.succeed_level(env):
            print(f"LEVEL {self._stair_level} PASSED!")
            self._stair_level += 1
        level = self._stair_level
        if level >= self.num_levels:
            # Loop back to randomly selected level (harder levels more likely)
            level_list = np.arange(self.num_levels) + 1
            level_probs = level_list / sum(level_list)
            level = np.random.choice(self.num_levels, p=level_probs)
            print(f"LOOP TO LEVEL {level}")
        elif level > 0 and np.random.uniform() < 0.2:
            # Redo previous level
            level -= 1
        return level
    def _reset_to_stairs(self, level):
        """Reset position to just before the stairs of a given level"""
        x_pos = self.hf_length + level * (self.stair_length + self.stair_gap)
        z_pos = 0
        # 40% chance to start atop the stairs (descending) instead of before them
        if np.random.uniform() < 0.4:
            x_pos += self.stair_gap + self.stair_length / 2 - 1
            z_pos = self.step_rise_levels[level] * self.num_steps
        self.adjust_position = (x_pos, 0, z_pos)
    def _reset_randomly(self, env):
        """Coin flip between starting at the height field or at the stairs."""
        if np.random.uniform() < 0.5:
            # See heightfield
            self._reset_to_heightfield()
        else:
            # See stairs
            level = self._select_stairs_level(env)
            self._reset_to_stairs(level)
    def _reset(self, env):
        if self._reset_manual_override is not None:
            self._reset_manually()
            # Remove override for subsequent resets
            # self._reset_manual_override = None
        else:
            self._reset_randomly(env)
    def _reset_manually(self):
        """Apply the manually requested reset target (set via _override_reset)."""
        if self._reset_manual_override == "heightfield":
            self._reset_to_heightfield()
        elif self._reset_manual_override == "stairs_0":
            self._reset_to_stairs(level=0)
        elif self._reset_manual_override == "stairs_1":
            self._reset_to_stairs(level=1)
        else:
            raise ValueError(f"Invalid override {self._reset_manual_override}")
    def _override_reset(self, override: str):
        """Manually set what the next reset should be"""
        assert override in ("heightfield", "stairs_0", "stairs_1")
        self._reset_manual_override = override
    def succeed_level(self, env):
        """To succeed the current level, robot needs to climb over the current stair level
        and reach the start of next stair level"""
        base_pos = env._robot.GetBasePosition()
        target_x = self.hf_length + (self._stair_level + 1) * (self.stair_length + self.stair_gap) + 0.5
        return (
            self.adjust_position[2] == 0
            and base_pos[0] > target_x
            and base_pos[1] > -boxHalfWidth
            and base_pos[1] < boxHalfWidth
        )
| 3.15625 | 3 |
utils/test/test_rf.py | lejeunel/glia | 0 | 12761172 | <filename>utils/test/test_rf.py
from os.path import join as pjoin
from skimage import io, segmentation
import matplotlib.pyplot as plt
from glia import libglia
import numpy as np
import os
import sys
if __name__ == "__main__":
    # Smoke test for libglia's random-forest training on synthetic data:
    # n samples of D Gaussian features plus two prepended "area"-like columns.
    n, D = 2000, 4
    X = np.random.randn(n, D)
    Xarea0 = np.abs(np.random.uniform(0.01, 0.05, size=n))
    Xarea1 = np.abs(np.random.normal(0.01, 0.05, size=n))
    X = np.concatenate((Xarea0[..., None], X), axis=1)
    X = np.concatenate((Xarea1[..., None], X), axis=1)
    Y = np.random.choice([1, 0], n)  # random binary labels
    # Reference median of the two area columns, to compare with libglia's value.
    median = np.median(np.concatenate((Xarea0, Xarea1)))
    print('median numpy: {}'.format(median))
    # print(X)
    # print('testing feature conversion')
    # print(X)
    # libglia.test_conversion_shogun_feats(X)
    # print('testing label conversion')
    # print(Y)
    # libglia.test_conversion_shogun_labels(Y)
    print('testing rf')
    hmt = libglia.hmt.create()
    # NOTE(review): config args appear positional (trees?, max?, seed?, ratio?, flag)
    # — semantics not visible from here; confirm against libglia.hmt docs.
    hmt.config(3, 255, 0, 0.7, True)
    hmt.train_rf(X, Y)
    models = hmt.get_models()
    # print(models)
    # hmt.load_models(models)
| 2.0625 | 2 |
nap/agent/visual_search.py | napratin/nap | 1 | 12761173 | <reponame>napratin/nap<gh_stars>1-10
"""Visual search using features from visual system and ocular motion."""
import os
from math import hypot
import logging
import argparse
import numpy as np
import cv2
from collections import namedtuple, OrderedDict
from lumos.context import Context
from lumos.input import InputRunner
from lumos import rpc
from lumos.net import ImageServer
from lumos.util import Enum
from ..vision.visual_system import VisualSystem, Finst, VisionManager, FeatureManager, default_feature_weight, default_feature_weight_rest, default_window_flags
class VisualSearchAgent(object):
    """A simple visual search agent that scans input stream for locations with desired features."""
    image_size = (512, 512) #VisualSystem.default_image_size # size of retina to project on
    screen_background = np.uint8([0, 0, 0]) #VisionManager.default_screen_background
    def __init__(self):
        """Parse --features (e.g. "RG:0.8,BY" — optional :weight per feature),
        build the visual system + manager, and export RPC calls if enabled."""
        # * Create application context, passing in custom arguments, and get a logger
        argParser = argparse.ArgumentParser(add_help=False)
        argParser.add_argument('--features', type=str, default=None, help="features to look for, comma separated")
        self.context = Context.createInstance(description=self.__class__.__name__, parent_argparsers=[argParser])
        self.logger = logging.getLogger(self.__class__.__name__)
        # * Parse arguments
        self.features = self.context.options.features.split(',') if (hasattr(self.context.options, 'features') and self.context.options.features is not None) else []
        self.featureWeights = dict()
        for feature in self.features:
            if ':' in feature: # check for explicit weights, e.g. RG:0.8,BY:0.75
                try:
                    featureSpec = feature.split(':')
                    self.featureWeights[featureSpec[0].strip()] = float(featureSpec[1].strip())
                except Exception as e:
                    self.logger.warn("Invalid feature specification '%s': %s", feature, e)
            else: # use default weight
                self.featureWeights[feature.strip()] = default_feature_weight
        if 'rest' not in self.featureWeights:
            self.featureWeights['rest'] = default_feature_weight_rest # explicitly specify rest, otherwise previous weights will remain
        self.logger.info("Searching with feature weights: %s", self.featureWeights)
        # * Create systems and associated managers
        self.context.update() # get fresh time
        self.visSys = VisualSystem(imageSize=self.image_size, timeNow=self.context.timeNow, showMonitor=False)
        self.visMan = VisionManager(self.visSys, screen_background=self.screen_background)
        # TODO: Design a better way to share systems/managers (every system has a parent/containing agent?)
        # * Export RPC calls, if enabled
        if self.context.isRPCEnabled:
            self.logger.info("Exporting RPC calls")
            rpc.export(self.visSys)
            rpc.export(self.visMan)
            rpc.refresh() # Context is expected to have started RPC server
    def run(self):
        """Main loop: push feature weights + 'find' intent, then pump the input
        runner until it stops or update() returns False."""
        # * Set visual system buffers and send intent
        self.visSys.setBuffer('weights', self.featureWeights)
        self.visSys.setBuffer('intent', 'find')
        # * Run vision manager and ocular motion system
        runner = InputRunner(self.visMan)
        self.context.resetTime()
        while runner.update(): # should update context time
            if not self.update():
                break
        runner.cleanUp()
    def update(self):
        """Subclasses should override this to implement per-iteration behavior."""
        return True # return False to end run
class ZelinksyFinder(VisualSearchAgent):
    """A visual search agent that tries to find a target in a field of distractors (as per Zelinsky et al. 1995, 1997).

    NOTE: This module is Python 2 code (dict.itervalues(), integer-division
    pixel coordinates); keep that in mind when modifying."""
    default_fixation_symbol = '+' # just a name, actual pattern loaded from file
    default_target = 'Q' # 'O' or 'Q' depending on condition
    default_distractor = 'O' # commonly-used distractor
    default_distractors = [default_distractor] # one or more types of distractors
    default_num_stimuli = 5 # 5 or 17 depending on condition
    default_num_trials = 4 # total no. of trials we should run for
    #pattern_size = (48, 48) # size of pattern image [unused]
    #o_radius = 16 # radius of O pattern [unused]
    # NOTE: Pathnames relative to root of repository (add more to enable matching with other shapes/patterns)
    pattern_files = dict()
    pattern_files[default_fixation_symbol] = "res/data/visual-search/zelinsky-patterns/fixcross-pattern.png"
    pattern_files[default_target] = "res/data/visual-search/zelinsky-patterns/q-pattern.png"
    pattern_files[default_distractor] = "res/data/visual-search/zelinsky-patterns/o-pattern.png"
    # TODO: These need to be updated according to fovea and pattern size
    max_match_sqdiff = 0.01 # max value of SQDIFF matching to be counted as a valid match
    min_confidence_sqdiff = 0.008 # min desired difference between activations for target and distractor (to avoid jumping to conclusion when there is confusion)
    State = Enum(('NONE', 'PRE_TRIAL', 'TRIAL', 'POST_TRIAL')) # explicit tracking of experiment state (TODO: implement actual usage)
    def __init__(self, fixationSymbol=default_fixation_symbol, target=default_target, distractors=default_distractors, numStimuli=default_num_stimuli, numTrials=default_num_trials, featureChannel='V'):
        """Configure the visual system for the Zelinsky task and load the
        template patterns used to classify each fixation."""
        VisualSystem.num_finsts = numStimuli # override FINST size (TODO: make FINSTs fade out, design multi-scale FINSTs to cover larger areas/clusters for a better model)
        VisualSystem.finst_decay_enabled = True
        Finst.half_life = numStimuli * 4.0
        Finst.default_radius = 64
        VisualSearchAgent.__init__(self)
        self.fixationSymbol = fixationSymbol
        self.target = target
        self.distractors = distractors # None (not supported yet) could mean everything else is a distractor (other than fixation symbol)
        self.featureChannel = featureChannel
        self.numStimuli = numStimuli
        self.numTrials = numTrials # NOTE(04/19/2014): currently unused
        # * Configure visual system as needed (shorter times for fast completion goal, may result in some inaccuracy)
        self.visSys.max_free_duration = 0.25
        self.visSys.max_fixation_duration = 0.5 # we don't need this to be high as we are using the hold-release pattern
        self.visSys.max_hold_duration = 2.0
        self.visSys.min_good_salience = 0.2 # this task is generally very low-salience
        #self.visSys.min_saccade_salience = 0.1
        self.visSys.ocularMotionSystem.enableEventLogging("ocular-events_{}".format(self.target))
        # * Initialize pattern matchers (load only those patterns that are needed; TODO: use different flags for each pattern? color/grayscale)
        self.patternMatchers = OrderedDict()
        missingPatterns = []
        if self.fixationSymbol in self.pattern_files:
            self.patternMatchers[self.fixationSymbol] = PatternMatcher(self.fixationSymbol, self.pattern_files[self.fixationSymbol], flags=0) # pass flags=0 for grayscale
        else:
            missingPatterns.append(self.fixationSymbol)
        if self.target in self.pattern_files:
            self.patternMatchers[self.target] = PatternMatcher(self.target, self.pattern_files[self.target], flags=0)
        else:
            missingPatterns.append(self.target)
        if self.distractors is not None:
            for distractor in self.distractors:
                if distractor in self.pattern_files:
                    self.patternMatchers[distractor] = PatternMatcher(distractor, self.pattern_files[distractor], flags=0)
                else:
                    missingPatterns.append(self.fixationSymbol)
        if missingPatterns:
            self.logger.error("Patterns missing (matching may not be correct): {}".format(missingPatterns))
        # * Initialize matching-related objects
        # TODO Turn this into a state machine + some vars
        self.numDistractorsSeen = 0 # if numDistractorsSeen >= numStimuli, no target is present
        self.newFixation = False
        self.processFixation = False # used to prevent repeatedly processing a fixation period once a conclusive result has been reached
        self.maxFixations = (self.numStimuli * 1.5) # TODO: Make this a configurable param
        self.numFixations = 0
        self.firstSaccadeLatency = None
        self.imageInFocus = self.visSys.getFixatedImage(self.featureChannel)
        self.targetFound = None # flag, mainly for visualization
        # * Trial awareness
        self.trialCount = 0
        self.trialStarted = None # None means we haven't seen fixation point yet, otherwise time trial began
        # * Response output
        self.rpcClient = None
        try:
            self.rpcClient = rpc.Client(port=ImageServer.default_port, timeout=1000)
            self.logger.info("Checking for remote keyboard via RPC")
            testResult = self.rpcClient.call('rpc.list')
            if testResult == None:
                self.logger.warning("Did not get RPC result (no remote keyboard)")
                self.rpcClient.close()
                self.rpcClient = None
            else:
                self.logger.info("Remote keyboard connected (at least an RPC server is there)")
        except Exception as e:
            self.logger.error("Error initializing RPC client (no remote keyboard): {}".format(e))
        if self.context.options.gui:
            self.isOutputInverted = True # True = black on white, False = white on black
            self.winName = "Zelinsky visual search agent" # primary output window name
            self.imageOut = np.full((self.visSys.imageSize[1] * 2, self.visSys.imageSize[0] * 2, 3), 0.0, dtype=self.visSys.imageTypeInt) # 3-channel composite output image
            cv2.namedWindow(self.winName, flags=default_window_flags)
    def update(self):
        """Per-frame step: when the visual system fixates, template-match the
        fixated image; respond 'y' on a target match, count distractors and
        respond 'n' when all stimuli are accounted for."""
        # TODO: If vision is fixated, hold, match shape patterns (templates) with fixation region
        #   If it's a match for target, respond 'y' and end current trial
        #   If not (and matches a distractor), increment distractor count; if numDistractorsSeen >= numStimuli, respond 'n' and end trial
        # NOTE(4/8): Above logic has mostly been implemented here, save for repeated trial handling
        visState = self.visSys.getBuffer('state')
        if visState != VisualSystem.State.FIXATE:
            self.newFixation = True # if system is not currently fixated, next one must be a fresh one
            self.processFixation = True # similarly reset this (NOTE: this is somewhat inefficient - setting flags repeatedly)
            if visState == VisualSystem.State.SACCADE and self.firstSaccadeLatency is None:
                self.firstSaccadeLatency = self.context.timeNow
                self.logger.info("First saccade at: {}".format(self.firstSaccadeLatency))
            if self.context.options.gui:
                self.visualize() # duplicate call (TODO: consolidate)
            return True
        if self.newFixation:
            self.visSys.setBuffer('intent', 'hold') # ask visual system to hold fixation till we've processed it (subject to a max hold time)
            self.targetFound = None
            self.newFixation = False # pass hold intent only once
        if not self.processFixation: # to prevent repeated processing of the same fixation location (double-counting)
            if self.context.options.gui:
                self.visualize() # duplicate call (TODO: consolidate)
            return True
        # Get foveal/fixated image area (image-in-focus): Use key/channel = 'BGR' for full-color matching, 'V' for intensity only, 'H' for hue only (colored bars), etc.
        #self.imageInFocus = self.visSys.getFovealImage(self.featureChannel)
        self.imageInFocus = self.visSys.getFixatedImage(self.featureChannel)
        #self.imageInFocus = cv2.cvtColor(self.imageInFocus, cv2.COLOR_BGR2GRAY) # convert BGR to grayscale, if required
        #cv2.imshow("Focus", self.imageInFocus) # [debug]
        if self.imageInFocus.shape[0] < self.visSys.foveaSize[0] or self.imageInFocus.shape[1] < self.visSys.foveaSize[1]:
            if self.context.options.gui:
                self.visualize() # duplicate call (TODO: consolidate)
            return True # fixated on a weird, incomplete spot
        # Compute matches and best match in a loop
        matches = [matcher.match(self.imageInFocus) for matcher in self.patternMatchers.itervalues()] # combined matching
        self.logger.info("Matches: %s", ", ".join("{}: {:.3f} at {}".format(match.matcher.name, match.value, match.location) for match in matches)) # combined reporting
        #matchO = self.patternMatchers['O'].match(self.imageInFocus) # individual matching
        #self.logger.info("Match (O): value: {:.3f} at {}".format(matchO.value, matchO.location)) # individual reporting
        bestMatch = min(matches, key=lambda match: match.value) # TODO: sort to find difference between two best ones: (bestMatch[1].value - bestMatch[0].value) >= self.min_confidence_sqdiff
        if bestMatch.value <= self.max_match_sqdiff: # for methods CCORR, CCOEFF, use: bestMatch.value >= self.min_match_XXX
            # We have a good match
            self.logger.info("Good match: {}: {:.3f} at {}".format(bestMatch.matcher.name, bestMatch.value, bestMatch.location))
            self.numFixations += 1 # can be treated as a valid fixation
            self.processFixation = False # make sure we don't process this fixated location again
            if self.context.options.gui: # if GUI, mark matched region (NOTE: this modifies foveal image!)
                cv2.rectangle(self.imageInFocus, bestMatch.location, (bestMatch.location[0] + bestMatch.matcher.pattern.shape[1], bestMatch.location[1] + bestMatch.matcher.pattern.shape[0]), int(255.0 * (1.0 - bestMatch.value)))
                cv2.putText(self.imageInFocus, str(bestMatch.matcher.name), (bestMatch.location[0] + 2, bestMatch.location[1] + 10), cv2.FONT_HERSHEY_PLAIN, 0.67, 200)
            # Now decide what to do based on match
            symbol = bestMatch.matcher.name # bestMatch.matcher.name contains the symbol specified when creating the corresponding pattern
            if symbol == self.fixationSymbol: # NOTE: system doesn't always catch fixation symbol, so don't rely on this - keep playing
                if bestMatch.value <= 0.001 and \
                   hypot(*self.visSys.getBuffer('location')) < 20.0 and \
                   hypot(*self.visSys.ocularMotionSystem.getFocusOffset()) < 20.0: # a fixation cross, and approx. in the center!
                    self.logger.info("Fixation symbol!")
                    # TODO: Prepare vision system to reset (hold till cross disappears? don't add FINST?) and be ready (use state to prevent double action)
                    if self.trialStarted is None:
                        self.logger.info("Trial {}: Fixation symbol seen; assuming trial starts now".format(self.trialCount))
                        self.trialStarted = self.context.timeNow
                self.visSys.setBuffer('intent', 'release') # let visual system inhibit and move on
            elif symbol == self.target: # found the target!
                self.respond('y') # response is primary!
                self.logger.info("Target found!")
                self.targetFound = True
                self.nextTrial()
                #return True # ready for new trial (will happen anyways, might as well show foveal image)
                #return False # end trial and stop this run
            elif self.distractors is None or symbol in self.distractors:
                self.numDistractorsSeen += 1
                self.logger.info("Distractor {}".format(self.numDistractorsSeen))
                if self.numDistractorsSeen >= self.numStimuli or self.numFixations >= self.maxFixations: # all stimuli were (probably) distractors, no target
                    self.respond('n')
                    self.targetFound = False
                    self.nextTrial()
                    #return True # ready for new trial (will happen anyways, might as well show foveal image)
                    #return False # end trial and stop this run
                else:
                    self.visSys.setBuffer('intent', 'release') # let visual system inhibit and move on
            else:
                self.logger.warning("I don't know what that is; ignoring fixation")
                self.visSys.setBuffer('intent', 'release') # let visual system inhibit and move on
        # Visualize system operation
        if self.context.options.gui:
            self.visualize()
        #if self.context.options.debug:
        #  for match in matches:
        #    cv2.imshow("Match ({})".format(match.matcher.name), match.result)
        return True
    def respond(self, key):
        """Emit the trial response: log it and press the key on the remote
        keyboard via RPC, if one was connected at startup."""
        self.logger.info("Response: {}, time: {}, numFixations: {}, firstSaccadeLatency: {}, input: {}".format(key, self.context.timeNow, self.numFixations, self.firstSaccadeLatency, os.path.basename(self.context.options.input_source) if (self.context.isImage or self.context.isVideo) else 'live/rpc'))
        if not self.rpcClient is None:
            try:
                self.rpcClient.call('Keyboard.keyPress', params={'symbol': key})
            except Exception as e:
                self.logger.error("Error sending response key (will not retry): {}".format(e))
                self.rpcClient = None
    def nextTrial(self):
        """Reset per-trial counters/flags and tell the visual system to reset."""
        self.trialCount += 1
        self.trialStarted = None
        self.numDistractorsSeen = 0
        self.newFixation = False
        self.processFixation = False
        self.numFixations = 0
        self.firstSaccadeLatency = None
        self.visSys.setBuffer('intent', 'reset')
    def visualize(self):
        """Render the 2x2 composite view: input, retina (with foveal inset),
        neuron activity, and the output visualization with the focus rect."""
        # Combine individual outputs into giant composite image
        self.imageOut[0:self.visMan.imageSize[1], 0:self.visMan.imageSize[0]] = self.visMan.image # input
        self.imageOut[0:self.visSys.imageSize[1], (self.imageOut.shape[1] - self.visSys.imageSize[0]):self.imageOut.shape[1]] = self.visSys.images['BGR'] # retina
        self.imageOut[0:self.imageInFocus.shape[0], (self.imageOut.shape[1] - self.imageInFocus.shape[1]):self.imageOut.shape[1]] = cv2.cvtColor(self.imageInFocus, cv2.COLOR_GRAY2BGR) # foveal image, inset top-right
        self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], 0:self.visSys.imageSize[0]].fill(0)
        if self.context.options.debug:
            #self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], 0:self.visSys.imageSize[0]] = cv2.cvtColor(self.visSys.imageSalienceOutCombined, cv2.COLOR_GRAY2BGR) # combined salience (neuron firings)
            self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], 0:self.visSys.imageSize[0], 1] = cv2.convertScaleAbs(self.visSys.imageSalienceOutCombined, alpha=5, beta=0) # combined salience (neuron firings): Magenta = high
        #self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], (self.imageOut.shape[1] - self.visSys.imageSize[0]):self.imageOut.shape[1]] = cv2.cvtColor(self.visSys.imageOut, cv2.COLOR_GRAY2BGR) # VisualSystem output salience and labels/marks
        if self.isOutputInverted:
            self.imageOut = 255 - self.imageOut
        # Colored visualizations, post-inversion
        self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], (self.imageOut.shape[1] - self.visSys.imageSize[0]):self.imageOut.shape[1], 0] = 255 - self.visSys.imageOut # VisualSystem output salience and labels/marks: Blue = low
        self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], (self.imageOut.shape[1] - self.visSys.imageSize[0]):self.imageOut.shape[1], 1] = cv2.convertScaleAbs(self.visSys.imageOut, alpha=4, beta=0) # VisualSystem output salience and labels/marks: Green = high
        self.imageOut[(self.imageOut.shape[0] - self.visSys.imageSize[1]):self.imageOut.shape[0], (self.imageOut.shape[1] - self.visSys.imageSize[0]):self.imageOut.shape[1], 2] = cv2.convertScaleAbs(self.visSys.imageOut, alpha=4, beta=0) # VisualSystem output salience and labels/marks: Red = high (combined with Green, Yellow = high)
        #cv2.convertScaleAbs(self.imageOut, dst=self.imageOut, alpha=0, beta=255)
        #imageOutGray = cv2.cvtColor(self.imageOut, cv2.COLOR_BGR2GRAY) # converting back to grayscale, very inefficient
        #cv2.equalizeHist(imageOutGray, dst=imageOutGray)
        # Draw frames, labels, marks and show (NOTE: integer pixel coords rely on Python 2 division)
        cv2.rectangle(self.imageOut, (0, 0), (self.imageOut.shape[1] - 1, self.imageOut.shape[0] - 1), (128, 128, 128), 2) # outer border
        cv2.line(self.imageOut, (0, self.imageOut.shape[0] / 2), (self.imageOut.shape[1], self.imageOut.shape[0] / 2), (128, 128, 128), 2) # inner border, horizontal
        cv2.line(self.imageOut, (self.imageOut.shape[1] / 2, 0), (self.imageOut.shape[1] / 2, self.imageOut.shape[0]), (128, 128, 128), 2) # inner border, vertical
        cv2.rectangle(self.imageOut, (self.imageOut.shape[1] - self.imageInFocus.shape[1], 0), (self.imageOut.shape[1] - 1, self.imageInFocus.shape[0] - 1), (128, 128, 128), 2) # inset border, top-right
        self.labelImage(self.imageOut, "Input", (20, self.imageOut.shape[0] / 2 - 20))
        self.labelImage(self.imageOut, "Retina", (self.imageOut.shape[1] / 2 + 20, self.imageOut.shape[0] / 2 - 20))
        self.labelImage(self.imageOut, "Focus", (self.imageOut.shape[1] - self.imageInFocus.shape[1] + 12, self.imageInFocus.shape[0] + 24), color=(128, 128, 128), bgColor=None)
        self.labelImage(self.imageOut, "Neuron activity", (20, self.imageOut.shape[0] - 20))
        self.labelImage(self.imageOut, "Output visualization", (self.imageOut.shape[1] / 2 + 20, self.imageOut.shape[0] - 20))
        focusRectColor = (64, 64, 64) # gray, default
        if self.targetFound is not None:
            focusRectColor = (16, 128, 16) if self.targetFound else (16, 16, 128) # green if targetFound else red
        self.labelImage(self.imageOut, "Y" if self.targetFound else "N", (self.imageOut.shape[1] / 2 + self.visSys.fixationSlice[1].start + 6, self.imageOut.shape[0] / 2 + self.visSys.fixationSlice[0].stop - 6), color=focusRectColor, bgColor=None)
        cv2.rectangle(self.imageOut, (self.imageOut.shape[1] / 2 + self.visSys.fixationSlice[1].start, self.imageOut.shape[0] / 2 + self.visSys.fixationSlice[0].start), (self.imageOut.shape[1] / 2 + self.visSys.fixationSlice[1].stop, self.imageOut.shape[0] / 2 + self.visSys.fixationSlice[0].stop), focusRectColor, 2) # focus rect in output image
        cv2.imshow(self.winName, self.imageOut)
        #cv2.imshow(self.winName, imageOutGray)
    def labelImage(self, img, text, org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.8, color=(200, 200, 200), thickness=2, bgColor=(32, 32, 32)):
        """Wrapper around cv2.putText to add a background/box."""
        # TODO: Make this a util function
        if bgColor is not None:
            textSize, baseline = cv2.getTextSize(text, fontFace, fontScale, thickness)
            cv2.rectangle(img, (org[0] - baseline, org[1] + baseline + 2), (org[0] + textSize[0] + baseline, org[1] - textSize[1] - baseline), bgColor, -1)
        cv2.putText(img, text, org, fontFace, fontScale, color, thickness)
# Result record for PatternMatcher.match(): best-match score ('value'), its
# top-left pixel location, the scaled uint8 response map, and the matcher itself.
PatternMatch = namedtuple('PatternMatch', ['value', 'location', 'result', 'matcher'])
class PatternMatcher(object):
    """Helper class to perform simple pattern matching - a higher-level visual function not modeled in the framework."""
    default_method = cv2.TM_SQDIFF # squared-difference: lower value = better match
    def __init__(self, name, pattern_file, flags=1, method=default_method):
        """Load a pattern from file and specify a method for matching. Param flags is directly passed on to cv2.imread(): 1 = auto, 0 = grayscale."""
        self.name = name
        self.pattern_file = pattern_file
        self.method = method
        self.pattern = cv2.imread(self.pattern_file, flags) # flags=0 for grayscale
        #self.pattern = cv2.blur(self.pattern, (3, 3)) # not good for precise stimuli discrimination, like between O and Q-like
        #cv2.imshow("Pattern ({})".format(self.name), self.pattern) ## [debug]
    def match(self, image):
        """Match pattern with image; return a PatternMatch(value, location, result, matcher)
        where value/location are the best-match score and position and result is a uint8 response map."""
        result = cv2.matchTemplate(image, self.pattern, self.method)
        # Normalize result, dividing by sum of max possible squared differences.
        result /= (result.shape[1] * result.shape[0] * 255.0 * 255.0)
        #result = np.abs(result)
        #val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)
        minMatch, maxMatch, minMatchLoc, maxMatchLoc = cv2.minMaxLoc(result)
        #result_uint8 = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) # normalize (issue is variable scale)
        result_uint8 = np.uint8(result * 255.0) # scale, for display (better avoid and return None if no GUI)
        #return result_uint8, minMatch, maxMatch, minMatchLoc, maxMatchLoc # too many returns, generalize to *best* match value and loc
        # For SQDIFF the minimum is the best match; for CCORR/CCOEFF it's the maximum.
        if self.method == cv2.TM_SQDIFF or self.method == cv2.TM_SQDIFF_NORMED:
            return PatternMatch(value=minMatch, location=minMatchLoc, result=result_uint8, matcher=self)
        else: # TM_CCORR or TM_CCOEFF
            return PatternMatch(value=maxMatch, location=maxMatchLoc, result=result_uint8, matcher=self)
if __name__ == "__main__":
    # Entry point: run a Zelinsky-style trial agent (--zelinsky) or the generic agent.
    argParser = argparse.ArgumentParser(add_help=False)
    argParser.add_argument('--zelinsky', action='store_true', help="run a Zelinsky search agent")
    argParser.add_argument('--target', default='Q', choices=('Q', 'O'), help='target symbol (Q or O)')
    argParser.add_argument('--size', dest='num_stimuli', type=int, default=5, help='display size (no. of stimuli) to expect')
    argParser.add_argument('--features', type=str, default=None, help="features to look for, comma separated") # duplicated for VisualSearchAgent (TODO: Find a better way to unify args, parsers)
    context = Context.createInstance(description="Zelinsky search agent", parent_argparsers=[argParser])
    if context.options.zelinsky:
        if context.options.features is None:
            context.options.features = 'OFF:1.0' # Zelinsky-specific default feature weighting
        # The non-target symbol serves as the distractor.
        ZelinksyFinder(target=context.options.target, distractors=('O' if context.options.target == 'Q' else 'Q'), numStimuli=context.options.num_stimuli).run()
    else:
        VisualSearchAgent().run()
    # Some example invocations
    #ZelinksyFinder(target='Q', distractors=['O'], numStimuli= 5).run() # target: 'Q', distractor: 'O'; size: 5 [default]
    #ZelinksyFinder(target='Q', distractors=['O'], numStimuli=17).run() # target: 'Q', distractor: 'O'; size: 17
    #ZelinksyFinder(target='O', distractors=['Q'], numStimuli= 5).run() # target: 'O', distractor: 'Q'; size: 5
    #ZelinksyFinder(target='O', distractors=['Q'], numStimuli=17).run() # target: 'O', distractor: 'Q'; size: 17
| 2.578125 | 3 |
scripts/add_segment_marker.py | DenDen047/data2text-macro-plan-py | 20 | 12761174 | import argparse
import nltk
def process(input_file_name, output_file_name):
    """Sentence-tokenize each line of *input_file_name* and write it to
    *output_file_name* with ' <segment> ' inserted between sentences.

    Args:
        input_file_name: path of the UTF-8 input file, one document per line.
        output_file_name: path of the UTF-8 output file (overwritten).
    """
    # Context managers close both handles even on error; the original never
    # closed the input file and could lose buffered output on an exception.
    with open(input_file_name, mode='r', encoding='utf-8') as input_file, \
            open(output_file_name, mode='w', encoding='utf-8') as output_file:
        for line in input_file:
            sentences = nltk.sent_tokenize(line)
            output_line = " <segment> ".join(sentences)
            output_file.write(output_line)
            output_file.write("\n")
if __name__ == '__main__':
    # Command-line entry point: read -input_file, write -output_file.
    arg_parser = argparse.ArgumentParser(
        description='Add <segment> markers between each sentence')
    arg_parser.add_argument('-input_file', type=str, default=None,
                            help='path of input file')
    arg_parser.add_argument('-output_file', type=str, default=None,
                            help='path of output file')
    opts = arg_parser.parse_args()
    process(opts.input_file, opts.output_file)
| 3.375 | 3 |
doc/Programs/ConjugateGradient/python/qdotBroyden.py | GabrielSCabrera/ComputationalPhysics2 | 87 | 12761175 | # 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.optimize import minimize
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
    """Trial wave function psi_T = exp(-0.5*alpha*(r1^2 + r2^2)) * exp(r12/(1+beta*r12)).

    r is a (2, 2) array of particle positions; alpha/beta are variational parameters.
    """
    sq_sum = r[0, 0] ** 2 + r[0, 1] ** 2 + r[1, 0] ** 2 + r[1, 1] ** 2
    r12 = sqrt((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)
    jastrow = r12 / (1 + beta * r12)  # Pade-Jastrow correlation exponent
    return exp(-0.5 * alpha * sq_sum + jastrow)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
    """Closed-form local energy E_L = (H psi_T)/psi_T for the 2-electron 2D dot
    (harmonic trap with omega = 1 plus Coulomb repulsion)."""
    rad_sq = r[0, 0] ** 2 + r[0, 1] ** 2 + r[1, 0] ** 2 + r[1, 1] ** 2
    r12 = sqrt((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)
    d = 1.0 / (1 + beta * r12)
    d2 = d * d
    harmonic = 0.5 * (1 - alpha * alpha) * rad_sq + 2.0 * alpha
    coulomb = 1.0 / r12
    jastrow = d2 * (alpha * r12 - d2 + 2 * beta * d - 1.0 / r12)
    return harmonic + coulomb + jastrow
# Derivative of the wave function ansatz with respect to the variational parameters
def DerivativeWFansatz(r,alpha,beta):
    """Return d ln(psi_T)/d(alpha, beta) as a length-2 double array.

    d ln(psi)/d alpha = -(r1^2 + r2^2)/2
    d ln(psi)/d beta  = -r12^2 / (1 + beta*r12)^2
    """
    rad_sq = r[0, 0] ** 2 + r[0, 1] ** 2 + r[1, 0] ** 2 + r[1, 1] ** 2
    r12 = sqrt((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)
    d = 1.0 / (1 + beta * r12)
    return np.array([-0.5 * rad_sq, -r12 * r12 * d * d], np.double)
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
    """Quantum force F = 2 * grad(psi_T) / psi_T used for importance sampling.

    For psi_T = exp(-0.5*alpha*(r1^2+r2^2) + r12/(1+beta*r12)) the gradient
    w.r.t. particle i is -alpha*r_i + (r_i - r_j) / (r12*(1+beta*r12)^2), so
    F_i = -2*alpha*r_i + 2*(r_i - r_j)*deno^2/r12.

    Returns a double array with the same shape as r (one force row per particle).
    """
    # Shape follows the input positions (previously hard-coded via module globals).
    qforce = np.zeros_like(r, dtype=np.double)
    r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
    deno = 1.0/(1+beta*r12)
    # Bug fix: the harmonic and Jastrow contributions must be ADDED; the original
    # multiplied them together (-2*r*alpha*(r_i-r_j)*deno^2/r12), giving a wrong drift.
    qforce[0,:] = -2*alpha*r[0,:] + 2*(r[0,:]-r[1,:])*deno*deno/r12
    qforce[1,:] = -2*alpha*r[1,:] + 2*(r[1,:]-r[0,:])*deno*deno/r12
    return qforce
# Computing the derivative of the energy and the energy
def EnergyDerivative(x0):
    """Monte Carlo estimate of the gradient of <E_L> w.r.t. (alpha, beta).

    x0 is the parameter vector (alpha, beta). Uses importance-sampled
    Metropolis-Hastings moves and the estimator
    dE/dp = 2*(<(dlnPsi/dp)*E_L> - <dlnPsi/dp><E_L>).
    Relies on module globals NumberParticles, Dimension, NumberMCcycles.
    """
    # Parameters in the Fokker-Planck simulation of the quantum force
    D = 0.5            # diffusion constant
    TimeStep = 0.05    # Langevin/Fokker-Planck time step
    # positions
    PositionOld = np.zeros((NumberParticles,Dimension), np.double)
    PositionNew = np.zeros((NumberParticles,Dimension), np.double)
    # Quantum force
    QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
    QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
    energy = 0.0
    DeltaE = 0.0
    alpha = x0[0]
    beta = x0[1]
    EnergyDer = 0.0
    DeltaPsi = 0.0
    DerivativePsiE = 0.0
    # Initial position: Gaussian-distributed, scaled by sqrt(TimeStep)
    for i in range(NumberParticles):
        for j in range(Dimension):
            PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
    wfold = WaveFunction(PositionOld,alpha,beta)
    QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
    # Loop over MC cycles
    for MCcycle in range(NumberMCcycles):
        # Trial position, moving one particle at the time
        for i in range(NumberParticles):
            for j in range(Dimension):
                # Langevin step: Gaussian diffusion plus drift from the quantum force
                PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
                                       QuantumForceOld[i,j]*TimeStep*D
            wfnew = WaveFunction(PositionNew,alpha,beta)
            QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
            # Green's function ratio for the (asymmetric) importance-sampling proposal
            GreensFunction = 0.0
            for j in range(Dimension):
                GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
                                      (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
                                      PositionNew[i,j]+PositionOld[i,j])
            GreensFunction = exp(GreensFunction)
            ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
            # Metropolis-Hastings test to see whether we accept the move
            if random() <= ProbabilityRatio:
                for j in range(Dimension):
                    PositionOld[i,j] = PositionNew[i,j]
                    QuantumForceOld[i,j] = QuantumForceNew[i,j]
                wfold = wfnew
        # Accumulate the estimators once per cycle
        DeltaE = LocalEnergy(PositionOld,alpha,beta)
        DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
        DeltaPsi += DerPsi
        energy += DeltaE
        DerivativePsiE += DerPsi*DeltaE
    # We calculate mean values
    energy /= NumberMCcycles
    DerivativePsiE /= NumberMCcycles
    DeltaPsi /= NumberMCcycles
    EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
    return EnergyDer
# Computing the expectation value of the local energy
def Energy(x0):
    """Estimate <E_L> for variational parameters x0 = [alpha, beta] using
    importance-sampled Metropolis-Hastings Monte Carlo.

    Relies on module-level globals: NumberParticles, Dimension,
    NumberMCcycles, WaveFunction, QuantumForce, LocalEnergy. When the
    global Printout is True, the running mean energy is written to the
    global file handle `outfile`, one line per Monte Carlo cycle.
    """
    # Parameters in the Fokker-Planck simulation of the quantum force
    D = 0.5
    TimeStep = 0.05
    # positions
    PositionOld = np.zeros((NumberParticles,Dimension), np.double)
    PositionNew = np.zeros((NumberParticles,Dimension), np.double)
    # Quantum force
    QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
    QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
    energy = 0.0
    DeltaE = 0.0
    alpha = x0[0]
    beta = x0[1]
    #Initial position: Gaussian displacement scaled by sqrt(TimeStep)
    for i in range(NumberParticles):
        for j in range(Dimension):
            PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
    wfold = WaveFunction(PositionOld,alpha,beta)
    QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
    #Loop over MC MCcycles
    for MCcycle in range(NumberMCcycles):
        #Trial position moving one particle at the time
        for i in range(NumberParticles):
            for j in range(Dimension):
                # Langevin-type move: random kick plus drift along the quantum force
                PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
                                   QuantumForceOld[i,j]*TimeStep*D
            wfnew = WaveFunction(PositionNew,alpha,beta)
            QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
            # Log-ratio of forward/backward Green's functions for detailed balance
            GreensFunction = 0.0
            for j in range(Dimension):
                GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
                                  (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
                                  PositionNew[i,j]+PositionOld[i,j])
            GreensFunction = exp(GreensFunction)
            ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
            #Metropolis-Hastings test to see whether we accept the move
            if random() <= ProbabilityRatio:
                for j in range(Dimension):
                    PositionOld[i,j] = PositionNew[i,j]
                    QuantumForceOld[i,j] = QuantumForceNew[i,j]
                wfold = wfnew
        DeltaE = LocalEnergy(PositionOld,alpha,beta)
        energy += DeltaE
        if Printout:
            # Running average of the energy, written once per cycle
            outfile.write('%f\n' %(energy/(MCcycle+1.0)))
    # We calculate mean values
    energy /= NumberMCcycles
    return energy
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# seed for rng generator
seed()
# Monte Carlo cycles for parameter optimization
Printout = False
NumberMCcycles= 10000
# guess for variational parameters
x0 = np.array([0.9,0.2])
# Using Broydens method to find optimal parameters
# NOTE(review): method='BFGS' is the quasi-Newton BFGS algorithm, not Broyden's
# method proper; the analytic gradient EnergyDerivative is supplied via jac.
res = minimize(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': 1e-4,'disp': True})
x0 = res.x
print(x0)
# Compute the energy again with the optimal parameters and increased number of Monte Cycles
NumberMCcycles= 100000
Printout = True
# Energy() writes the running mean to this global file handle when Printout is True.
outfile = open("Energies.dat",'w')
print(Energy(x0))
outfile.close()
| 2.78125 | 3 |
codeforces/math数学/900/1208A异或序列.py | yofn/pyacm | 0 | 12761176 | #!/usr/bin/env python3
# https://codeforces.com/problemset/problem/1208/A
# xor性质~~
def f(l):
    """Return the n-th element of the sequence a, b, a^b, a, b, a^b, ...

    l is [a, b, n]; the sequence has period 3, so only n % 3 matters.
    Note: l[2] is overwritten with a^b (the caller's list is mutated).
    """
    first, second, idx = l
    l[2] = first ^ second
    return l[idx % 3]
# Number of independent test cases on the first line of input
t = int(input())
for _ in range(t):
    # Each case supplies a, b, n for the sequence a, b, a^b, a, b, ...
    l = list(map(int,input().split()))
    print(f(l))
| 3.578125 | 4 |
trainNN/train_sc.py | yztxwd/Bichrom | 3 | 12761177 | import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import average_precision_score as auprc
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, concatenate, Input, LSTM
from tensorflow.keras.layers import Conv1D, Reshape, Lambda
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow.keras.backend as K
from iterutils import train_generator
def data_generator(path, batchsize, seqlen, bin_size):
    """Yield batches of ([sequence, chromatin], labels) training data forever.

    Parameters:
        path (dict): Paths keyed by 'seq', 'chromatin_tracks' (list of paths)
            and 'labels'
        batchsize (int): Number of examples per yielded batch
        seqlen (int): Length of the input DNA sequence windows
        bin_size (int): Resolution (in bp) at which chromatin tracks are binned

    Yields:
        ([sequence_features, chromatin_features], labels) tuples, where
        chromatin_features concatenates all tracks along the feature axis.
    """
    dat_seq = train_generator(path['seq'], batchsize, seqlen, 'seq', 'repeat')
    dat_chromatin = []
    for chromatin_track in path['chromatin_tracks']:
        dat_chromatin.append(
            train_generator(chromatin_track, batchsize, seqlen, 'chrom', 'repeat'))
    y = train_generator(path['labels'], batchsize, seqlen, 'labels', 'repeat')
    while True:
        combined_chrom_data = []
        for chromatin_track_generator in dat_chromatin:
            curr_chromatin_mark = next(chromatin_track_generator)
            mark_resolution = curr_chromatin_mark.shape
            # Each track must provide seqlen/bin_size values per window.
            # FIX: report the actual bin size instead of a hardcoded "50".
            assert (mark_resolution == (batchsize, seqlen/bin_size)),\
                "Please check binning, specified bin size=%s" % bin_size
            combined_chrom_data.append(pd.DataFrame(curr_chromatin_mark))
        chromatin_features = pd.concat(combined_chrom_data, axis=1).values
        # FIX: removed leftover debug print of chromatin_features.shape.
        sequence_features = next(dat_seq)
        labels = next(y)
        yield [sequence_features, chromatin_features], labels
def add_new_layers(base_model, seq_len, no_of_chromatin_tracks, bin_size):
    """
    Takes a pre-existing M-SEQ (Definition in README) & adds structure to \
    use it as part of a bimodal DNA sequence + prior chromatin network

    Parameters:
        base_model (keras Model): A pre-trained sequence-only (M-SEQ) model
        seq_len (int): Length of the input DNA sequence windows
        no_of_chromatin_tracks (int): The expected number of chromatin tracks
        bin_size (int): Resolution (in bp) at which the tracks are binned

    Returns:
        model: a Keras Model
    """
    def permute(x):
        # Swap the (tracks, bins) axes so Conv1D convolves over the bin axis.
        return K.permute_dimensions(x, (0, 2, 1))
    # Transfer from a pre-trained M-SEQ
    curr_layer = base_model.get_layer(name='dense_2')
    curr_tensor = curr_layer.output
    xs = Dense(1, name='MSEQ-dense-new', activation='tanh')(curr_tensor)
    # Defining a M-C sub-network
    chrom_input = Input(shape=(no_of_chromatin_tracks * int(seq_len/bin_size),), name='chrom_input')
    ci = Reshape((no_of_chromatin_tracks, int(seq_len/bin_size)),
                 input_shape=(no_of_chromatin_tracks * int(seq_len/bin_size),))(chrom_input)
    # Permuting the input dimensions to match Keras input requirements:
    permute_func = Lambda(permute)
    ci = permute_func(ci)
    xc = Conv1D(15, 1, padding='valid', activation='relu', name='MC-conv1d')(ci)
    xc = LSTM(5, activation='relu', name='MC-lstm')(xc)
    xc = Dense(1, activation='tanh', name='MC-dense')(xc)
    # Concatenating sequence (MSEQ) and chromatin (MC) networks:
    merged_layer = concatenate([xs, xc])
    result = Dense(1, activation='sigmoid', name='MSC-dense')(merged_layer)
    model = Model(inputs=[base_model.input, chrom_input], outputs=result)
    return model
class PrecisionRecall(Callback):
    """Keras callback that records the validation auPRC after every epoch."""

    def __init__(self, val_data):
        super().__init__()
        # val_data is a ((x_val, c_val), y_val) tuple used for scoring.
        self.validation_data = val_data

    def on_train_begin(self, logs=None):
        self.val_auprc = []
        # NOTE(review): train_auprc is initialized but never appended to.
        self.train_auprc = []

    def on_epoch_end(self, epoch, logs=None):
        # Score the fixed validation batch with the current model weights.
        (x_val, c_val), y_val = self.validation_data
        predictions = self.model.predict([x_val, c_val])
        aupr = auprc(y_val, predictions)
        self.val_auprc.append(aupr)
def save_metrics(hist_object, pr_history, records_path):
    """Persist training/validation loss and validation auPRC as text files.

    Parameters:
        hist_object: Keras History object (provides .history dict)
        pr_history: PrecisionRecall callback (provides .val_auprc)
        records_path (str): Path + prefix for the output files

    Returns:
        (training loss history, validation auPRC history)
    """
    training_loss = hist_object.history['loss']
    validation_loss = hist_object.history['val_loss']
    validation_auprc = pr_history.val_auprc
    # Write each metric series as one value per line, two decimal places.
    for filename, series in (('trainingLoss.txt', training_loss),
                             ('valLoss.txt', validation_loss),
                             ('valPRC.txt', validation_auprc)):
        np.savetxt(records_path + filename, series, fmt='%1.2f')
    return training_loss, validation_auprc
def transfer(train_path, val_path, basemodel, model, steps_per_epoch,
             batchsize, records_path, bin_size, seq_len):
    """
    Trains the M-SC, transferring weights from the pre-trained M-SEQ.
    The M-SEQ weights are kept fixed except for the final layer.

    Parameters:
        train_path (str): Path + prefix to training data
        val_path (str): Path + prefix to the validation data
        basemodel (Model): Pre-trained keras M-SEQ model
        model (Model): Defined bimodal network
        steps_per_epoch (int): Len(training_data/batchsize)
        batchsize (int): Batch size used in SGD
        records_path (str): Path + prefix to output directory
        bin_size (int): Resolution (in bp) of the chromatin tracks
        seq_len (int): Length of the input DNA sequence windows

    Returns:
        loss (ndarray): An array with the validation loss at each epoch
        val_pr (list): Validation auPRC at each epoch
    """
    # Making the base model layers non-trainable:
    for layer in basemodel.layers:
        layer.trainable = False
    # Training rest of the model.
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd)
    # Get train and validation data
    train_data_generator = data_generator(train_path, batchsize, seqlen=seq_len, bin_size=bin_size)
    # A single large batch (200000) is drawn once and reused for every epoch.
    val_data_generator = data_generator(val_path, 200000, seqlen=seq_len, bin_size=bin_size)
    validation_data = next(val_data_generator)
    precision_recall_history = PrecisionRecall(validation_data)
    # Checkpoint the model after every epoch (not just the best one).
    checkpointer = ModelCheckpoint(records_path + 'model_epoch{epoch}.hdf5',
                                   verbose=1, save_best_only=False)
    hist = model.fit_generator(epochs=15, steps_per_epoch=steps_per_epoch,
                               generator=train_data_generator,
                               validation_data=validation_data,
                               callbacks=[precision_recall_history,
                                          checkpointer])
    loss, val_pr = save_metrics(hist_object=hist, pr_history=precision_recall_history,
                                records_path=records_path)
    return loss, val_pr
def transfer_and_train_msc(train_path, val_path, basemodel,
                           batch_size, records_path, bin_size, seq_len):
    """Build the bimodal M-SC network on top of a pre-trained M-SEQ model,
    train it, and return the per-epoch training loss and validation auPRC."""
    # Calculate size of the training set:
    training_set_size = len(np.loadtxt(train_path['labels']))
    # Calculate the steps per epoch
    # NOTE(review): true division yields a float here; Keras expects an
    # integer steps_per_epoch -- confirm this is coerced downstream.
    steps_per_epoch = training_set_size / batch_size
    # Calculate number of chromatin tracks
    no_of_chrom_tracks = len(train_path['chromatin_tracks'])
    model = add_new_layers(basemodel, seq_len, no_of_chrom_tracks, bin_size)
    loss, val_pr = transfer(train_path, val_path, basemodel, model, steps_per_epoch,
                            batch_size, records_path, bin_size, seq_len)
    return loss, val_pr
| 2.125 | 2 |
CellProfiler/cellprofiler/modules/measureobjectsizeshape.py | aidotse/Team-rahma.ai | 0 | 12761178 | <reponame>aidotse/Team-rahma.ai<gh_stars>0
import centrosome.cpmorphology
import centrosome.zernike
import numpy
import scipy.ndimage
import skimage.measure
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
from cellprofiler_core.module import Module
from cellprofiler_core.object import Objects
from cellprofiler_core.setting import Divider, Binary, ValidationError
from cellprofiler_core.setting.subscriber import LabelListSubscriber
import cellprofiler.gui.help.content
import cellprofiler.icons
__doc__ = """\
MeasureObjectSizeShape
======================
**MeasureObjectSizeShape** measures several area and shape features
of identified objects.
Given an image with identified objects (e.g., nuclei or cells), this
module extracts area and shape features of each one. Note that these
features are only reliable for objects that are completely inside the
image borders, so you may wish to exclude objects touching the edge of
the image using **Identify** settings for 2D objects, or by applying
**FilterObjects** downstream.
The display window for this module shows per-image
aggregates for the per-object measurements. If you want to view the
per-object measurements themselves, you will need to use an
**Export** module to export them, or use **DisplayDataOnImage** to
display the object measurements of choice overlaid on an image of
choice.
|
============ ============ ===============
Supports 2D? Supports 3D? Respects masks?
============ ============ ===============
YES YES NO
============ ============ ===============
See also
^^^^^^^^
See also **MeasureImageAreaOccupied**.
Measurements made by this module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Some measurements are available for 3D and 2D objects, while some are 2D
only.
See the *Technical Notes* below for an explanation of a key step
underlying many of the following metrics: creating an
ellipse with the same second-moments as each object.
- *Area:* *(2D only)* The number of pixels in the region.
- *Volume:* *(3D only)* The number of voxels in the region.
- *Perimeter:* *(2D only)* The total number of pixels around the boundary of each
region in the image.
- *SurfaceArea:* *(3D only)* The total number of voxels around the boundary of
each region in the image.
- *FormFactor:* *(2D only)* Calculated as 4\*π\*Area/Perimeter\ :sup:`2`. Equals 1
for a perfectly circular object.
- *Solidity:* The proportion of the pixels in the convex hull that are
also in the object, i.e., *ObjectArea/ConvexHullArea*.
- *Extent:* The proportion of the pixels (2D) or voxels (3D) in the bounding box
that are also in the region. Computed as the area/volume of the object divided
by the area/volume of the bounding box.
- *EulerNumber:* The number of objects in the region minus the number
of holes in those objects, assuming 8-connectivity.
- *Center\_X, Center\_Y, Center\_Z:* The *x*-, *y*-, and (for 3D objects) *z-*
coordinates of the point farthest away from any object edge (the *centroid*).
Note that this is not the same as the *Location-X* and *-Y* measurements
produced by the **Identify** or **Watershed**
modules or the *Location-Z* measurement produced by the **Watershed** module.
- *BoundingBoxMinimum/Maximum\_X/Y/Z:* The minimum/maximum *x*-, *y*-, and (for 3D objects)
*z-* coordinates of the object.
- *BoundingBoxArea:* *(2D only)* The area of a box containing the object.
- *BoundingBoxVolume:* *(3D only)* The volume of a box containing the object.
- *Eccentricity:* *(2D only)* The eccentricity of the ellipse that has the same
second-moments as the region. The eccentricity is the ratio of the
distance between the foci of the ellipse and its major axis length.
The value is between 0 and 1. (0 and 1 are degenerate cases; an
ellipse whose eccentricity is 0 is actually a circle, while an
ellipse whose eccentricity is 1 is a line segment.)
|MOSS_image0|
- *MajorAxisLength:* The length (in pixels) of the major axis of the
ellipse that has the same normalized second central moments as the
region.
- *MinorAxisLength:* The length (in pixels) of the minor axis of the
ellipse that has the same normalized second central moments as the
region.
- *EquivalentDiameter:* The diameter of a circle or sphere with the same area
as the object.
- *Orientation:* *(2D only)* The angle (in degrees ranging from -90 to 90 degrees)
between the x-axis and the major axis of the ellipse that has the
same second-moments as the region.
- *Compactness:* *(2D only)* The mean squared distance of the object’s pixels from
the centroid divided by the area. A filled circle will have a
compactness of 1, with irregular objects or objects with holes having
a value greater than 1.
- *MaximumRadius:* *(2D only)* The maximum distance of any pixel in the object to
the closest pixel outside of the object. For skinny objects, this is
1/2 of the maximum width of the object.
- *MedianRadius:* *(2D only)* The median distance of any pixel in the object to the
closest pixel outside of the object.
- *MeanRadius:* *(2D only)* The mean distance of any pixel in the object to the
closest pixel outside of the object.
- *MinFeretDiameter, MaxFeretDiameter:* *(2D only)* The Feret diameter is the
distance between two parallel lines tangent on either side of the
object (imagine taking a caliper and measuring the object at various
angles). The minimum and maximum Feret diameters are the smallest and
largest possible diameters, rotating the calipers along all possible
angles.
- *Zernike shape features:* *(2D only)* These metrics of shape describe a binary object
(or more precisely, a patch with background and an object in the
center) in a basis of Zernike polynomials, using the coefficients as
features (*Boland et al., 1998*). Currently, Zernike polynomials from
order 0 to order 9 are calculated, giving in total 30 measurements.
While there is no limit to the order which can be calculated (and
indeed you could add more by adjusting the code), the higher order
polynomials carry less information.
- *Spatial Moment features:* *(2D only)* A series of weighted averages
representing the shape, size, rotation and location of the object.
- *Central Moment features:* *(2D only)* Similar to spatial moments, but
normalized to the object's centroid. These are therefore not influenced
by an object's location within an image.
- *Normalized Moment features:* *(2D only)* Similar to central moments,
but further normalized to be scale invariant. These moments are therefore
not impacted by an object's size (or location).
- *Hu Moment features:* *(2D only)* Hu's set of image moment features. These
are not altered by the object's location, size or rotation. This means that
they primarily describe the shape of the object.
- *Inertia Tensor features:* *(2D only)* A representation of rotational
inertia of the object relative to it's center.
- *Inertia Tensor Eigenvalues features:* *(2D only)* Values describing
the movement of the Inertia Tensor array.
Technical notes
^^^^^^^^^^^^^^^
A number of the object measurements are generated by creating an ellipse
with the same second-moments as the original object region. This is
essentially the best-fitting ellipse for a given object with the same
statistical properties. Furthermore, they are not affected by the
translation or uniform scaling of a region.
Following computer vision conventions, the origin of the X and Y axes is at the top
left of the image rather than the bottom left; the orientation of objects whose topmost point
is on their right (or are rotated counter-clockwise from the horizontal) will therefore
have a negative orientation, while objects whose topmost point is on their left
(or are rotated clockwise from the horizontal) will have a positive orientation.
The Zernike features are computed within the minimum enclosing circle of
the object, i.e., the circle of the smallest diameter that contains all
of the object’s pixels.
References
^^^^^^^^^^
- <NAME>, <NAME>, Carvalho PCP, “Image moments-based structuring and
tracking of objects”, Proceedings from XV Brazilian Symposium on
Computer Graphics and Image Processing, 2002. `(pdf)`_
- Principles of Digital Image Processing: Core Algorithms
(Undergraduate Topics in Computer Science): `Section 2.4.3 -
Statistical shape properties`_
- <NAME> (1885), “On the problem to construct the minimum circle
enclosing n given points in a plane”, *Proceedings of the Edinburgh
Mathematical Society*, vol 3, p. 30
- <NAME> (1962), “Visual pattern recognition by moment invariants”, *IRE
transactions on information theory*, 8(2), pp.179-187 `(link)`_
.. _(pdf): http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf
.. _Section 2.4.3 - Statistical shape properties: http://www.scribd.com/doc/58004056/Principles-of-Digital-Image-Processing#page=49
.. _(link): https://ieeexplore.ieee.org/abstract/document/1057692
.. |MOSS_image0| image:: {ECCENTRICITY_ICON}
""".format(
**{
"ECCENTRICITY_ICON": cellprofiler.gui.help.content.image_resource(
"MeasureObjectSizeShape_Eccentricity.png"
)
}
)
"""The category of the per-object measurements made by this module"""
AREA_SHAPE = "AreaShape"
"""Calculate Zernike features for N,M where N=0 through ZERNIKE_N"""
ZERNIKE_N = 9
F_AREA = "Area"
F_PERIMETER = "Perimeter"
F_VOLUME = "Volume"
F_SURFACE_AREA = "SurfaceArea"
F_ECCENTRICITY = "Eccentricity"
F_SOLIDITY = "Solidity"
F_EXTENT = "Extent"
F_CENTER_X = "Center_X"
F_CENTER_Y = "Center_Y"
F_CENTER_Z = "Center_Z"
F_BBOX_AREA = "BoundingBoxArea"
F_BBOX_VOLUME = "BoundingBoxVolume"
F_MIN_X = "BoundingBoxMinimum_X"
F_MAX_X = "BoundingBoxMaximum_X"
F_MIN_Y = "BoundingBoxMinimum_Y"
F_MAX_Y = "BoundingBoxMaximum_Y"
F_MIN_Z = "BoundingBoxMinimum_Z"
F_MAX_Z = "BoundingBoxMaximum_Z"
F_EULER_NUMBER = "EulerNumber"
F_FORM_FACTOR = "FormFactor"
F_MAJOR_AXIS_LENGTH = "MajorAxisLength"
F_MINOR_AXIS_LENGTH = "MinorAxisLength"
F_ORIENTATION = "Orientation"
F_COMPACTNESS = "Compactness"
F_INERTIA = "InertiaTensor"
F_MAXIMUM_RADIUS = "MaximumRadius"
F_MEDIAN_RADIUS = "MedianRadius"
F_MEAN_RADIUS = "MeanRadius"
F_MIN_FERET_DIAMETER = "MinFeretDiameter"
F_MAX_FERET_DIAMETER = "MaxFeretDiameter"
F_CENTRAL_MOMENT_0_0 = "CentralMoment_0_0"
F_CENTRAL_MOMENT_0_1 = "CentralMoment_0_1"
F_CENTRAL_MOMENT_0_2 = "CentralMoment_0_2"
F_CENTRAL_MOMENT_0_3 = "CentralMoment_0_3"
F_CENTRAL_MOMENT_1_0 = "CentralMoment_1_0"
F_CENTRAL_MOMENT_1_1 = "CentralMoment_1_1"
F_CENTRAL_MOMENT_1_2 = "CentralMoment_1_2"
F_CENTRAL_MOMENT_1_3 = "CentralMoment_1_3"
F_CENTRAL_MOMENT_2_0 = "CentralMoment_2_0"
F_CENTRAL_MOMENT_2_1 = "CentralMoment_2_1"
F_CENTRAL_MOMENT_2_2 = "CentralMoment_2_2"
F_CENTRAL_MOMENT_2_3 = "CentralMoment_2_3"
F_EQUIVALENT_DIAMETER = "EquivalentDiameter"
F_HU_MOMENT_0 = "HuMoment_0"
F_HU_MOMENT_1 = "HuMoment_1"
F_HU_MOMENT_2 = "HuMoment_2"
F_HU_MOMENT_3 = "HuMoment_3"
F_HU_MOMENT_4 = "HuMoment_4"
F_HU_MOMENT_5 = "HuMoment_5"
F_HU_MOMENT_6 = "HuMoment_6"
F_INERTIA_TENSOR_0_0 = "InertiaTensor_0_0"
F_INERTIA_TENSOR_0_1 = "InertiaTensor_0_1"
F_INERTIA_TENSOR_1_0 = "InertiaTensor_1_0"
F_INERTIA_TENSOR_1_1 = "InertiaTensor_1_1"
F_INERTIA_TENSOR_EIGENVALUES_0 = "InertiaTensorEigenvalues_0"
F_INERTIA_TENSOR_EIGENVALUES_1 = "InertiaTensorEigenvalues_1"
F_NORMALIZED_MOMENT_0_0 = "NormalizedMoment_0_0"
F_NORMALIZED_MOMENT_0_1 = "NormalizedMoment_0_1"
F_NORMALIZED_MOMENT_0_2 = "NormalizedMoment_0_2"
F_NORMALIZED_MOMENT_0_3 = "NormalizedMoment_0_3"
F_NORMALIZED_MOMENT_1_0 = "NormalizedMoment_1_0"
F_NORMALIZED_MOMENT_1_1 = "NormalizedMoment_1_1"
F_NORMALIZED_MOMENT_1_2 = "NormalizedMoment_1_2"
F_NORMALIZED_MOMENT_1_3 = "NormalizedMoment_1_3"
F_NORMALIZED_MOMENT_2_0 = "NormalizedMoment_2_0"
F_NORMALIZED_MOMENT_2_1 = "NormalizedMoment_2_1"
F_NORMALIZED_MOMENT_2_2 = "NormalizedMoment_2_2"
F_NORMALIZED_MOMENT_2_3 = "NormalizedMoment_2_3"
F_NORMALIZED_MOMENT_3_0 = "NormalizedMoment_3_0"
F_NORMALIZED_MOMENT_3_1 = "NormalizedMoment_3_1"
F_NORMALIZED_MOMENT_3_2 = "NormalizedMoment_3_2"
F_NORMALIZED_MOMENT_3_3 = "NormalizedMoment_3_3"
F_SPATIAL_MOMENT_0_0 = "SpatialMoment_0_0"
F_SPATIAL_MOMENT_0_1 = "SpatialMoment_0_1"
F_SPATIAL_MOMENT_0_2 = "SpatialMoment_0_2"
F_SPATIAL_MOMENT_0_3 = "SpatialMoment_0_3"
F_SPATIAL_MOMENT_1_0 = "SpatialMoment_1_0"
F_SPATIAL_MOMENT_1_1 = "SpatialMoment_1_1"
F_SPATIAL_MOMENT_1_2 = "SpatialMoment_1_2"
F_SPATIAL_MOMENT_1_3 = "SpatialMoment_1_3"
F_SPATIAL_MOMENT_2_0 = "SpatialMoment_2_0"
F_SPATIAL_MOMENT_2_1 = "SpatialMoment_2_1"
F_SPATIAL_MOMENT_2_2 = "SpatialMoment_2_2"
F_SPATIAL_MOMENT_2_3 = "SpatialMoment_2_3"
"""The non-Zernike features"""
F_STD_2D = [
F_AREA,
F_PERIMETER,
F_MAXIMUM_RADIUS,
F_MEAN_RADIUS,
F_MEDIAN_RADIUS,
F_MIN_FERET_DIAMETER,
F_MAX_FERET_DIAMETER,
F_ORIENTATION,
F_ECCENTRICITY,
F_FORM_FACTOR,
F_SOLIDITY,
F_COMPACTNESS,
F_BBOX_AREA,
]
F_STD_3D = [
F_VOLUME,
F_SURFACE_AREA,
F_CENTER_Z,
F_BBOX_VOLUME,
F_MIN_Z,
F_MAX_Z,
]
F_ADV_2D = [
F_SPATIAL_MOMENT_0_0,
F_SPATIAL_MOMENT_0_1,
F_SPATIAL_MOMENT_0_2,
F_SPATIAL_MOMENT_0_3,
F_SPATIAL_MOMENT_1_0,
F_SPATIAL_MOMENT_1_1,
F_SPATIAL_MOMENT_1_2,
F_SPATIAL_MOMENT_1_3,
F_SPATIAL_MOMENT_2_0,
F_SPATIAL_MOMENT_2_1,
F_SPATIAL_MOMENT_2_2,
F_SPATIAL_MOMENT_2_3,
F_CENTRAL_MOMENT_0_0,
F_CENTRAL_MOMENT_0_1,
F_CENTRAL_MOMENT_0_2,
F_CENTRAL_MOMENT_0_3,
F_CENTRAL_MOMENT_1_0,
F_CENTRAL_MOMENT_1_1,
F_CENTRAL_MOMENT_1_2,
F_CENTRAL_MOMENT_1_3,
F_CENTRAL_MOMENT_2_0,
F_CENTRAL_MOMENT_2_1,
F_CENTRAL_MOMENT_2_2,
F_CENTRAL_MOMENT_2_3,
F_NORMALIZED_MOMENT_0_0,
F_NORMALIZED_MOMENT_0_1,
F_NORMALIZED_MOMENT_0_2,
F_NORMALIZED_MOMENT_0_3,
F_NORMALIZED_MOMENT_1_0,
F_NORMALIZED_MOMENT_1_1,
F_NORMALIZED_MOMENT_1_2,
F_NORMALIZED_MOMENT_1_3,
F_NORMALIZED_MOMENT_2_0,
F_NORMALIZED_MOMENT_2_1,
F_NORMALIZED_MOMENT_2_2,
F_NORMALIZED_MOMENT_2_3,
F_NORMALIZED_MOMENT_3_0,
F_NORMALIZED_MOMENT_3_1,
F_NORMALIZED_MOMENT_3_2,
F_NORMALIZED_MOMENT_3_3,
F_HU_MOMENT_0,
F_HU_MOMENT_1,
F_HU_MOMENT_2,
F_HU_MOMENT_3,
F_HU_MOMENT_4,
F_HU_MOMENT_5,
F_HU_MOMENT_6,
F_INERTIA_TENSOR_0_0,
F_INERTIA_TENSOR_0_1,
F_INERTIA_TENSOR_1_0,
F_INERTIA_TENSOR_1_1,
F_INERTIA_TENSOR_EIGENVALUES_0,
F_INERTIA_TENSOR_EIGENVALUES_1,
]
F_ADV_3D = [F_SOLIDITY]
F_STANDARD = [
F_EXTENT,
F_EULER_NUMBER,
F_EQUIVALENT_DIAMETER,
F_MAJOR_AXIS_LENGTH,
F_MINOR_AXIS_LENGTH,
F_CENTER_X,
F_CENTER_Y,
F_MIN_X,
F_MIN_Y,
F_MAX_X,
F_MAX_Y,
]
class MeasureObjectSizeShape(Module):
    # CellProfiler module registration metadata: display name, settings
    # schema revision, and the GUI category the module is listed under.
    module_name = "MeasureObjectSizeShape"
    variable_revision_number = 3
    category = "Measurement"
def create_settings(self):
"""Create the settings for the module at startup and set the module name
The module allows for an unlimited number of measured objects, each
of which has an entry in self.object_groups.
"""
self.objects_list = LabelListSubscriber(
"Select object sets to measure",
[],
doc="""Select the object sets whose size and shape you want to measure.""",
)
self.spacer = Divider(line=True)
self.calculate_advanced = Binary(
text="Calculate the advanced features?",
value=False,
doc="""\
Select *{YES}* to calculate additional statistics for object moments
and intertia tensors in **2D mode**. These features should not require much additional time
to calculate, but do add many additional columns to the resulting output
files.
In **3D mode** this setting enables the Solidity measurement, which can be time-consuming
to calculate.""".format(
**{"YES": "Yes"}
),
)
self.calculate_zernikes = Binary(
text="Calculate the Zernike features?",
value=True,
doc="""\
Select *{YES}* to calculate the Zernike shape features. Because the
first 10 Zernike polynomials (from order 0 to order 9) are calculated,
this operation can be time consuming if the image contains a lot of
objects. Select *{NO}* if you are measuring 3D objects with this
module.""".format(
**{"YES": "Yes", "NO": "No"}
),
)
def settings(self):
"""The settings as they appear in the save file"""
result = [self.objects_list, self.calculate_zernikes, self.calculate_advanced]
return result
def visible_settings(self):
"""The settings as they appear in the module viewer"""
result = [
self.objects_list,
self.spacer,
self.calculate_zernikes,
self.calculate_advanced,
]
return result
def validate_module(self, pipeline):
"""Make sure chosen objects are selected only once"""
objects = set()
if len(self.objects_list.value) == 0:
raise ValidationError("No object sets selected", self.objects_list)
for object_name in self.objects_list.value:
if object_name in objects:
raise ValidationError(
"%s has already been selected" % object_name, object_name
)
objects.add(object_name)
def get_categories(self, pipeline, object_name):
"""Get the categories of measurements supplied for the given object name
pipeline - pipeline being run
object_name - name of labels in question (or 'Images')
returns a list of category names
"""
for object_set in self.objects_list.value:
if object_set == object_name:
return [AREA_SHAPE]
else:
return []
def get_zernike_numbers(self):
"""The Zernike numbers measured by this module"""
if self.calculate_zernikes.value:
return centrosome.zernike.get_zernike_indexes(ZERNIKE_N + 1)
else:
return []
def get_zernike_name(self, zernike_index):
"""Return the name of a Zernike feature, given a (N,M) 2-tuple
zernike_index - a 2 element sequence organized as N,M
"""
return "Zernike_%d_%d" % (zernike_index[0], zernike_index[1])
def get_feature_names(self, pipeline):
"""Return the names of the features measured"""
feature_names = list(F_STANDARD)
if pipeline.volumetric():
feature_names += list(F_STD_3D)
if self.calculate_advanced.value:
feature_names += list(F_ADV_3D)
else:
feature_names += list(F_STD_2D)
if self.calculate_zernikes.value:
feature_names += [
self.get_zernike_name(index) for index in self.get_zernike_numbers()
]
if self.calculate_advanced.value:
feature_names += list(F_ADV_2D)
return feature_names
def get_measurements(self, pipeline, object_name, category):
"""Return the measurements that this module produces
object_name - return measurements made on this object
(or 'Image' for image measurements)
category - return measurements made in this category
"""
if category == AREA_SHAPE and self.get_categories(pipeline, object_name):
return self.get_feature_names(pipeline)
return []
def run(self, workspace):
"""Run, computing the area measurements for the objects"""
if self.show_window:
workspace.display_data.col_labels = (
"Object",
"Feature",
"Mean",
"Median",
"STD",
)
workspace.display_data.statistics = []
for object_name in self.objects_list.value:
self.run_on_objects(object_name, workspace)
    def run_on_objects(self, object_name, workspace):
        """Determine desired measurements and pass in object arrays for analysis

        object_name - name of the object set to measure
        workspace - workspace providing the objects and receiving measurements
        """
        objects = workspace.get_objects(object_name)
        # Don't analyze if there are no objects at all.
        if len(objects.indices) == 0:
            # No objects to process
            self.measurements_without_objects(workspace, object_name)
            return
        # Determine which properties we're measuring.
        if len(objects.shape) == 2:
            # 2D labels: full planar regionprops set.
            desired_properties = [
                "label",
                "image",
                "area",
                "perimeter",
                "bbox",
                "bbox_area",
                "major_axis_length",
                "minor_axis_length",
                "orientation",
                "centroid",
                "equivalent_diameter",
                "extent",
                "eccentricity",
                "solidity",
                "euler_number",
            ]
            if self.calculate_advanced.value:
                # Moment and inertia-tensor features are optional extras in 2D.
                desired_properties += [
                    "inertia_tensor",
                    "inertia_tensor_eigvals",
                    "moments",
                    "moments_central",
                    "moments_hu",
                    "moments_normalized",
                ]
        else:
            # 3D labels: volumetric subset of regionprops.
            desired_properties = [
                "label",
                "image",
                "area",
                "centroid",
                "bbox",
                "bbox_area",
                "major_axis_length",
                "minor_axis_length",
                "extent",
                "equivalent_diameter",
                "euler_number",
            ]
            if self.calculate_advanced.value:
                # Solidity is expensive in 3D, so it is gated behind 'advanced'.
                desired_properties += [
                    "solidity",
                ]
        # Check for overlapping object sets
        if not objects.overlapping():
            features_to_record = self.analyze_objects(objects, desired_properties)
        else:
            # Objects are overlapping, process as single arrays
            coords_array = objects.ijv
            features_to_record = {}
            for label in objects.indices:
                # Rebuild a binary map containing only this label and
                # measure it in isolation.
                omap = numpy.zeros(objects.shape)
                ocoords = coords_array[coords_array[:, 2] == label, 0:2]
                numpy.put(omap, numpy.ravel_multi_index(ocoords.T, omap.shape), 1)
                tempobject = Objects()
                tempobject.segmented = omap
                buffer = self.analyze_objects(tempobject, desired_properties)
                # Concatenate this object's values onto the per-feature arrays.
                for f, m in buffer.items():
                    if f in features_to_record:
                        features_to_record[f] = numpy.concatenate(
                            (features_to_record[f], m)
                        )
                    else:
                        features_to_record[f] = m
        for f, m in features_to_record.items():
            self.record_measurement(workspace, object_name, f, m)
    def analyze_objects(self, objects, desired_properties):
        """Compute all measurements for a single (non-overlapping) label map.

        objects - the Objects instance whose ``segmented`` label matrix is measured
        desired_properties - regionprops property names to request from skimage

        Returns a dict mapping feature name -> per-object 1-D numpy array.
        """
        labels = objects.segmented
        nobjects = len(objects.indices)
        if len(objects.shape) == 2:
            # --- 2D branch -------------------------------------------------
            props = skimage.measure.regionprops_table(
                labels, properties=desired_properties
            )
            # NOTE(review): a zero perimeter would make formfactor divide by
            # zero (inf/nan); only compactness clamps its denominator below.
            formfactor = 4.0 * numpy.pi * props["area"] / props["perimeter"] ** 2
            # Clamp the denominator to >= 1 to avoid division by zero.
            denom = [max(x, 1) for x in 4.0 * numpy.pi * props["area"]]
            compactness = props["perimeter"] ** 2 / denom
            max_radius = numpy.zeros(nobjects)
            median_radius = numpy.zeros(nobjects)
            mean_radius = numpy.zeros(nobjects)
            min_feret_diameter = numpy.zeros(nobjects)
            max_feret_diameter = numpy.zeros(nobjects)
            zernike_numbers = self.get_zernike_numbers()
            zf = {}
            for n, m in zernike_numbers:
                zf[(n, m)] = numpy.zeros(nobjects)
            # Radius statistics are derived per object from the distance
            # transform of its cropped binary mask.
            for index, mini_image in enumerate(props["image"]):
                # Pad image to assist distance transform
                mini_image = numpy.pad(mini_image, 1)
                distances = scipy.ndimage.distance_transform_edt(mini_image)
                max_radius[index] = centrosome.cpmorphology.fixup_scipy_ndimage_result(
                    scipy.ndimage.maximum(distances, mini_image)
                )
                mean_radius[index] = centrosome.cpmorphology.fixup_scipy_ndimage_result(
                    scipy.ndimage.mean(distances, mini_image)
                )
                median_radius[index] = centrosome.cpmorphology.median_of_labels(
                    distances, mini_image.astype("int"), [1]
                )
            #
            # Zernike features
            #
            if self.calculate_zernikes.value:
                zf_l = centrosome.zernike.zernike(
                    zernike_numbers, labels, objects.indices
                )
                for (n, m), z in zip(zernike_numbers, zf_l.transpose()):
                    zf[(n, m)] = z
            if nobjects > 0:
                # Convex hulls feed the Feret diameter computation.
                chulls, chull_counts = centrosome.cpmorphology.convex_hull_ijv(
                    objects.ijv, objects.indices
                )
                #
                # Feret diameter
                #
                (
                    min_feret_diameter,
                    max_feret_diameter,
                ) = centrosome.cpmorphology.feret_diameter(
                    chulls, chull_counts, objects.indices
                )
            # regionprops_table splits vector properties into suffixed columns,
            # e.g. centroid-0 is the row (Y) and centroid-1 the column (X).
            features_to_record = {
                F_AREA: props["area"],
                F_PERIMETER: props["perimeter"],
                F_MAJOR_AXIS_LENGTH: props["major_axis_length"],
                F_MINOR_AXIS_LENGTH: props["minor_axis_length"],
                F_ECCENTRICITY: props["eccentricity"],
                F_ORIENTATION: props["orientation"],
                F_CENTER_X: props["centroid-1"],
                F_CENTER_Y: props["centroid-0"],
                F_BBOX_AREA: props["bbox_area"],
                F_MIN_X: props["bbox-1"],
                F_MAX_X: props["bbox-3"],
                F_MIN_Y: props["bbox-0"],
                F_MAX_Y: props["bbox-2"],
                F_FORM_FACTOR: formfactor,
                F_EXTENT: props["extent"],
                F_SOLIDITY: props["solidity"],
                F_COMPACTNESS: compactness,
                F_EULER_NUMBER: props["euler_number"],
                F_MAXIMUM_RADIUS: max_radius,
                F_MEAN_RADIUS: mean_radius,
                F_MEDIAN_RADIUS: median_radius,
                F_MIN_FERET_DIAMETER: min_feret_diameter,
                F_MAX_FERET_DIAMETER: max_feret_diameter,
                F_EQUIVALENT_DIAMETER: props["equivalent_diameter"],
            }
            if self.calculate_advanced.value:
                features_to_record.update(
                    {
                        F_SPATIAL_MOMENT_0_0: props["moments-0-0"],
                        F_SPATIAL_MOMENT_0_1: props["moments-0-1"],
                        F_SPATIAL_MOMENT_0_2: props["moments-0-2"],
                        F_SPATIAL_MOMENT_0_3: props["moments-0-3"],
                        F_SPATIAL_MOMENT_1_0: props["moments-1-0"],
                        F_SPATIAL_MOMENT_1_1: props["moments-1-1"],
                        F_SPATIAL_MOMENT_1_2: props["moments-1-2"],
                        F_SPATIAL_MOMENT_1_3: props["moments-1-3"],
                        F_SPATIAL_MOMENT_2_0: props["moments-2-0"],
                        F_SPATIAL_MOMENT_2_1: props["moments-2-1"],
                        F_SPATIAL_MOMENT_2_2: props["moments-2-2"],
                        F_SPATIAL_MOMENT_2_3: props["moments-2-3"],
                        F_CENTRAL_MOMENT_0_0: props["moments_central-0-0"],
                        F_CENTRAL_MOMENT_0_1: props["moments_central-0-1"],
                        F_CENTRAL_MOMENT_0_2: props["moments_central-0-2"],
                        F_CENTRAL_MOMENT_0_3: props["moments_central-0-3"],
                        F_CENTRAL_MOMENT_1_0: props["moments_central-1-0"],
                        F_CENTRAL_MOMENT_1_1: props["moments_central-1-1"],
                        F_CENTRAL_MOMENT_1_2: props["moments_central-1-2"],
                        F_CENTRAL_MOMENT_1_3: props["moments_central-1-3"],
                        F_CENTRAL_MOMENT_2_0: props["moments_central-2-0"],
                        F_CENTRAL_MOMENT_2_1: props["moments_central-2-1"],
                        F_CENTRAL_MOMENT_2_2: props["moments_central-2-2"],
                        F_CENTRAL_MOMENT_2_3: props["moments_central-2-3"],
                        F_NORMALIZED_MOMENT_0_0: props["moments_normalized-0-0"],
                        F_NORMALIZED_MOMENT_0_1: props["moments_normalized-0-1"],
                        F_NORMALIZED_MOMENT_0_2: props["moments_normalized-0-2"],
                        F_NORMALIZED_MOMENT_0_3: props["moments_normalized-0-3"],
                        F_NORMALIZED_MOMENT_1_0: props["moments_normalized-1-0"],
                        F_NORMALIZED_MOMENT_1_1: props["moments_normalized-1-1"],
                        F_NORMALIZED_MOMENT_1_2: props["moments_normalized-1-2"],
                        F_NORMALIZED_MOMENT_1_3: props["moments_normalized-1-3"],
                        F_NORMALIZED_MOMENT_2_0: props["moments_normalized-2-0"],
                        F_NORMALIZED_MOMENT_2_1: props["moments_normalized-2-1"],
                        F_NORMALIZED_MOMENT_2_2: props["moments_normalized-2-2"],
                        F_NORMALIZED_MOMENT_2_3: props["moments_normalized-2-3"],
                        F_NORMALIZED_MOMENT_3_0: props["moments_normalized-3-0"],
                        F_NORMALIZED_MOMENT_3_1: props["moments_normalized-3-1"],
                        F_NORMALIZED_MOMENT_3_2: props["moments_normalized-3-2"],
                        F_NORMALIZED_MOMENT_3_3: props["moments_normalized-3-3"],
                        F_HU_MOMENT_0: props["moments_hu-0"],
                        F_HU_MOMENT_1: props["moments_hu-1"],
                        F_HU_MOMENT_2: props["moments_hu-2"],
                        F_HU_MOMENT_3: props["moments_hu-3"],
                        F_HU_MOMENT_4: props["moments_hu-4"],
                        F_HU_MOMENT_5: props["moments_hu-5"],
                        F_HU_MOMENT_6: props["moments_hu-6"],
                        F_INERTIA_TENSOR_0_0: props["inertia_tensor-0-0"],
                        F_INERTIA_TENSOR_0_1: props["inertia_tensor-0-1"],
                        F_INERTIA_TENSOR_1_0: props["inertia_tensor-1-0"],
                        F_INERTIA_TENSOR_1_1: props["inertia_tensor-1-1"],
                        F_INERTIA_TENSOR_EIGENVALUES_0: props[
                            "inertia_tensor_eigvals-0"
                        ],
                        F_INERTIA_TENSOR_EIGENVALUES_1: props[
                            "inertia_tensor_eigvals-1"
                        ],
                    }
                )
            if self.calculate_zernikes.value:
                features_to_record.update(
                    {
                        self.get_zernike_name((n, m)): zf[(n, m)]
                        for n, m in zernike_numbers
                    }
                )
        else:
            # --- 3D (volumetric) branch ------------------------------------
            props = skimage.measure.regionprops_table(
                labels, properties=desired_properties
            )
            # SurfaceArea
            # Surface area comes from a marching-cubes mesh of each label,
            # using the parent image's voxel spacing when one is available.
            surface_areas = numpy.zeros(len(props["label"]))
            for index, label in enumerate(props["label"]):
                volume = numpy.zeros_like(labels, dtype="bool")
                volume[labels == label] = True
                verts, faces, _, _ = skimage.measure.marching_cubes_lewiner(
                    volume,
                    spacing=objects.parent_image.spacing
                    if objects.has_parent_image
                    else (1.0,) * labels.ndim,
                    level=0,
                )
                surface_areas[index] = skimage.measure.mesh_surface_area(verts, faces)
            # In 3D, centroid/bbox columns are ordered Z (0), Y (1), X (2).
            features_to_record = {
                F_VOLUME: props["area"],
                F_SURFACE_AREA: surface_areas,
                F_MAJOR_AXIS_LENGTH: props["major_axis_length"],
                F_MINOR_AXIS_LENGTH: props["minor_axis_length"],
                F_CENTER_X: props["centroid-2"],
                F_CENTER_Y: props["centroid-1"],
                F_CENTER_Z: props["centroid-0"],
                F_BBOX_VOLUME: props["bbox_area"],
                F_MIN_X: props["bbox-2"],
                F_MAX_X: props["bbox-5"],
                F_MIN_Y: props["bbox-1"],
                F_MAX_Y: props["bbox-4"],
                F_MIN_Z: props["bbox-0"],
                F_MAX_Z: props["bbox-3"],
                F_EXTENT: props["extent"],
                F_EULER_NUMBER: props["euler_number"],
                F_EQUIVALENT_DIAMETER: props["equivalent_diameter"],
            }
            if self.calculate_advanced.value:
                features_to_record[F_SOLIDITY] = props["solidity"]
        return features_to_record
def display(self, workspace, figure):
figure.set_subplots((1, 1))
figure.subplot_table(
0,
0,
workspace.display_data.statistics,
col_labels=workspace.display_data.col_labels,
title="default",
)
def perform_measurement(self, workspace, function, object_name, feature_name):
"""Perform a measurement on a label matrix
workspace - the workspace for the run
function - a function with the following sort of signature:
image - an image to be fed into the function which for
our case is all ones
labels - the label matrix from the objects
index - a sequence of label indexes to pay attention to
object_name - name of object to retrieve from workspace and deposit
in measurements
feature_name- name of feature to deposit in measurements
"""
objects = workspace.get_objects(object_name)
if len(objects.indices) > 0:
data = objects.fn_of_label_and_index(function)
else:
data = numpy.zeros((0,))
self.record_measurement(workspace, object_name, feature_name, data)
def perform_ndmeasurement(self, workspace, function, object_name, feature_name):
"""Perform a scipy.ndimage-style measurement on a label matrix
workspace - the workspace for the run
function - a function with the following sort of signature:
image - an image to be fed into the function which for
our case is all ones
labels - the label matrix from the objects
index - a sequence of label indexes to pay attention to
object_name - name of object to retrieve from workspace and deposit
in measurements
feature_name- name of feature to deposit in measurements
"""
objects = workspace.get_objects(object_name)
if len(objects.indices) > 0:
data = objects.fn_of_ones_label_and_index(function)
else:
data = numpy.zeros((0,))
self.record_measurement(workspace, object_name, feature_name, data)
def record_measurement(self, workspace, object_name, feature_name, result):
"""Record the result of a measurement in the workspace's measurements"""
data = centrosome.cpmorphology.fixup_scipy_ndimage_result(result)
workspace.add_measurement(
object_name, "%s_%s" % (AREA_SHAPE, feature_name), data
)
if self.show_window and numpy.any(numpy.isfinite(data)) > 0:
data = data[numpy.isfinite(data)]
workspace.display_data.statistics.append(
(
object_name,
feature_name,
"%.2f" % numpy.mean(data),
"%.2f" % numpy.median(data),
"%.2f" % numpy.std(data),
)
)
def get_measurement_columns(self, pipeline):
"""Return measurement column definitions.
All cols returned as float even though "Area" will only ever be int"""
measurement_names = self.get_feature_names(pipeline)
cols = []
for oname in self.objects_list.value:
for mname in measurement_names:
cols += [(oname, AREA_SHAPE + "_" + mname, COLTYPE_FLOAT,)]
return cols
def upgrade_settings(self, setting_values, variable_revision_number, module_name):
"""Adjust the setting_values for older save file versions"""
if variable_revision_number == 1:
objects_list = setting_values[:-1]
setting_values = [", ".join(map(str, objects_list)), setting_values[-1]]
variable_revision_number = 2
if variable_revision_number == 2:
# Add advanced features toggle
setting_values.append("No")
variable_revision_number = 3
return setting_values, variable_revision_number
def volumetric(self):
return True
def measurements_without_objects(self, workspace, object_name):
# Create column headers even if there were no objects in a set.
features_to_record = self.get_feature_names(workspace.pipeline)
empty_measure = numpy.zeros((0,))
for feature_name in features_to_record:
self.record_measurement(workspace, object_name, feature_name, empty_measure)
# Alias so code referencing MeasureObjectAreaShape keeps working — presumably
# the class's former name; confirm against existing pipelines before removing.
MeasureObjectAreaShape = MeasureObjectSizeShape
| 1.921875 | 2 |
lotlan_scheduler/parser/LoTLanParserListener.py | iml130/lotlan-scheduler | 0 | 12761179 | <filename>lotlan_scheduler/parser/LoTLanParserListener.py
# Generated from LoTLanParser.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .LoTLanParser import LoTLanParser
else:
from LoTLanParser import LoTLanParser
# This class defines a complete listener for a parse tree produced by LoTLanParser.
class LoTLanParserListener(ParseTreeListener):
    """Empty listener for parse trees produced by LoTLanParser.

    Generated by ANTLR 4.8 from LoTLanParser.g4 (see the header comment);
    regenerate from the grammar rather than editing by hand. Subclass and
    override the enter/exit hooks of interest — the ANTLR tree walker calls
    each pair when the corresponding rule context is entered or exited.
    """
    # Enter a parse tree produced by LoTLanParser#program.
    def enterProgram(self, ctx:LoTLanParser.ProgramContext):
        pass
    # Exit a parse tree produced by LoTLanParser#program.
    def exitProgram(self, ctx:LoTLanParser.ProgramContext):
        pass
    # Enter a parse tree produced by LoTLanParser#template.
    def enterTemplate(self, ctx:LoTLanParser.TemplateContext):
        pass
    # Exit a parse tree produced by LoTLanParser#template.
    def exitTemplate(self, ctx:LoTLanParser.TemplateContext):
        pass
    # Enter a parse tree produced by LoTLanParser#templateStart.
    def enterTemplateStart(self, ctx:LoTLanParser.TemplateStartContext):
        pass
    # Exit a parse tree produced by LoTLanParser#templateStart.
    def exitTemplateStart(self, ctx:LoTLanParser.TemplateStartContext):
        pass
    # Enter a parse tree produced by LoTLanParser#instance.
    def enterInstance(self, ctx:LoTLanParser.InstanceContext):
        pass
    # Exit a parse tree produced by LoTLanParser#instance.
    def exitInstance(self, ctx:LoTLanParser.InstanceContext):
        pass
    # Enter a parse tree produced by LoTLanParser#instanceStart.
    def enterInstanceStart(self, ctx:LoTLanParser.InstanceStartContext):
        pass
    # Exit a parse tree produced by LoTLanParser#instanceStart.
    def exitInstanceStart(self, ctx:LoTLanParser.InstanceStartContext):
        pass
    # Enter a parse tree produced by LoTLanParser#memberVariable.
    def enterMemberVariable(self, ctx:LoTLanParser.MemberVariableContext):
        pass
    # Exit a parse tree produced by LoTLanParser#memberVariable.
    def exitMemberVariable(self, ctx:LoTLanParser.MemberVariableContext):
        pass
    # Enter a parse tree produced by LoTLanParser#value.
    def enterValue(self, ctx:LoTLanParser.ValueContext):
        pass
    # Exit a parse tree produced by LoTLanParser#value.
    def exitValue(self, ctx:LoTLanParser.ValueContext):
        pass
    # Enter a parse tree produced by LoTLanParser#transportOrderStep.
    def enterTransportOrderStep(self, ctx:LoTLanParser.TransportOrderStepContext):
        pass
    # Exit a parse tree produced by LoTLanParser#transportOrderStep.
    def exitTransportOrderStep(self, ctx:LoTLanParser.TransportOrderStepContext):
        pass
    # Enter a parse tree produced by LoTLanParser#tosStart.
    def enterTosStart(self, ctx:LoTLanParser.TosStartContext):
        pass
    # Exit a parse tree produced by LoTLanParser#tosStart.
    def exitTosStart(self, ctx:LoTLanParser.TosStartContext):
        pass
    # Enter a parse tree produced by LoTLanParser#tosStatement.
    def enterTosStatement(self, ctx:LoTLanParser.TosStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#tosStatement.
    def exitTosStatement(self, ctx:LoTLanParser.TosStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#locationStatement.
    def enterLocationStatement(self, ctx:LoTLanParser.LocationStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#locationStatement.
    def exitLocationStatement(self, ctx:LoTLanParser.LocationStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#optTosStatement.
    def enterOptTosStatement(self, ctx:LoTLanParser.OptTosStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#optTosStatement.
    def exitOptTosStatement(self, ctx:LoTLanParser.OptTosStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#eventStatement.
    def enterEventStatement(self, ctx:LoTLanParser.EventStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#eventStatement.
    def exitEventStatement(self, ctx:LoTLanParser.EventStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#onDoneStatement.
    def enterOnDoneStatement(self, ctx:LoTLanParser.OnDoneStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#onDoneStatement.
    def exitOnDoneStatement(self, ctx:LoTLanParser.OnDoneStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#parameterStatement.
    def enterParameterStatement(self, ctx:LoTLanParser.ParameterStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#parameterStatement.
    def exitParameterStatement(self, ctx:LoTLanParser.ParameterStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#task.
    def enterTask(self, ctx:LoTLanParser.TaskContext):
        pass
    # Exit a parse tree produced by LoTLanParser#task.
    def exitTask(self, ctx:LoTLanParser.TaskContext):
        pass
    # Enter a parse tree produced by LoTLanParser#taskStart.
    def enterTaskStart(self, ctx:LoTLanParser.TaskStartContext):
        pass
    # Exit a parse tree produced by LoTLanParser#taskStart.
    def exitTaskStart(self, ctx:LoTLanParser.TaskStartContext):
        pass
    # Enter a parse tree produced by LoTLanParser#taskStatement.
    def enterTaskStatement(self, ctx:LoTLanParser.TaskStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#taskStatement.
    def exitTaskStatement(self, ctx:LoTLanParser.TaskStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#constraintsStatement.
    def enterConstraintsStatement(self, ctx:LoTLanParser.ConstraintsStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#constraintsStatement.
    def exitConstraintsStatement(self, ctx:LoTLanParser.ConstraintsStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#transportOrder.
    def enterTransportOrder(self, ctx:LoTLanParser.TransportOrderContext):
        pass
    # Exit a parse tree produced by LoTLanParser#transportOrder.
    def exitTransportOrder(self, ctx:LoTLanParser.TransportOrderContext):
        pass
    # Enter a parse tree produced by LoTLanParser#fromStatement.
    def enterFromStatement(self, ctx:LoTLanParser.FromStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#fromStatement.
    def exitFromStatement(self, ctx:LoTLanParser.FromStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#toStatement.
    def enterToStatement(self, ctx:LoTLanParser.ToStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#toStatement.
    def exitToStatement(self, ctx:LoTLanParser.ToStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#parameters.
    def enterParameters(self, ctx:LoTLanParser.ParametersContext):
        pass
    # Exit a parse tree produced by LoTLanParser#parameters.
    def exitParameters(self, ctx:LoTLanParser.ParametersContext):
        pass
    # Enter a parse tree produced by LoTLanParser#repeatStatement.
    def enterRepeatStatement(self, ctx:LoTLanParser.RepeatStatementContext):
        pass
    # Exit a parse tree produced by LoTLanParser#repeatStatement.
    def exitRepeatStatement(self, ctx:LoTLanParser.RepeatStatementContext):
        pass
    # Enter a parse tree produced by LoTLanParser#expression.
    def enterExpression(self, ctx:LoTLanParser.ExpressionContext):
        pass
    # Exit a parse tree produced by LoTLanParser#expression.
    def exitExpression(self, ctx:LoTLanParser.ExpressionContext):
        pass
    # Enter a parse tree produced by LoTLanParser#binOperation.
    def enterBinOperation(self, ctx:LoTLanParser.BinOperationContext):
        pass
    # Exit a parse tree produced by LoTLanParser#binOperation.
    def exitBinOperation(self, ctx:LoTLanParser.BinOperationContext):
        pass
    # Enter a parse tree produced by LoTLanParser#unOperation.
    def enterUnOperation(self, ctx:LoTLanParser.UnOperationContext):
        pass
    # Exit a parse tree produced by LoTLanParser#unOperation.
    def exitUnOperation(self, ctx:LoTLanParser.UnOperationContext):
        pass
    # Enter a parse tree produced by LoTLanParser#con.
    def enterCon(self, ctx:LoTLanParser.ConContext):
        pass
    # Exit a parse tree produced by LoTLanParser#con.
    def exitCon(self, ctx:LoTLanParser.ConContext):
        pass
# Drop the module-level parser reference (ANTLR-generated epilogue). The
# original line had extraction junk fused onto it, which made it a syntax
# error; restored to the bare statement.
del LoTLanParser
AzureFunctionProj/TestHttpTrigger/__init__.py | tys-hiroshi/BacklogApiProcessing | 2 | 12761180 | import logging
import azure.functions as func
from backlogapiprocessmodule import *
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP trigger entry point: run the Backlog API batch job, then greet the
    caller by name (query string or JSON body)."""
    logging.info('-------Python HTTP trigger function processed a request.')
    config_path = '/home/site/wwwroot/BacklogApiTimerTrigger/config.yml'
    logging_conf_path = '/home/site/wwwroot/BacklogApiTimerTrigger/logging_debug.conf'
    backlogapiprocess.run(config_path, logging_conf_path)
    # Prefer "name" from the query string; fall back to the JSON body.
    name = req.params.get('name')
    if not name:
        try:
            body = req.get_json()
        except ValueError:
            # No parseable JSON body: leave name unset.
            pass
        else:
            name = body.get('name')
    if not name:
        return func.HttpResponse(
            "Please pass a name on the query string or in the request body",
            status_code=400
        )
    return func.HttpResponse(f"Hello {name}!")
| 2.453125 | 2 |
iutest/dependencies.py | mgland/iutest | 10 | 12761181 | # Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import logging
logger = logging.getLogger(__name__)
class _ErrorDummy(object):
def __getattribute__(self, name):
return _ErrorDummy()
def __call__(self, *_, **__):
return _ErrorDummy()
def __repr__(self):
return "Error Happened."
def __iter__(self):
yield _ErrorDummy()
def __getitem__(self, index):
return _ErrorDummy()
def __bool__(self):
return False
def __nonzero__(self):
return False
class _DependencyWrapper(object):
    # Base class for lazy wrappers around optional third-party packages.
    # Subclasses define a class-level ``_instance`` (singleton slot) and
    # override ``name()`` and ``_tryImport()``.
    @classmethod
    def get(cls, silent=False):
        """Get an instance of the wrapper object.
        Args:
            silent (bool): Whether we issue errors or debug when the dependency
            is not installed.
        """
        # NOTE(review): ``silent`` is accepted here but never used in this
        # method — confirm whether it should be forwarded to the constructor.
        # Only subclasses declare ``_instance``; the bare base class returns
        # None.
        if not hasattr(cls, "_instance"):
            return None
        if not cls._instance:
            cls._instance = cls()
        return cls._instance
    @classmethod
    def getModule(cls, silent=False):
        # Return the wrapped module, or an _ErrorDummy that absorbs all use
        # when the dependency is missing.
        wrapper = cls.get(silent=silent)
        return wrapper._mod if wrapper._mod else _ErrorDummy()
    @classmethod
    def reload(cls, silent=True):
        """Try reimport the dependency module.
        Args:
            silent (bool): Whether we issue errors or debug when the dependency
            is not installed.
        """
        cls.get()._tryImport(force=True, silent=silent)
    def __init__(self):
        # Cached module object; None until a successful import.
        self._mod = None
        self._tryImport(force=False, silent=True)
    @classmethod
    def _issueNotInstalledError(cls, silent=True):
        # Log at error or debug level depending on ``silent``.
        if not silent:
            logger.error("The package '%s' is not installed", cls.name())
        else:
            logger.debug("The package '%s' is not installed", cls.name())
    @classmethod
    def _issueNotImplementedError(cls):
        # Raised when abstract hooks are invoked on the base class directly.
        err = "Please use a derived class instead of base class {}".format(cls.__name__)
        raise NotImplementedError(err)
    def _tryImport(self, force, silent):
        # Abstract: subclasses import their package and set self._mod.
        self._issueNotImplementedError()
    @classmethod
    def name(cls):
        # Abstract: subclasses return the package name.
        cls._issueNotImplementedError()
    def isValid(self):
        # True when the wrapped module imported successfully.
        return bool(self._mod)
    @classmethod
    def check(cls):
        # Validate availability, logging loudly (silent=False) when missing.
        if not cls.get().isValid():
            cls._issueNotInstalledError(silent=False)
            return False
        return True
class ReimportWrapper(_DependencyWrapper):
    """Lazy wrapper around the optional 'reimport' package."""

    _instance = None

    @classmethod
    def name(cls):
        return "reimport"

    def _tryImport(self, force, silent):
        # Keep the cached module unless a retry was explicitly forced.
        if self._mod and not force:
            return
        self._mod = None
        try:
            import reimport as mod
        except ImportError:
            self._issueNotInstalledError(silent)
        else:
            self._mod = mod
class Nose2Wrapper(_DependencyWrapper):
    """Lazy wrapper around the optional 'nose2' package."""

    _instance = None

    @classmethod
    def name(cls):
        return "nose2"

    def _tryImport(self, force, silent):
        already_loaded = bool(self._mod)
        if already_loaded and not force:
            return
        self._mod = None
        try:
            import nose2
        except ImportError:
            # Leave _mod as None and report per the silent flag.
            self._issueNotInstalledError(silent)
            return
        self._mod = nose2
class PyTestWrapper(_DependencyWrapper):
    """Lazy wrapper around the optional 'pytest' package."""

    _instance = None

    @classmethod
    def name(cls):
        return "pytest"

    def _tryImport(self, force, silent):
        # A cached module short-circuits unless the caller forces a retry.
        if not force and self._mod:
            return
        self._mod = None
        try:
            import pytest
        except ImportError:
            self._issueNotInstalledError(silent)
        else:
            self._mod = pytest
| 1.953125 | 2 |
brain.py | riochandra1212/object-detection-app | 4 | 12761182 | <filename>brain.py
import cv2 as cv
import numpy as np
class obj_detector:
    """YOLO-style object detector built on OpenCV's DNN module.

    Loads a Darknet model (weights + config + class-name file), detects
    objects in still images or video frames, and draws labelled boxes.
    """
    # NOTE(review): these are *class-level* attributes; the mutable ones
    # (detectedObj, listObj) would be shared across instances until first
    # reassignment — consider moving them into __init__.
    hasDetected = False
    detectedObj = []
    listObj = {}
    capVideo = None
    photo_target = None
    def __init__(self, weightModel, configModel, namesModel):
        """Load the network and class names; precompute output layers and
        one random BGR color per class."""
        self.net = cv.dnn.readNet(weightModel, configModel)
        self.classes = []
        with open(namesModel, 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]
        layer_names = self.net.getLayerNames()
        # Unconnected output layers are the network's final detection layers
        # (getUnconnectedOutLayers returns 1-based indices, hence the -1).
        self.output_layers = [layer_names[i[0] - 1]
                              for i in self.net.getUnconnectedOutLayers()]
        # print(output_layers)
        self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))
        self.tracker_obj = []
    def set_photo(self, img):
        # Load an image from disk as the current detection target.
        self.photo_target = cv.imread(img)
    def detect_obj(self, frame=None):
        """Run a forward pass on the current target (or *frame*) and return
        detections as [class_id, class_name, [x, y, w, h]] lists."""
        # init
        self.listObj = {}
        self.hasDetected = True
        if frame is not None:
            self.photo_target = frame
        img_ori = self.photo_target
        img = cv.UMat(img_ori)
        timer = cv.getTickCount()
        height, width, channels = img_ori.shape
        net = self.net
        # 0.00392 ~= 1/255: scales pixel values into [0, 1] for the network.
        blob = cv.dnn.blobFromImage(
            img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        # net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)
        outs = net.forward(self.output_layers)
        self.detectedObj = []
        for out in outs:
            for detection in out:
                # detection = [cx, cy, w, h, obj_conf, class scores...]
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > .5:
                    # Convert normalized center/size to pixel top-left box.
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    self.detectedObj.append([
                        class_id,
                        str(self.classes[class_id]),
                        [x, y, w, h]
                    ])
        # Elapsed wall-clock time of this detection pass, in seconds.
        self.time_process = (cv.getTickCount() - timer) / cv.getTickFrequency()
        return self.detectedObj
    def label_img(self, bbox, label, color):
        # Draw one labelled rectangle plus a dot at the box center.
        img = self.photo_target
        font = cv.FONT_HERSHEY_PLAIN
        x, y, w, h = bbox
        x, y, w, h = int(x), int(y), int(w), int(h)
        center_x = (w / 2) + x
        center_y = (h / 2) + y
        cv.rectangle(img, (x, y), (x+w, y+h), color, 2)
        cv.putText(img, label, (x, y+30), font, 1.5, color, 2)
        cv.circle(img, (int(center_x), int(center_y)), 2, color, thickness=2)
        self.photo_target = img
    def label_obj(self):
        """Detect objects, draw every box, and tally counts per class name
        into self.listObj."""
        self.detect_obj()
        for obj in self.detectedObj:
            id_class_obj = obj[0]
            class_obj = obj[1]
            pos_obj = obj[2]
            color_obj = self.colors[id_class_obj]
            self.label_img(pos_obj, class_obj, color_obj)
            if class_obj in self.listObj:
                self.listObj[class_obj] += 1
            else:
                self.listObj[class_obj] = 1
    def show_image(self):
        # Display the annotated image in an OpenCV window.
        cv.imshow('result', self.photo_target)
    def get_image(self):
        # Change from BGR (opencv) to RGB for tkinter
        return self.cvrt_img(self.photo_target)
    def cvrt_img(self, img):
        # BGR -> RGB conversion helper.
        return cv.cvtColor(img, cv.COLOR_BGR2RGB)
    def capture_video(self, path):
        # Open a video source; returns True when it opened successfully.
        self.capVideo = cv.VideoCapture(path)
        return self.capVideo.isOpened()
    def read_frame(self):
        # Grab the next frame from the open video source.
        ret, frame = self.capVideo.read()
        return frame
    def detect_frame(self, frame):
        """Detect and annotate a single video frame; returns the RGB result."""
        self.photo_target = frame
        self.label_obj()
        result = self.get_image()
        return result
| 2.6875 | 3 |
tests/core/p2p-proto/test_requests.py | Uxio0/trinity | 0 | 12761183 | import pytest
from p2p.exceptions import PeerConnectionLost
from trinity.protocol.eth.peer import (
ETHPeerPoolEventServer,
)
from tests.core.integration_test_helpers import (
FakeAsyncChainDB,
run_peer_pool_event_server,
run_proxy_peer_pool,
run_request_server,
)
from tests.core.peer_helpers import (
get_directly_linked_peers,
MockPeerPoolWithConnectedPeers,
)
@pytest.mark.asyncio
async def test_proxy_peer_requests(request,
                                   event_bus,
                                   other_event_bus,
                                   event_loop,
                                   chaindb_fresh,
                                   chaindb_20):
    """Issue each proxy-peer request type over the event bus against a server
    peer backed by a 20-block chain, and check the responses line up with the
    genesis header."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    # Client starts from a fresh chain; server has the 20-block chain.
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
    # Spin up event servers for both pools, a request server on the server
    # side, and proxy pools on both buses.
    async with run_peer_pool_event_server(
        client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        server_event_bus,
        FakeAsyncChainDB(chaindb_20.db)
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(
        server_event_bus
    ):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
        # Headers: fetch exactly the genesis header.
        headers = await proxy_peer.requests.get_block_headers(0, 1, 0, False)
        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0
        # Receipts for that header must match its receipt root.
        receipts = await proxy_peer.requests.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root
        # Block bodies must match the header's transaction root.
        block_bundles = await proxy_peer.requests.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root
        # Node data keyed by the state root round-trips.
        node_data = await proxy_peer.requests.get_node_data((block_header.state_root,))
        assert node_data[0][0] == block_header.state_root
@pytest.mark.asyncio
async def test_proxy_peer_requests_with_timeouts(request,
                                                 event_bus,
                                                 other_event_bus,
                                                 event_loop,
                                                 chaindb_fresh,
                                                 chaindb_20):
    """Every proxy-peer request type raises TimeoutError when no request
    server is running to answer it (note: unlike the happy-path test, no
    run_request_server is started here)."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
    async with run_peer_pool_event_server(
        client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(
        server_event_bus
    ):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
        # A very short timeout forces each request type to fail fast.
        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_block_headers(0, 1, 0, False, timeout=0.01)
        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_receipts((), timeout=0.01)
        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_block_bodies((), timeout=0.01)
        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_node_data((), timeout=0.01)
@pytest.mark.asyncio
async def test_requests_when_peer_in_client_vanishs(request,
                                                    event_bus,
                                                    other_event_bus,
                                                    event_loop,
                                                    chaindb_fresh,
                                                    chaindb_20):
    """After the underlying peer disappears from the client pool, every
    proxy-peer request type raises PeerConnectionLost."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
    async with run_peer_pool_event_server(
        client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        server_event_bus,
        FakeAsyncChainDB(chaindb_20.db)
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(
        server_event_bus
    ):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
        # We remove the peer from the client and assume to see PeerConnectionLost exceptions raised
        client_peer_pool.connected_nodes.pop(client_peer.remote)
        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_block_headers(0, 1, 0, False)
        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_receipts(())
        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_block_bodies(())
        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_node_data(())
| 1.71875 | 2 |
gitmirror.py | d12y12/GitMirror | 0 | 12761184 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import optparse
from repository.utils import get_version, set_logger
from repository import RepositoryManager
from repository.minisetting import Setting
def print_cmd_header():
    """Print the program banner with the current GitMirror version."""
    print(f"GitMirror {get_version()}")
def print_cmd_result(success=True):
    """Print a short outcome summary for a completed command."""
    if not success:
        print("Failed")
        print("Please check log")
        return
    print("Success")
def usage_error(error: str):
    """Print a usage error followed by a hint pointing at the -h help text."""
    prog = os.path.basename(__file__)
    print(f"Usage Error: {prog} {error}")
    print(f"Try {prog} -h for more information")
def process(options, args):
    """Dispatch one parsed command line to the repository manager.

    options/args - the optparse results from cli()

    Returns True on success, False on a usage error; returns None when no
    command option was given. Fix applied: the --remove branch previously
    fell through into the later option checks instead of returning like
    every other branch.
    """
    setting = Setting()
    # Logging options are independent; each call reconfigures the logger.
    if options.logfile:
        set_logger(setting, log_enable=True, log_file=options.logfile)
    if options.loglevel:
        set_logger(setting, log_enable=True, log_level=options.loglevel)
    if options.logdir:
        set_logger(setting, log_enable=True, log_dir=options.logdir)
    if options.nolog:
        set_logger(setting, log_enable=False)
    repo_manager = RepositoryManager(setting)
    if options.list:
        if len(args) > 0:
            usage_error("--list take no argument")
            return False
        services, services_possible = repo_manager.get_services_list()
        print("Available Service: {}".format(services))
        print("Potential Service: {}".format(services_possible))
        return True
    if options.parse:
        if len(args) != 1:
            usage_error("--parse only take 1 argument <service name>")
            return False
        service_name = args[0]
        print_cmd_result(repo_manager.parse_service(service_name))
        return True
    if options.mirror:
        if len(args) != 1:
            usage_error("--mirror only take 1 argument <service name>")
            return False
        service_name = args[0]
        print_cmd_result(repo_manager.mirror_service(service_name))
        return True
    if options.get:
        if options.get not in ['configs', 'repos']:
            usage_error("--get options should be choice of [configs, repos]")
            return False
        if len(args) not in (1, 2):
            usage_error("--get can take 2 argument <service name> [output file]")
            return False
        service_name = args[0]
        output = ''
        if len(args) == 2:
            output = args[1]
        if options.get == 'configs':
            print_cmd_result(repo_manager.get_service_config(service_name, output))
        if options.get == 'repos':
            print_cmd_result(repo_manager.get_service_repos(service_name, output))
        return True
    if options.add:
        if len(args) != 1:
            usage_error("--add only take 1 argument <service name>")
            return False
        service_name = args[0]
        print_cmd_result(repo_manager.add_service(service_name))
        return True
    if options.remove:
        if len(args) != 1:
            usage_error("--remove only take 1 argument <service name>")
            return False
        service_name = args[0]
        print_cmd_result(repo_manager.remove_service(service_name))
        # BUG FIX: return like every other command branch so --remove does
        # not fall through to the option checks below.
        return True
    if options.autoconf:
        if len(args) > 0:
            usage_error("--autoconf take no argument")
            return False
        repo_manager.autoconf()
        return True
    if options.batchrun:
        if len(args) != 1:
            usage_error("--batchrun only take 1 argument <service name>")
            return False
        service_name = args[0]
        repo_manager.batchrun_service(service_name)
        return True
    if options.init:
        if len(args) > 0:
            usage_error("--init take no argument")
            return False
        repo_manager.init()
        return True
def cli(argv=None):
    """Command-line entry point for the repo manager.

    Builds the optparse parser, prints help when invoked with no
    arguments, and otherwise dispatches to ``process``.

    :param argv: full argument vector including the program name;
                 defaults to ``sys.argv``.
    """
    print_cmd_header()
    if argv is None:
        argv = sys.argv
    usage = "usage: %prog [options] [service name] [output]"
    parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
                                   conflict_handler='resolve', usage=usage)
    # Logging-related options shared by every sub-command.
    group_global = optparse.OptionGroup(parser, "Global Options")
    group_global.add_option("--logdir", metavar="PATH",
                            help="Log directory. if omitted local log directory will be created")
    group_global.add_option("--logfile", metavar="FILE",
                            help="log file. if omitted stderr will be used")
    group_global.add_option("--loglevel", metavar="LEVEL", default=None,
                            help="log level (default: DEBUG)")
    group_global.add_option("--nolog", action="store_true",
                            help="disable logging completely")
    parser.add_option_group(group_global)
    # One flag per sub-command; ``process`` checks them in order.
    parser.add_option("--list", action='store_true', dest='list',
                      help="List all services names available")
    parser.add_option("--parse", action='store_true', dest="parse",
                      help="Parse repositories for <service name>")
    parser.add_option("--mirror", action='store_true', dest="mirror",
                      help="Update from remote & Push to target for <service name>")
    parser.add_option("--get", metavar="CONTENT", dest="get",
                      help="Get content(configs/repos) from <service name> save to [output]")
    parser.add_option("--add", action='store_true', dest="add",
                      help="Create or Update <service name>")
    parser.add_option("--remove", action='store_true', dest="remove",
                      help="Backup and Remove <service name>")
    group_devspace = optparse.OptionGroup(parser, "Devspace Options")
    group_devspace.add_option("--autoconf", action='store_true', dest="autoconf",
                              # Fixed help-text typo: "avaialbe" -> "available".
                              help="Auto add service available and update crontab")
    group_devspace.add_option("--batchrun", action='store_true', dest="batchrun",
                              help="Run parse and mirror for <service name>")
    group_devspace.add_option("--init", action='store_true', dest="init",
                              help="For devspace init all service and first checkout")
    parser.add_option_group(group_devspace)
    if len(argv) == 1:
        # No arguments at all: just show usage.
        parser.print_help()
    else:
        options, args = parser.parse_args(args=argv[1:])
        process(options, args)
if __name__ == '__main__':
    cli()
| 2.34375 | 2 |
thing/models/characterdetails.py | skyride/evething-2 | 21 | 12761185 | # ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from datetime import datetime
from django.db import models
from thing.models.character import Character
from thing.models.item import Item
from thing.models.implant import Implant
class CharacterDetails(models.Model):
    """Per-character state: wallet, attributes, implants, location, fatigue.

    One-to-one extension of :class:`Character`, reachable as
    ``character.details``.
    """
    character = models.OneToOneField(
        Character, unique=True, primary_key=True, related_name='details'
    )
    wallet_balance = models.DecimalField(
        max_digits=18, decimal_places=2, default=0
    )
    plex_balance = models.IntegerField(default=0)
    # Base (pre-implant) attribute values.
    cha_attribute = models.SmallIntegerField(default=20)
    int_attribute = models.SmallIntegerField(default=20)
    mem_attribute = models.SmallIntegerField(default=20)
    per_attribute = models.SmallIntegerField(default=20)
    wil_attribute = models.SmallIntegerField(default=19)
    # Lazy cache for the implant-bonus aggregate: the first call to
    # __get_attr_bonus shadows this empty class-level dict with an
    # instance attribute holding the query result.
    __attr_bonus = {}
    def __get_attr_bonus(self, attr):
        """Return the summed implant modifier for ``attr``, or 0 if none."""
        if not self.__attr_bonus:
            # A single aggregate query covers all five attributes at once.
            self.__attr_bonus = self.implants.all().aggregate(
                models.Sum('charisma_modifier'),
                models.Sum('intelligence_modifier'),
                models.Sum('memory_modifier'),
                models.Sum('perception_modifier'),
                models.Sum('willpower_modifier')
            )
        # The Sum is None when the character has no implants.
        return self.__attr_bonus[attr + '_modifier__sum'] or 0
    @property
    def cha_bonus(self):
        return self.__get_attr_bonus('charisma')
    @property
    def int_bonus(self):
        return self.__get_attr_bonus('intelligence')
    @property
    def mem_bonus(self):
        return self.__get_attr_bonus('memory')
    @property
    def per_bonus(self):
        return self.__get_attr_bonus('perception')
    @property
    def wil_bonus(self):
        return self.__get_attr_bonus('willpower')
    implants = models.ManyToManyField(Implant)
    security_status = models.DecimalField(
        max_digits=6, decimal_places=4, default=0
    )
    last_known_location = models.CharField(max_length=255, default='')
    ship_item = models.ForeignKey(Item, blank=True, null=True)
    ship_name = models.CharField(max_length=128, default='')
    # Fatigue
    last_jump_date = models.DateTimeField(null=True, default=None)
    fatigue_expire_date = models.DateTimeField(null=True, default=None)
    class Meta:
        app_label = 'thing'
    def __unicode__(self):
        return '%s' % self.character
    def fatigue(self):
        """Remaining fatigue as a timedelta, or None if no expiry is set."""
        # PEP 8: compare against None with "is not", never "!=".
        if self.fatigue_expire_date is not None:
            return self.fatigue_expire_date - datetime.utcnow()
    def has_fatigue(self):
        """Return True while jump fatigue has not yet expired."""
        if self.fatigue_expire_date is None:
            return False
        # NOTE(review): datetime.utcnow() is naive; assumes DB datetimes
        # are stored naive-UTC — confirm against project USE_TZ setting.
        return (self.fatigue_expire_date - datetime.utcnow()).total_seconds() > 0
| 1.476563 | 1 |
Data/load_data.py | Tammie-Li/DRL | 0 | 12761186 | <gh_stars>0
'''
Author: <NAME>
Description: data operate
FilePath: \DRL\Data\load_data.py
'''
import os
import numpy as np
import random
from torch.utils.data import DataLoader
from collections import defaultdict
from sklearn import preprocessing
from Data.make_dataset import *
def scale_data(data):
    """Standardise each trial of a 3-D array (trials, channels, times) in place.

    Every trial is independently zero-mean/unit-variance scaled with a
    fresh ``StandardScaler`` fit; the (mutated) input array is returned.
    """
    scaler = preprocessing.StandardScaler()
    n_trials = data.shape[0]
    for idx in range(n_trials):
        data[idx] = scaler.fit_transform(data[idx])
    return data
def load_dataset(subject_id, dataset_type):
    """Load one subject's EEG arrays from ``Data/<dataset_type>/S<id>/``.

    :param subject_id: subject number, zero-padded to two digits in paths.
    :param dataset_type: sub-directory name under ``Data/``.
    :return: (target_data, non_target_data, x_test, y_test); the first
             three arrays are per-trial standardised via ``scale_data``.
    """
    # Load data
    print(f'Current directory: s{subject_id}')
    target_data = np.load(os.path.join(os.getcwd(), 'Data', dataset_type, f'S{subject_id:>02d}', 'target.npy'))
    non_target_data = np.load(os.path.join(os.getcwd(), 'Data', dataset_type, f'S{subject_id:>02d}', 'nontarget.npy'))
    x_test = np.load(os.path.join(os.getcwd(), 'Data', dataset_type, f'S{subject_id:>02d}', 'x_test.npy'))
    y_test = np.load(os.path.join(os.getcwd(),'Data', dataset_type, f'S{subject_id:>02d}', 'y_test.npy'))
    # Standardise train/test features; labels (y_test) stay untouched.
    target_data = scale_data(target_data)
    non_target_data = scale_data(non_target_data)
    x_test = scale_data(x_test)
    print("Shape of target dataset", target_data.shape)
    print("Shape of non-target dataset", non_target_data.shape)
    print("Shape of test dataset", x_test.shape)
    return target_data, non_target_data, x_test, y_test
# Sample pair construction
def generate_pair(target_data, non_target_data, num):
    """Build ``num`` random sample pairs for contrastive training.

    Pairs are drawn with replacement: num/4 target+target and num/4
    nontarget+nontarget pairs (label 0), then num/2 target+nontarget
    pairs (label 1). ``num`` should be divisible by 4.

    :return: (pair_data, pair_label) — pair_data has shape
             (num, 2, *sample_shape), pair_label is 0/1 per pair.
    """
    pair_data = []
    n_target = target_data.shape[0]
    n_non_target = non_target_data.shape[0]
    # target + target as positive pairs
    for _ in range(int(num / 4)):
        i = random.randint(0, n_target - 1)
        j = random.randint(0, n_target - 1)
        pair_data.append([target_data[i, ...], target_data[j, ...]])
    # nontarget + nontarget as positive pairs
    for _ in range(int(num / 4)):
        i = random.randint(0, n_non_target - 1)
        j = random.randint(0, n_non_target - 1)
        pair_data.append([non_target_data[i, ...], non_target_data[j, ...]])
    # target + nontarget as negative pairs
    for _ in range(int(num / 2)):
        i = random.randint(0, n_target - 1)
        j = random.randint(0, n_non_target - 1)
        pair_data.append([target_data[i, ...], non_target_data[j, ...]])
    # First half of the pairs are same-class (0), second half mixed (1).
    pair_label = [0] * int(num / 2) + [1] * int(num / 2)
    return np.array(pair_data), np.array(pair_label)
def generate_data_info(target_data, non_target_data, x_test, y_test, subject_id, batch_size_1, batch_size_2, num):
    """Assemble DataLoaders and metadata for both training stages.

    :param batch_size_1: batch size for the representation-learning loader.
    :param batch_size_2: batch size for the downstream/test loaders.
    :param num: number of contrastive pairs to generate.
    :return: dict with subject id, tensor dimensions, split sizes and the
             three DataLoaders (train / downstream / test).
    """
    # Stage 1: paired data for contrastive representation learning.
    x_train, y_train = generate_pair(target_data, non_target_data, num)
    print("Shape of paired full dataset for train", x_train.shape)
    # Stage 2: class-balanced data for classifier learning — randomly
    # downsample non-targets to the number of target trials.
    perm = np.random.permutation(non_target_data.shape[0])
    non_target_num = int(target_data.shape[0])
    non_target_data_downstream = non_target_data[perm[:non_target_num]]
    x_downstream = np.concatenate((non_target_data_downstream, target_data))
    y_downstream = np.array(
        [0] * non_target_data_downstream.shape[0] + [1] * target_data.shape[0])
    # Wrap the arrays in Datasets / DataLoaders.
    train_data = TrainDataset(x_train, y_train)
    train_loader = DataLoader(train_data, batch_size=batch_size_1, shuffle=True)
    downstream_data = DownstreamDataset(x_downstream, y_downstream)
    downstream_loader = DataLoader(downstream_data, batch_size=batch_size_2, shuffle=True)
    test_data = TestDataset(x_test, y_test)
    test_loader = DataLoader(test_data, batch_size=batch_size_2, shuffle=False)
    # Collect metadata used by the training loop.
    data_info = defaultdict()
    data_info['subject_id'] = subject_id
    data_info['times'] = x_train.shape[-1]
    data_info['channels'] = x_train.shape[-2]
    data_info['train_num'] = x_train.shape[0]
    data_info['downstream_num'] = x_downstream.shape[0]
    data_info['test_num'] = x_test.shape[0]
    data_info['train_loader'] = train_loader
    data_info['downstream_loader'] = downstream_loader
    data_info['test_loader'] = test_loader
    # Fixed: the original return line was corrupted by stray trailing text.
    return data_info
vendor/github.com/tensorflow/tensorflow/tensorflow/contrib/resampler/xla/resampler_ops_xla_test.py | owennewo/kfserving | 36 | 12761187 | <reponame>owennewo/kfserving
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resampler ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib import resampler
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ResamplerOpsTest(xla_test.XLATestCase):
  """XLA tests for the resampler op: forward bilinear sampling and its
  gradients w.r.t. both the input image and the warp coordinates."""
  def _assertForwardOpMatchesExpected(self, image_np, warp_np, expected):
    """Run the forward resampler on (image_np, warp_np) and compare to
    ``expected`` with dtype-dependent tolerances."""
    with self.test_session() as sess, self.test_scope():
      input_image = array_ops.placeholder(image_np.dtype)
      warp = array_ops.placeholder(warp_np.dtype)
      resampled = resampler.resampler(input_image, warp, name='resampler')
      out = sess.run(resampled, {input_image: image_np, warp: warp_np})
      self.assertAllCloseAccordingToType(
          expected, out, rtol=5e-3, half_rtol=1e-2, bfloat16_rtol=3e-2)
  def _assertBackwardOpMatchesExpected(self, input_np, warp_np, grad_output_np,
                                       expected_grad_data, expected_grad_warp):
    """Run resampler_grad and compare the data and warp gradients against
    the expected arrays with dtype-dependent tolerances."""
    with self.cached_session() as sess, self.test_scope():
      input_image = array_ops.placeholder(input_np.dtype)
      warp = array_ops.placeholder(warp_np.dtype)
      grad_output = array_ops.placeholder(grad_output_np.dtype)
      grad_data, grad_warp = gen_resampler_ops.resampler_grad(
          input_image, warp, grad_output)
      grad_data_tf, grad_warp_tf = sess.run([grad_data, grad_warp], {
          input_image: input_np,
          warp: warp_np,
          grad_output: grad_output_np
      })
      self.assertAllCloseAccordingToType(
          expected_grad_warp, grad_warp_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
      self.assertAllCloseAccordingToType(
          expected_grad_data, grad_data_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
  def testSimple(self):
    """Single-channel 2x2 image, one warp point: forward + gradients."""
    for dtype in self.float_types:
      input_shape = [1, 2, 2, 1]
      input_data = [0, 5, 13, 54]
      input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
      warp_shape = [1, 2]
      warp_data = [0.7, 0.6]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected = [[26.42]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
      grad_output = np.ones([1, 1], dtype=dtype)
      expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
                                                      [0.42000002]]]]
      expected_grad_warp = [[26.60000038, 38.20000076]]
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
  def testMultiChannel(self):
    """Three-channel (RGB) 2x2 image, one warp point."""
    for dtype in self.float_types:
      input_shape = [1, 2, 2, 3]
      input_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
      input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)
      warp_shape = [1, 2]
      warp_data = [0.7, 0.6]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected = [[59.58000183, 146.94000244, 107.37999725]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
      grad_output = np.ones([1, 3], dtype=dtype)
      expected_grad_data = [[[[0.12, 0.12, 0.12],
                              [0.27999997, 0.27999997, 0.27999997]],
                             [[0.18000001, 0.18000001, 0.18000001],
                              [0.42000002, 0.42000002, 0.42000002]]]]
      expected_grad_warp = [[199, 30]]
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
  def testBatch2Height3byWidth3RGB(self):
    """Batched input: 2 RGB images of 3x3, two warp points per image."""
    for dtype in self.float_types:
      input_shape = [2, 3, 3, 3]
      input_rgb_data = [
          0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1, 30, 105, 2, 40, 115,
          3, 50, 125, 4, 60, 135, 5, 70, 145, 6, 0, 5, 13, 54, 135, 226, 37, 8,
          234, 90, 255, 1, 30, 105, 2, 40, 115, 3, 50, 125, 4, 60, 135, 5, 70,
          145, 6
      ]
      input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)
      # 2 batches and 2 samples for each batch.
      warp_shape = [2, 2, 2]
      warp_data = [0.7, 0.6, 1, 0.7, 0.9, 1.2, 1.3, 1.6]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected_forward = [[[43.92, 128.4, 65.86], [37.2, 114., 69.2]],
                          [[40.6, 122.8, 2.5], [51., 126, 4.1]]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected_forward)
      expected_grad_data = [[[[0.12, 0.12, 0.12],
                              [0.57999998, 0.57999998, 0.57999998],
                              [0., 0., 0.]],
                             [[0.18000001, 0.18000001, 0.18000001],
                              [1.12, 1.12, 1.12], [0., 0., 0.]],
                             [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
                            [[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                             [[0.08000001, 0.08000001, 0.08000001],
                              [0.99999988, 0.99999988, 0.99999988],
                              [0.11999997, 0.11999997, 0.11999997]],
                             [[0.02000001, 0.02000001, 0.02000001],
                              [0.60000008, 0.60000008, 0.60000008],
                              [0.17999998, 0.17999998, 0.17999998]]]]
      expected_grad_warp = [[[33.39999008, -96.20000458], [-26.10000229,
                                                           -278.]],
                            [[-162.99998474, 39.99999619], [21., 63.]]]
      grad_output = np.ones([2, 2, 3], dtype=dtype)
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
  def testOutOfBoundWarps(self):
    """Warp points outside the image: forward output and gradients are
    zeroed/clamped depending on how far out of bounds the point is."""
    # (x, y) are both less than 0.
    for dtype in self.float_types:
      input_shape = [1, 2, 2, 1]
      input_data = [10, 5, 13, 54]
      input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
      warp_shape = [1, 2, 2]
      warp_data = [-1, -1, 0.7, 0.6]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected = [[[0.0], [27.62]]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
      expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
                                                      [0.42000002]]]]
      expected_grad_warp = [[[0., 0.], [22.60000038, 35.20000076]]]
      grad_output = np.ones([1, 2, 1], dtype=dtype)
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
    # One of (x, y) is less than 0.
    for dtype in self.float_types:
      input_shape = [1, 2, 2, 1]
      input_data = [10, 5, 13, 54]
      input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
      warp_shape = [1, 2, 2]
      # -1 is out of bound for grad_warp.
      warp_data = [-1, 0.1, 0.7, 0.6]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected = [[[0.0], [27.62]]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
      expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
                                                      [0.42000002]]]]
      expected_grad_warp = [[[0., 0.], [22.60000038, 35.20000076]]]
      grad_output = np.ones([1, 2, 1], dtype=dtype)
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
    # Both of (x, y) are greater than image size.
    for dtype in self.float_types:
      input_shape = [1, 2, 2, 1]
      input_data = [10, 5, 13, 54]
      input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
      warp_shape = [1, 2, 2]
      # -0.1 is *inbound* for grad_warp and grad_data, 2.1 is out of bound.
      warp_data = [-0.1, 0.1, 1.2, 2.1]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected = [[[0.0], [0.0]]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
      expected_grad_data = [[[[0.81], [0.0]], [[0.09], [0.0]]]]
      expected_grad_warp = [[[10.30, 2.7], [0.0, 0.0]]]
      grad_output = np.ones([1, 2, 1], dtype=dtype)
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
    # One of (x, y) is greater than image size.
    for dtype in self.float_types:
      input_shape = [1, 2, 2, 1]
      input_data = [10, 5, 13, 54]
      input_np = np.array(input_data, dtype=dtype).reshape(input_shape)
      warp_shape = [1, 2, 2]
      warp_data = [0.1, -0.1, 1.2, 0.1]
      warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
      expected = [[[0.0], [0.0]]]
      self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
      expected_grad_data = [[[[0.81], [0.81]], [[0.0], [0.08]]]]
      expected_grad_warp = [[[-4.5, 9.5], [-9.9, 39.20]]]
      grad_output = np.ones([1, 2, 1], dtype=dtype)
      self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                            expected_grad_data,
                                            expected_grad_warp)
if __name__ == '__main__':
  test.main()
| 2.03125 | 2 |
mmgen/datasets/quick_test_dataset.py | plutoyuxie/mmgeneration | 718 | 12761188 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.utils.data import Dataset
from .builder import DATASETS
@DATASETS.register_module()
class QuickTestImageDataset(Dataset):
    """Tiny synthetic dataset used to quickly sanity-check pipelines.

    Args:
        size (tuple[int]): Spatial size (H, W) of the generated image.
            Must be provided — a ``None`` value would fail at indexing.
    """

    def __init__(self, *args, size=None, **kwargs):
        super().__init__()
        self.size = size
        height, width = self.size[0], self.size[1]
        # One fixed random image, reused for every sample.
        self.img_tensor = torch.randn(3, height, width)

    def __len__(self):
        # Pretend to hold 10k samples so loaders behave realistically.
        return 10000

    def __getitem__(self, idx):
        # Every index maps to the same cached tensor.
        return dict(real_img=self.img_tensor)
| 2.484375 | 2 |
apps/forms.py | LishenZz/my_project | 0 | 12761189 | #Author:<NAME>
#定义form公共类
class FormMixin(object):
    """Shared helpers mixed into Django form classes."""

    def get_errors(self):
        """Return validation errors as ``{field: [message, ...]}``.

        Returns an empty dict when the object has no ``errors`` attribute
        (e.g. the form has not been validated yet).
        """
        if not hasattr(self, 'errors'):
            return {}
        errors = self.errors.get_json_data()
        # Each value is a list of dicts like {'message': ..., 'code': ...};
        # keep only the human-readable message strings.
        return {
            field: [item['message'] for item in message_dicts]
            for field, message_dicts in errors.items()
        }
valid-parantheses.py | ahmetavc/leetcoding | 0 | 12761190 | # https://leetcode.com/problems/valid-parentheses/
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True if every bracket in ``s`` is closed in the right order.

        Uses a stack: openers are pushed; each closer must match the most
        recent unmatched opener. Runs in O(n) time and space.
        """
        # Map each closer to the opener it must match.
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in pairs:
                # A closer with no (or the wrong) pending opener is invalid.
                if not stack or stack.pop() != pairs[ch]:
                    return False
            else:
                stack.append(ch)
        # Valid only if no opener is left unmatched.
        return not stack
| 3.578125 | 4 |
misopy/parse_gene.py | michalkouril/altanalyze | 73 | 12761191 | <gh_stars>10-100
import os
import misopy
import misopy.gff_utils as gff_utils
def parseGene(pickle_filename, event):
    """
    Parse a pickled gene. (Python 2 code: py2 raise syntax, iteritems.)

    Looks up ``event`` (a gene id) in the indexed GFF file, collects the
    exon boundaries of every mRNA of that gene, and returns
    (tx_start, tx_end, exon_starts, exon_ends, gene_obj, mRNAs, strand,
    chrom). mRNAs is a list of [start, end] exon pairs per isoform,
    sorted by exon count.
    """
    if not os.path.isfile(pickle_filename):
        raise Exception, "Error: no filename %s" %(pickle_filename)
    gff_genes = gff_utils.load_indexed_gff_file(pickle_filename)
    if gff_genes == None:
        raise Exception, "Error: could not load genes from %s" \
            %(pickle_filename)
    exon_starts = []
    exon_ends = []
    mRNAs = []
    chrom = None
    for gene_id, gene_info in gff_genes.iteritems():
        # Only the gene matching the requested event is processed.
        if event == gene_id:
            gene_obj = gene_info['gene_object']
            gene_hierarchy = gene_info['hierarchy']
            tx_start, tx_end = gff_utils.get_inclusive_txn_bounds(\
                gene_hierarchy[gene_id])
            chrom = gene_obj.chrom
            for mRNA_id, mRNA_info in gene_hierarchy[gene_id]['mRNAs'].iteritems():
                mRNA = []
                for exon_id, exon_info in gene_hierarchy[gene_id]['mRNAs']\
                    [mRNA_id]['exons'].\
                    iteritems():
                    exon_rec = gene_hierarchy[gene_id]['mRNAs']\
                        [mRNA_id]['exons'][exon_id]['record']
                    strand = exon_rec.strand
                    exon_starts.append(exon_rec.start)
                    exon_ends.append(exon_rec.end)
                    # Store exons as [low, high] regardless of strand.
                    mRNA.append(sorted([exon_rec.start, exon_rec.end]))
                mRNAs.append(mRNA)
            break
    # Isoforms sorted by number of exons (fewest first).
    mRNAs.sort(key=len)
    # NOTE(review): tx_start/gene_obj/strand are only bound when the event
    # is found — an unknown event raises UnboundLocalError here.
    return tx_start, tx_end, exon_starts, exon_ends, gene_obj, \
        mRNAs, strand, chrom
| 2.515625 | 3 |
watcher/tests/api/v1/test_data_model.py | limin0801/watcher | 0 | 12761192 | # Copyright 2019 ZTE corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_serialization import jsonutils
from watcher.decision_engine import rpcapi as deapi
from watcher.tests.api import base as api_base
class TestListDataModel(api_base.FunctionalTest):
    """API tests for GET /data_model with a mocked decision engine."""
    def setUp(self):
        super(TestListDataModel, self).setUp()
        # Patch the decision-engine RPC API so no real engine is needed.
        p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI')
        self.mock_dcapi = p_dcapi.start()
        self.mock_dcapi().get_data_model_info.return_value = \
            'fake_response_value'
        self.addCleanup(p_dcapi.stop)
    def test_get_all(self):
        """Microversion 1.3 supports the data_model endpoint."""
        response = self.get_json(
            '/data_model/?data_model_type=compute',
            headers={'OpenStack-API-Version': 'infra-optim 1.3'})
        self.assertEqual('fake_response_value', response)
    def test_get_all_not_acceptable(self):
        """Older microversion (1.2) must be rejected with 406."""
        response = self.get_json(
            '/data_model/?data_model_type=compute',
            headers={'OpenStack-API-Version': 'infra-optim 1.2'},
            expect_errors=True)
        self.assertEqual(406, response.status_int)
class TestDataModelPolicyEnforcement(api_base.FunctionalTest):
    """Checks that data_model API calls are rejected when policy denies them."""
    def setUp(self):
        super(TestDataModelPolicyEnforcement, self).setUp()
        # Patch the decision-engine RPC API so no real engine is needed.
        p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI')
        self.mock_dcapi = p_dcapi.start()
        self.addCleanup(p_dcapi.stop)
    def _common_policy_check(self, rule, func, *arg, **kwarg):
        # Point the checked rule at "default" (admin-only) so the request
        # is denied; the original referenced a non-existent "defaut" rule.
        self.policy.set_rules({
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
            rule: "rule:default"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # assertIn, not assertTrue: the original assertTrue(msg, payload)
        # treated the message as the asserted value and always passed.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            jsonutils.loads(response.json['error_message'])['faultstring'])
    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "data_model:get_all", self.get_json,
            "/data_model/?data_model_type=compute",
            headers={'OpenStack-API-Version': 'infra-optim 1.3'},
            expect_errors=True)
class TestDataModelEnforcementWithAdminContext(
        TestListDataModel, api_base.AdminRoleTest):
    """Re-runs TestListDataModel under an admin context with admin-only
    policy rules, verifying admins retain access."""
    def setUp(self):
        super(TestDataModelEnforcementWithAdminContext, self).setUp()
        self.policy.set_rules({
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
            "data_model:get_all": "rule:default"})
| 1.585938 | 2 |
selfdrive/car/hyundai/hda2can.py | boxkon/openpilot | 1 | 12761193 | <gh_stars>1-10
def create_lkas(packer, enabled, frame, lat_active, apply_steer):
    """Build an HDA2 LKAS steering-command CAN message.

    ``apply_steer`` is the torque request; ``lat_active`` gates the
    assist/steer-request bits; ``enabled`` only drives the cluster icon.
    """
    steer_active = 1 if lat_active else 0
    values = {
        "LKA_MODE": 2,
        # Icon state: 2 while engaged, 1 otherwise.
        "LKA_ICON": 2 if enabled else 1,
        "TORQUE_REQUEST": apply_steer,
        "LKA_ASSIST": steer_active,
        "STEER_REQ": steer_active,
        "STEER_MODE": 0,
        "SET_ME_1": 0,
        "NEW_SIGNAL_1": 0,
        "NEW_SIGNAL_2": 0,
    }
    # Counter wraps at 255 based on the frame index.
    return packer.make_can_msg("LKAS", 4, values, frame % 255)
def create_buttons(packer, cnt, cancel, resume):
    """Build a forged CRUISE_BUTTONS CAN message.

    :param cnt: rolling counter, wrapped to 4 bits.
    :param cancel: press the pause/resume button (used to cancel cruise).
    :param resume: press the distance button (used to resume).
    """
    values = {
        "_COUNTER": cnt % 0xf,
        "SET_ME_1": 1,
        "DISTANCE_BTN": 1 if resume else 0,
        "PAUSE_RESUME_BTN": 1 if cancel else 0,
    }
    # Fixed: the original return line was corrupted by stray trailing text.
    return packer.make_can_msg("CRUISE_BUTTONS", 5, values)
eahub/profiles/index.py | walambert/eahub.org | 36 | 12761194 | from algoliasearch_django import AlgoliaIndex
from algoliasearch_django.decorators import register
from django.conf import settings
from eahub.profiles.models import Profile, ProfileTag
# Algolia search indexes are only registered when the feature flag is on.
if settings.IS_ENABLE_ALGOLIA:

    class ProfilePublicIndex(AlgoliaIndex):
        # Index of profiles visible to anonymous/public searchers.
        index_name = settings.ALGOLIA["INDEX_NAME_PROFILES_PUBLIC"]
        should_index = "is_searchable_public"
        # Each entry is either a model attribute name or a
        # [method_name, index_field_name] pair.
        fields = [
            "job_title",
            ["get_full_name", "name"],
            ["get_messaging_url_if_can_receive_message", "messaging_url"],
            "summary",
            ["get_tags_speech_topic_formatted", "speech_topics"],
            "topics_i_speak_about",
            "expertise_areas_other",
            "cause_areas_other",
            ["get_absolute_url", "url"],
            ["get_image_url", "image"],
            "personal_website_url",
            "facebook_url",
            "linkedin_url",
            "available_as_speaker",
            "available_to_volunteer",
            "open_to_job_offers",
            "is_organiser",
            "city_or_town",
            "country",
            "lon",
            "lat",
            ["get_tags_generic_formatted", "tags"],
            ["get_tags_affiliation_formatted", "affiliations"],
            ["get_tags_cause_area_formatted", "cause_areas"],
            ["get_tags_cause_area_expertise_formatted", "cause_areas_expertise"],
            ["get_tags_expertise_formatted", "expertise"],
            ["get_tags_career_interest_formatted", "career_interest_areas"],
            ["get_tags_pledge_formatted", "giving_pledges"],
            ["get_tags_event_attended_formatted", "events_attended"],
            [
                "get_tags_organisational_affiliation_formatted",
                "organisational_affiliations",
            ],
            ["get_local_groups_formatted", "local_groups"],
            ["get_organizer_of_local_groups_formatted", "organizer_of_local_groups"],
            ["offering", "offering"],
            ["looking_for", "looking_for"],
        ]

    class ProfileInternalIndex(AlgoliaIndex):
        # Same field set as the public index, but for logged-in search.
        index_name = settings.ALGOLIA["INDEX_NAME_PROFILES_INTERNAL"]
        should_index = "is_searchable_internal"
        fields = ProfilePublicIndex.fields

    @register(Profile)
    class ProfileMetaIndex(AlgoliaIndex):
        # Fan-out wrapper: mirrors every AlgoliaIndex operation to both
        # the public and the internal profile index.
        # noinspection PyShadowingNames,PyMissingConstructor
        def __init__(self, model, client, settings):
            self.indices = [
                ProfilePublicIndex(model, client, settings),
                ProfileInternalIndex(model, client, settings),
            ]
        def raw_search(self, query="", params=None):
            # Returns one result dict per underlying index, keyed by name.
            res = {}
            for index in self.indices:
                res[index.name] = index.raw_search(query, params)
            return res
        def update_records(self, qs, batch_size=1000, **kwargs):
            for index in self.indices:
                index.update_records(qs, batch_size, **kwargs)
        def reindex_all(self, batch_size=1000):
            for index in self.indices:
                index.reindex_all(batch_size)
        def set_settings(self):
            for index in self.indices:
                index.set_settings()
        def clear_index(self):
            for index in self.indices:
                index.clear_index()
        def save_record(self, instance, update_fields=None, **kwargs):
            for index in self.indices:
                index.save_record(instance, update_fields, **kwargs)
        def delete_record(self, instance):
            for index in self.indices:
                index.delete_record(instance)

    @register(ProfileTag)
    class ProfileTagIndex(AlgoliaIndex):
        # Index of profile tags for tag search/autocomplete.
        index_name = settings.ALGOLIA["INDEX_NAME_TAGS"]
        fields = [
            "name",
            "description",
            "synonyms",
            ["get_types_formatted", "types"],
            "created_at",
            "status",
            "is_featured",
            "count",
        ]
users/models.py | bycristhian/vurapy | 0 | 12761195 |
# Django
from django.db import models
# Models
from django.contrib.auth.models import User
class Profile(models.Model):
    """Extends the built-in ``User`` with a bio and follow relations."""
    # One profile per user; reachable as ``user.profile``.
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
    # Short bio, 160-character limit.
    biography = models.CharField(max_length=160, blank=True, null=True)
    # Users this profile follows.
    follows = models.ManyToManyField(User, related_name='follows_user')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return "{} by {}".format(self.biography, self.user)
| 2.53125 | 3 |
app/mailgun.py | taeram/stratagem | 0 | 12761196 | from app import app
from flask import url_for
from flask.ext.script import Manager, prompt
from database import db, \
Domain
import requests
manager = Manager(usage="Manage Mailgun setup")
@manager.command
def setup():
    """Interactively set up a new domain in Mailgun.

    Prompts for the Stratagem host, the mail domain and a forwarding
    address, persists the domain, and creates a Mailgun route that
    forwards matching hashed-local-part addresses. (Python 2 code:
    note the ``print`` statement at the end.)
    """
    stratagem_domain_name = prompt("What is the domain name you where Stratagem is hosted? [https://example.herokuapp.com]")
    domain_name = prompt("What is a domain name you want to use with Stratagem? [example.com]")
    # Lookup the domain name
    domain = db.session.query(Domain).\
        filter(Domain.name == domain_name).\
        first()
    # Add the domain name if it doesn't exist
    if domain is None:
        domain = Domain(name=domain_name)
        db.session.add(domain)
        db.session.commit()
    email_destination = prompt("Where should we forward whitelisted emails? [<EMAIL>]")
    # Add the route to Mailgun for this domain name
    url = "%s/routes" % app.config['MAILGUN_API_URL']
    auth = ('api', app.config['MAILGUN_API_KEY'])
    params = {
        "priority": 50,
        # Matches "<anything>.<hash>@<domain>" where the hash length is
        # configured via LOCAL_PART_HASH_LENGTH.
        "expression": 'match_recipient(".*\.[a-z0-9]{%s}@%s")' % (app.config['LOCAL_PART_HASH_LENGTH'], domain.name),
        "action": [
            'forward("%s/email/")' % stratagem_domain_name,
            'forward("%s")' % email_destination,
            'stop()'
        ]
    }
    r = requests.post(url, params=params, auth=auth)
    # NOTE(review): "> 200" also rejects 2xx codes like 201 — assumes
    # Mailgun answers 200 on success; confirm against the API docs.
    if r.status_code > 200:
        raise Exception(r.text)
    else:
        print r.text
def mailgun_explicit_whitelist(email_address, email_destination):
    """Create a Mailgun route forwarding one exact address.

    Forwards mail for ``email_address`` to both the Stratagem message
    collection endpoint and ``email_destination``. Returns True on
    success, False when Mailgun responds with a status code above 200.
    """
    # Add the route to Mailgun for this email address
    url = "%s/routes" % app.config['MAILGUN_API_URL']
    auth = ('api', app.config['MAILGUN_API_KEY'])
    params = {
        # Priority 49: checked before the catch-all domain route (50).
        "priority": 49,
        "expression": 'match_recipient("%s")' % (email_address),
        "action": [
            'forward("%s")' % url_for("message_collection", _external=True),
            'forward("%s")' % email_destination,
            'stop()'
        ]
    }
    r = requests.post(url, params=params, auth=auth)
    if r.status_code > 200:
        return False
    return True
| 3.015625 | 3 |
akshare/crypto/crypto_hist_investing.py | NovelResearchInvestment/akshare | 0 | 12761197 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/5/11 17:52
Desc: 加密货币
https://cn.investing.com/crypto/currencies
高频数据
https://bitcoincharts.com/about/markets-api/
"""
import math
import pandas as pd
import requests
from tqdm import tqdm
from akshare.datasets import get_crypto_info_csv
def crypto_name_url_table(symbol: str = "web") -> pd.DataFrame:
"""
加密货币名称、代码和 ID,每次更新较慢
https://cn.investing.com/crypto/ethereum/historical-data
:param symbol: choice of {"web", "local"}; web 表示从网页获取最新,local 表示利用本地本文件
:type symbol: str
:return: 加密货币名称、代码和 ID
:rtype: pandas.DataFrame
"""
if symbol == "web":
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "https://cn.investing.com/crypto/Service/LoadCryptoCurrencies"
payload = {
'draw': '14',
'columns[0][data]': 'currencies_order',
'columns[0][name]': 'currencies_order',
'columns[0][searchable]': 'true',
'columns[0][orderable]': 'true',
'columns[0][search][value]': '',
'columns[0][search][regex]': 'false',
'columns[1][data]': 'function',
'columns[1][name]': 'crypto_id',
'columns[1][searchable]': 'true',
'columns[1][orderable]': 'false',
'columns[1][search][value]': '',
'columns[1][search][regex]': 'false',
'columns[2][data]': 'function',
'columns[2][name]': 'name',
'columns[2][searchable]': 'true',
'columns[2][orderable]': 'true',
'columns[2][search][value]': '',
'columns[2][search][regex]': 'false',
'columns[3][data]': 'symbol',
'columns[3][name]': 'symbol',
'columns[3][searchable]': 'true',
'columns[3][orderable]': 'true',
'columns[3][search][value]': '',
'columns[3][search][regex]': 'false',
'columns[4][data]': 'function',
'columns[4][name]': 'price_usd',
'columns[4][searchable]': 'true',
'columns[4][orderable]': 'true',
'columns[4][search][value]': '',
'columns[4][search][regex]': 'false',
'columns[5][data]': 'market_cap_formatted',
'columns[5][name]': 'market_cap_usd',
'columns[5][searchable]': 'true',
'columns[5][orderable]': 'true',
'columns[5][search][value]': '',
'columns[5][search][regex]': 'false',
'columns[6][data]': '24h_volume_formatted',
'columns[6][name]': '24h_volume_usd',
'columns[6][searchable]': 'true',
'columns[6][orderable]': 'true',
'columns[6][search][value]': '',
'columns[6][search][regex]': 'false',
'columns[7][data]': 'total_volume',
'columns[7][name]': 'total_volume',
'columns[7][searchable]': 'true',
'columns[7][orderable]': 'true',
'columns[7][search][value]': '',
'columns[7][search][regex]': 'false',
'columns[8][data]': 'change_percent_formatted',
'columns[8][name]': 'change_percent',
'columns[8][searchable]': 'true',
'columns[8][orderable]': 'true',
'columns[8][search][value]': '',
'columns[8][search][regex]': 'false',
'columns[9][data]': 'percent_change_7d_formatted',
'columns[9][name]': 'percent_change_7d',
'columns[9][searchable]': 'true',
'columns[9][orderable]': 'true',
'columns[9][search][value]': '',
'columns[9][search][regex]': 'false',
'order[0][column]': 'currencies_order',
'order[0][dir]': 'asc',
'start': '0',
'length': '100',
'search[value]': '',
'search[regex]': 'false',
'currencyId': '12',
}
r = requests.post(url, data=payload, headers=headers)
data_json = r.json()
total_page = math.ceil(int(data_json['recordsTotal']) / 100)
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
payload.update({
"start": (page-1)*100,
'length': 100
})
r = requests.post(url, data=payload, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df = big_df[[
'symbol',
'name',
'name_trans',
'sml_id',
'related_pair_ID',
]]
return big_df
else:
get_crypto_info_csv_path = get_crypto_info_csv()
name_url_df = pd.read_csv(get_crypto_info_csv_path)
return name_url_df
def crypto_hist(
    symbol: str = "BTC",
    period: str = "每日",
    start_date: str = "20191020",
    end_date: str = "20201020",
):
    """
    Fetch historical OHLCV data for one crypto-currency from investing.com.
    https://cn.investing.com/crypto/ethereum/historical-data
    :param symbol: currency symbol, e.g. "BTC"
    :type symbol: str
    :param period: choice of {"每日", "每周", "每月"} (daily/weekly/monthly)
    :type period: str
    :param start_date: e.g. '20151020'; note the format
    :type start_date: str
    :param end_date: e.g. '20201020'; note the format
    :type end_date: str
    :return: historical data, sorted ascending by date
    :rtype: pandas.DataFrame
    """

    def _parse_volume(value) -> float:
        """Convert a display volume cell ("-", "1.2K", "3.4M", "5B") to a float."""
        text = str(value).replace(",", "")
        if text == "-":
            # investing.com shows "-" for rows without reported volume
            return 0.0
        for suffix, factor in (("B", 1e9), ("M", 1e6), ("K", 1e3)):
            if text.endswith(suffix):
                return float(text[:-1]) * factor
        return float(text)

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
    }
    period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
    # investing.com expects YYYY/MM/DD
    start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
    end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
    # Resolve the investing.com internal ids for this symbol from the
    # locally bundled table.
    name_url_df = crypto_name_url_table(symbol="local")
    symbol_row = name_url_df[name_url_df["symbol"] == symbol]
    curr_id = symbol_row["related_pair_ID"].values[0]
    sml_id = symbol_row["sml_id"].values[0]
    url = "https://cn.investing.com/instruments/HistoricalDataAjax"
    payload = {
        "curr_id": curr_id,
        "smlID": sml_id,
        "header": "null",
        "st_date": start_date,
        "end_date": end_date,
        "interval_sec": period_map[period],
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data",
    }
    r = requests.post(url, data=payload, headers=headers)
    temp_df = pd.read_html(r.text)[0]
    df_data = temp_df.copy()
    # Monthly rows carry no day component in the date column.
    date_format = "%Y年%m月" if period == "每月" else "%Y年%m月%d日"
    df_data.index = pd.to_datetime(df_data["日期"], format=date_format)
    # Element-wise volume parsing replaces the previous chained-indexing
    # assignments for the "-", "B", "M" and "K" cases, which triggered
    # SettingWithCopyWarning and required a blanket warnings filter.
    df_data["交易量"] = df_data["交易量"].apply(_parse_volume)
    # Percent change: strip thousands separators and the "%" sign, scale to a
    # fraction and round to 6 decimals.
    df_data["涨跌幅"] = round(
        df_data["涨跌幅"].str.replace(",", "").str.replace("%", "").astype(float)
        / 100,
        6,
    )
    del df_data["日期"]
    df_data.reset_index(inplace=True)
    df_data = df_data[[
        "日期",
        "收盘",
        "开盘",
        "高",
        "低",
        "交易量",
        "涨跌幅",
    ]]
    df_data['日期'] = pd.to_datetime(df_data['日期']).dt.date
    df_data['收盘'] = pd.to_numeric(df_data['收盘'])
    df_data['开盘'] = pd.to_numeric(df_data['开盘'])
    df_data['高'] = pd.to_numeric(df_data['高'])
    df_data['低'] = pd.to_numeric(df_data['低'])
    df_data['交易量'] = pd.to_numeric(df_data['交易量'])
    df_data['涨跌幅'] = pd.to_numeric(df_data['涨跌幅'])
    df_data.sort_values('日期', inplace=True)
    df_data.reset_index(inplace=True, drop=True)
    return df_data
if __name__ == "__main__":
crypto_name_url_table_df = crypto_name_url_table(symbol="local")
print(crypto_name_url_table_df)
crypto_hist_df = crypto_hist(
symbol="BTC", period="每日", start_date="20151020", end_date="20220511"
)
print(crypto_hist_df)
| 2.21875 | 2 |
src/7/7581.py | youngdaLee/Baekjoon | 11 | 12761198 | <filename>src/7/7581.py
"""
7581. Cuboids
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 92 ms
해결 날짜: 2020년 9월 14일
"""
def main():
    """Solve Baekjoon 7581 (Cuboids).

    Each input row holds length, width, height and volume with exactly one
    value given as 0; compute the missing value and echo the completed row.
    The all-zero row terminates input.
    """
    while True:
        length, width, height, volume = (int(tok) for tok in input().split())
        if length == width == height == volume == 0:
            break
        if length == 0:
            length = volume // width // height
        elif width == 0:
            width = volume // length // height
        elif height == 0:
            height = volume // length // width
        elif volume == 0:
            volume = length * width * height
        print(length, width, height, volume, sep=' ')


if __name__ == '__main__':
    main()
| 3.21875 | 3 |
bin/nexus_from_genotypes.py | mgharvey/misc_python | 9 | 12761199 | #!/usr/bin/env python
"""
Name: nexus_from_genotype_probabilities.py
Author: <NAME>
Date: 10 July 2013
Convert genotype probabilities file output by Tom White's post-UNEAK processing scripts to nexus
alignment of concatenated SNPs
Usage: python nexus_from_genotype_probabilities.py in_file out_file sample_size
Ex.: python nexus_from_genotype_probabilities.py HapMapPairedFilt_1.txt HapMapPairedFilt_1.nex 73
"""
import os
import sys
import argparse
import csv
import numpy
def get_args():
    """Parse the command line: input path, output path and sample count.

    Note that sample_size is deliberately kept as a string (callers convert
    it with int() where needed).
    """
    parser = argparse.ArgumentParser(
        description="""Program description""")
    parser.add_argument(
        "in_file",
        type=str,
        help="""The input genotype probabilities file from Tom White's scripts""")
    parser.add_argument(
        "out_file",
        type=str,
        help="""The file name""")
    parser.add_argument(
        "sample_size",
        type=str,
        help="""The number of samples/individuals in file""")
    return parser.parse_args()
def read_samples(infile, sample_size):
    """Read the header line of *infile* and return the list of sample names.

    Sample columns start at index 4 of the header; each column label has the
    form "<name>_<suffix>" and only the leading <name> part is kept.
    """
    header_fields = infile.readline().split()
    return [
        header_fields[4 + col].split('_')[0]
        for col in range(int(sample_size))
    ]
# IUPAC nucleotide ambiguity codes for heterozygous calls, keyed by the
# unordered pair of alleles.
_IUPAC_AMBIGUITY = {
    frozenset("AC"): "M",
    frozenset("AG"): "R",
    frozenset("AT"): "W",
    frozenset("CG"): "S",
    frozenset("CT"): "Y",
    frozenset("GT"): "K",
}


def unphase(infile, sample_size):
    """Convert genotype-probability rows into per-site base calls.

    Each data line has the allele pair in column 3 ("A/C" style) and one
    genotype call per sample starting at column 4: "1,1" (hom. ref allele),
    "2,2" (hom. alt allele), "1,2"/"2,1" (heterozygous -> IUPAC ambiguity
    code), or "NA" (missing -> "N").

    :param infile: open file positioned past the header line
    :param sample_size: number of sample columns (int or numeric str)
    :return: list of per-site lists of single-character base calls
    :raises ValueError: if the allele pair is not a valid heterozygous
        combination.  (The original 150-line if-cascade silently appended
        nothing for unhandled combinations, misaligning every downstream
        column; failing loudly is the fix.)
    """
    array = list()
    for line in infile:
        parts = line.split()
        allele1, allele2 = str(parts[3]).split('/')
        pair = frozenset((allele1, allele2))
        if pair not in _IUPAC_AMBIGUITY:
            raise ValueError(
                "unexpected allele pair %s/%s" % (allele1, allele2)
            )
        het_code = _IUPAC_AMBIGUITY[pair]
        seq = list()
        for i in range(int(sample_size)):
            call = parts[4 + i]
            if call == "NA":
                seq.append("N")
            else:
                als = call.split(',')
                if als[0] == als[1]:
                    # homozygous: "1" means the first allele, "2" the second
                    seq.append(allele1 if als[0] == "1" else allele2)
                else:
                    seq.append(het_code)
        array.append(seq)
    return array
def main():
    """Write a NEXUS alignment of concatenated SNP base calls.

    Reads the genotype-probability file named on the command line, converts
    each site to per-sample base calls with unphase(), transposes to
    per-sample sequences and writes a minimal NEXUS data block.

    Fixes for Python 3 compatibility: the output file was opened in binary
    mode ('wb') while str was written to it, and zip() now returns an
    iterator so the transposed alignment must be materialized before
    indexing.  Files are closed via context managers even on error.
    """
    args = get_args()
    with open(args.in_file, 'r') as infile, open(args.out_file, 'w') as outfile:
        samples = read_samples(infile, args.sample_size)
        array = unphase(infile, args.sample_size)
        # Transpose: array is per-site lists; alignment is per-sample tuples.
        alignment = list(zip(*array))
        outfile.write("#NEXUS\n")
        outfile.write("begin data;\n")
        outfile.write(
            "\tdimensions ntax={0} nchar={1};\n".format(
                args.sample_size, len(alignment[0])
            )
        )
        outfile.write("\tformat datatype=dna missing=? gap=-;\n")
        outfile.write("\tmatrix\n")
        for i, sample in enumerate(samples):
            outfile.write("{0}\t{1}\n".format(sample, ''.join(alignment[i])))
        outfile.write(";\n")
        outfile.write("end;\n")
if __name__ == '__main__':
main() | 2.90625 | 3 |
example/example.py | knil-sama/aiosql | 1 | 12761200 | <reponame>knil-sama/aiosql
import argparse
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import NamedTuple
import aiosql
class User(NamedTuple):
    """Record class for rows of the users table (see sql/ queries)."""

    userid: int
    username: str
    firstname: str
    lastname: str
class UserBlog(NamedTuple):
    """Record class for user-blog rows returned by the blog queries.

    NOTE(review): the original class defined a ``__post_init__`` that tried
    to re-parse ``published`` into a datetime.  ``__post_init__`` is a
    dataclass hook and is never invoked for a NamedTuple; the method also
    read a misspelled attribute (``publised``) and assigned to an immutable
    field, so it could never have run successfully.  It has been removed as
    dead code; ``published`` holds whatever value the query supplied
    (typically a "%Y-%m-%d %H:%M" string).
    """

    blogid: int
    author: str
    title: str
    published: datetime
# Paths: the SQL files live in ./sql next to this script, and the example
# sqlite database is created alongside it.
dir_path = Path(__file__).parent
sql_path = dir_path / "sql"
db_path = dir_path / "exampleblog.db"
# Load all queries from the sql/ directory, mapping named record classes so
# row results come back as User / UserBlog instances.
queries = aiosql.from_path(
    dir_path / "sql", "sqlite3", record_classes={"User": User, "UserBlog": UserBlog}
)
# Seed data: (username, firstname, lastname) tuples for the users table.
users = [("bobsmith", "Bob", "Smith"), ("johndoe", "John", "Doe"), ("janedoe", "Jane", "Doe")]
# Seed data: (author userid, title, body, publish date) tuples for blogs.
blogs = [
    (
        1,
        "What I did Today",
        """\
I mowed the lawn, washed some clothes, and ate a burger.
Until next time,
Bob""",
        "2017-07-28",
    ),
    (
        3,
        "Testing",
        """\
Is this thing on?
""",
        "2018-01-01",
    ),
    (
        1,
        "How to make a pie.",
        """\
1. Make crust
2. Fill
3. Bake
4. Eat
""",
        "2018-11-23",
    ),
]
def createdb():
    """Create the schema and seed the example users/blogs data."""
    # Use the module-level db_path (same value, built once) so create and
    # delete operate on the same file; the original rebuilt the path inline.
    conn = sqlite3.connect(db_path)
    print("Inserting users and blogs data.")
    # The connection context manager commits the transaction on success.
    with conn:
        queries.create_schema(conn)
        queries.users.insert_many(conn, users)
        queries.blogs.insert_many(conn, blogs)
    print("Done!")
    conn.close()
def deletedb():
    """Remove the on-disk example database, if it has been created."""
    target = db_path
    print(f"deleting the {target} file")
    if target.exists():
        target.unlink()
def get_users():
    """Print every user record in the example database."""
    conn = sqlite3.connect(dir_path / "exampleblog.db")
    all_users = queries.users.get_all(conn)
    for record in all_users:
        print(record)
def get_user_blogs(username):
    """Pretty-print each blog written by *username*."""
    conn = sqlite3.connect(dir_path / "exampleblog.db")
    for blog in queries.blogs.get_user_blogs(conn, username=username):
        print("------")
        print(f'"{blog.title}"')
        print(f"by {blog.author} at {blog.published}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
createdb_parser = subparsers.add_parser("createdb")
createdb_parser.set_defaults(cmd=createdb)
deletedb_parser = subparsers.add_parser("deletedb")
deletedb_parser.set_defaults(cmd=deletedb)
get_users_parser = subparsers.add_parser("get-users")
get_users_parser.set_defaults(cmd=get_users)
get_user_blogs_parser = subparsers.add_parser("get-user-blogs")
get_user_blogs_parser.add_argument("username")
get_user_blogs_parser.set_defaults(cmd=get_user_blogs)
args = parser.parse_args()
cmd_kwargs = {k: v for k, v in vars(args).items() if k != "cmd"}
args.cmd(**cmd_kwargs)
| 2.828125 | 3 |
afk-q-babyai/babyai/info_seeking/knowledge_graph.py | IouJenLiu/AFK | 1 | 12761201 | import re
from sent2vec.vectorizer import Vectorizer
from scipy import spatial
class KG(object):
    """Incrementally built knowledge graph of tokenized facts.

    Nodes are tuples of tokens.  Depending on ``mode`` the structure is:

    * ``'graph_overlap'`` -- nodes are linked when they share an n-gram.
    * ``'graph_cosine'``  -- nodes are linked based on the cosine distance of
      their BERT sentence embeddings (threshold 0.01 in
      get_all_related_nodes_cosine).
    * ``'set'``           -- a flat set of nodes, no edges.

    The graph tracks the connected component (CC) containing the current
    instruction node (set via reset()).
    """

    def __init__(self, mode='graph_overlap', n_gram=2):
        self.mode = mode
        # adjacency list: node tuple -> list of neighboring node tuples
        self.graph = {}
        # flat storage, used only in 'set' mode
        self.set = set()
        # node built from the current instruction; root of the tracked CC
        self.instr_node = None
        # current size of the CC containing instr_node
        self.instr_CC_size = 0
        # BERT sentence embedder; only exercised in 'graph_cosine' mode
        self.vectorizer = Vectorizer()
        if self.mode == 'graph_overlap':
            self.related_nodes_fn = self.get_all_related_nodes
        elif self.mode == 'graph_cosine':
            self.related_nodes_fn = self.get_all_related_nodes_cosine
        self.n_gram = n_gram

    def update(self, node):
        """Insert *node* (any token sequence) into the structure.

        :return: (increased, max_overlap) -- whether the instruction node's
            connected component grew, and the strongest relatedness score
            found against existing nodes.
        NOTE(review): if ``mode`` is not one of the recognized values,
        ``max_overlap`` is never bound and the return raises
        UnboundLocalError.
        """
        node1 = tuple(node)
        increased = False
        if self.mode == 'graph_overlap' or self.mode == 'graph_cosine':
            max_overlap = 0
            if node1 not in self.graph:
                related_nodes, max_overlap = self.related_nodes_fn(node1)
                self.graph[node1] = []
                # link the new node bidirectionally to all related nodes
                for n in related_nodes:
                    self.graph[n].append(node1)
                    self.graph[node1].append(n)
                increased = False
                # did the instruction node's component grow by this insert?
                if len(self.getCC()) > self.instr_CC_size:
                    self.instr_CC_size += 1
                    increased = True
        elif self.mode == 'set':
            max_overlap = 0
            if node1 not in self.set:
                self.set.add(node1)
                increased = True
                max_overlap = 1
        return increased, max_overlap

    def is_related(self, node1, node2):
        """Return True if any token of *node1* appears in *node2*."""
        # simple heuristic, need update
        for token in node1:
            if token in node2:
                return True
        return False

    def pre_process_n_gram(self, node1, n_gram):
        """Return the space-joined n-grams of the token sequence *node1*.

        For n_gram <= 1 the sequence is returned unchanged; windows that
        would extend past the end of the sequence are truncated.
        """
        if n_gram <= 1:
            return node1
        p_node1 = []
        for i in range(len(node1) - n_gram + 1):
            n_gram_phrase = node1[i]
            for k in range(1, n_gram):
                if i + k >= len(node1):
                    break
                n_gram_phrase += " " + node1[i + k]
            p_node1.append(n_gram_phrase)
        return p_node1

    def n_overlap(self, node1, node2, n_gram=2):
        """Count the n-grams of *node1* that also occur among *node2*'s n-grams."""
        p_node1 = self.pre_process_n_gram(node1, n_gram)
        p_node2 = self.pre_process_n_gram(node2, n_gram)
        n = 0
        for token in p_node1:
            if token in p_node2:
                n += 1
        return n

    def get_all_related_nodes(self, node1):
        """Return (nodes sharing at least one n-gram with *node1*, max overlap count)."""
        related_nodes = []
        max_overlap = 0
        for node2 in self.graph:
            if node2 == node1: continue
            n_overlap = self.n_overlap(node1, node2, n_gram=self.n_gram)
            if n_overlap > 0:
                related_nodes.append(node2)
                max_overlap = max(max_overlap, n_overlap)
        return related_nodes, max_overlap

    def get_all_related_nodes_cosine(self, node1):
        """Return nodes related to *node1* by BERT-embedding cosine distance.

        NOTE(review): despite the variable name, ``n_overlap`` here holds
        ``scipy.spatial.distance.cosine`` (a distance, not a similarity);
        nodes with distance > 0.01 are treated as related -- confirm this is
        the intended direction of the comparison.
        """
        related_nodes = []
        max_overlap = 0
        n1 = node1[0]
        for s in node1[1:]:
            n1 = n1 + " " + s
        for node2 in self.graph:
            if node2 == node1: continue
            n2 = node2[0]
            for s in node2[1:]:
                n2 = n2 + " " + s
            # embed both phrases and compare (re-embeds per pair; slow)
            self.vectorizer.bert([n1, n2])
            vectors = self.vectorizer.vectors
            n_overlap = spatial.distance.cosine(vectors[0], vectors[1])
            if n_overlap > 0.01:
                related_nodes.append(node2)
                max_overlap = max(max_overlap, n_overlap)
        return related_nodes, max_overlap

    def getCC(self):
        """
        :return: list of tuples which represents the connected component that contains the instr node.
            Ex. [('go', 'to', 'jack', 'favorite toy'), ('blue ball', 'room0')]
        """
        if self.mode == 'graph_overlap' or self.mode == 'graph_cosine':
            # breadth-first search from the instruction node
            def bfs(node, graph):
                res = []
                visited = set()
                q = [node]
                visited.add(node)
                while q:
                    v = q.pop(0)
                    res.append(v)
                    for n in graph[v]:
                        if n not in visited:
                            q.append(n)
                            visited.add(n)
                return res
            return bfs(self.instr_node, self.graph)
        else:
            return list(self.set)

    def reset(self, node):
        """Clear all state and install *node* as the new instruction node."""
        # add none to instruction node of KG for no adj query
        # TODO: ugly, may need refactor
        self.instr_node = tuple(node)
        self.graph = {self.instr_node: []}
        self.set = set()
        self.set.add(self.instr_node)
        self.instr_CC_size = len(self.graph[self.instr_node]) + 1

    def __repr__(self):
        """Render the adjacency list (graph modes) or the flat set."""
        if self.mode == 'graph_overlap' or self.mode == 'graph_cosine':
            ret = ""
            for k, v in self.graph.items():
                ret += str(k) + ": " + str(v) + "\n"
            return ret
        else:
            return str(self.set)
| 2.53125 | 3 |
object_database/proxy_server.py | APrioriInvestments/object_database | 2 | 12761202 | # Copyright 2017-2020 object_database Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ProxyServer
Models a Server that sits on top of another Server, and acts to pool subscription
requests. This makes it possible for several connections that all tend to
subscribe to the same schemas or types (say, the list of hosts) to share
connect to the same proxy, and only place the load of one connection on the
core server.
Usually, we'll put one ProxyServer per physical host, at least when
we are using larger hosts. This way, the centeral server doesn't get
overloaded trying to write connection data out to all the connections.
"""
import logging
import threading
import uuid
from typed_python import (
OneOf,
NamedTuple,
Dict,
Set,
Tuple,
ConstDict,
makeNamedTuple,
TupleOf,
ListOf,
deserialize,
)
from .channel import ServerToClientChannel, ClientToServerChannel
from .messages import ServerToClient, ClientToServer
from .server import ObjectBase
from .schema import (
IndexValue,
FieldId,
FieldDefinition,
ObjectId,
ObjectFieldId,
SchemaDefinition,
TypeDefinition,
IndexId,
)
# A single client subscription request: a (schema, typename) pair, optionally
# narrowed to one (fieldname, indexValue) pair, plus whether the client asked
# for a lazy subscription.
SubscriptionKey = NamedTuple(
    schema=str,
    typename=str,
    fieldname_and_value=OneOf(None, Tuple(str, IndexValue)),
    isLazy=bool,
)
# recall these definitions, included here for reference
# ObjectId = int
# FieldId = int
# ObjectFieldId = NamedTuple(objId=int, fieldId=int, isIndexValue=bool)
# IndexValue = bytes
# IndexId = NamedTuple(fieldId=int, indexValue=IndexValue)
# TypeDefinition = NamedTuple(fields=TupleOf(str), indices=TupleOf(str))
# SchemaDefinition = ConstDict(str, TypeDefinition)
# FieldDefinition = NamedTuple(schema=str, typename=str, fieldname=str)
def mergeTypeDefinition(typedef1, typedef2):
    """Merge two TypeDefinitions: typedef1's fields/indices first, followed by
    any of typedef2's that typedef1 does not already contain."""
    extraFields = [f for f in typedef2.fields if f not in typedef1.fields]
    extraIndices = [ix for ix in typedef2.indices if ix not in typedef1.indices]
    return TypeDefinition(
        fields=list(typedef1.fields) + extraFields,
        indices=list(typedef1.indices) + extraIndices,
    )
def mergeSchemaDefinitions(schemaDef1, schemaDef2):
    """Union of two SchemaDefinitions; type names present in both are merged
    with mergeTypeDefinition (schemaDef1's ordering wins)."""
    merged = dict(schemaDef1)
    for typename, typedef in schemaDef2.items():
        existing = merged.get(typename)
        merged[typename] = (
            typedef if existing is None else mergeTypeDefinition(existing, typedef)
        )
    return SchemaDefinition(merged)
class FieldIdToDefMapping:
    """Bidirectional mapping between numeric field ids and field definitions."""

    def __init__(self):
        # forward and reverse views, kept in lockstep by addFieldMapping
        self.fieldIdToDef = {}
        self.fieldDefToId = {}

    def addFieldMapping(self, fieldId: FieldId, fieldDef: FieldDefinition):
        # record both directions of the id <-> definition association
        self.fieldIdToDef[fieldId] = fieldDef
        self.fieldDefToId[fieldDef] = fieldId
class SubscriptionState:
def __init__(self):
# for each fieldId, the set of channels subscribed to it and vice versa
# this is what we use to determine which channels are subscribed
self.fieldIdToSubscribedChannels = Dict(FieldId, Set(ServerToClientChannel))()
self.channelToSubscribedFieldIds = Dict(ServerToClientChannel, Set(FieldId))()
self.indexIdToSubscribedChannels = Dict(IndexId, Set(ServerToClientChannel))()
self.channelToSubscribedIndexIds = Dict(ServerToClientChannel, Set(IndexId))()
self.channelToSubscribedOids = Dict(ServerToClientChannel, Set(ObjectId))()
self.oidToSubscribedChannels = Dict(ObjectId, Set(ServerToClientChannel))()
self.channelToLazilySubscribedFieldIds = Dict(ServerToClientChannel, Set(FieldId))()
self.channelToLazilySubscribedIndexIds = Dict(ServerToClientChannel, Set(IndexId))()
# the definition of each schema as we know it
self.schemaDefs = Dict(str, ConstDict(FieldDefinition, FieldId))()
# the schemas we've actually defined on the server
# map from name to SchemaDefinition
self._definedSchemas = Dict(str, SchemaDefinition)()
# map from schema -> typename -> fieldname -> fieldId
self.schemaTypeAndNameToFieldId = Dict(str, Dict(str, Dict(str, int)))()
self.fieldIdToDef = Dict(int, FieldDefinition)()
# mapping between a channel and its subscriptions
self.channelSubscriptions = Dict(ServerToClientChannel, Set(SubscriptionKey))()
# subscriptions pending a schema/typname being fully subscribed
self.channelToPendingSubscriptions = Dict(
ServerToClientChannel, Set(SubscriptionKey)
)()
self.channelToPendingTransactions = Dict(
ServerToClientChannel, ListOf(ClientToServer)
)()
self.subscriptionsPendingSubscriptionOnServer = Dict(
# schema and typename
Tuple(str, str),
Set(Tuple(ServerToClientChannel, SubscriptionKey)),
)()
# the current top transaction we've ever seen.
self.transactionId = -1
# set of schema/typename for which we have complete subscriptions
self.completedTypes = Set(NamedTuple(schema=str, typename=str))()
# the state of our subscriptions
self.objectValues = Dict(FieldId, Dict(ObjectId, bytes))()
self.indexValues = Dict(FieldId, Dict(ObjectId, IndexValue))()
self.reverseIndexValues = Dict(FieldId, Dict(IndexValue, Set(ObjectId)))()
def dropConnection(self, channel: ServerToClientChannel):
if channel in self.channelToSubscribedFieldIds:
for fieldId in self.channelToSubscribedFieldIds[channel]:
self.fieldIdToSubscribedChannels[fieldId].discard(channel)
self.channelToSubscribedFieldIds.pop(channel)
if channel in self.channelToSubscribedIndexIds:
for fieldAndIv in self.channelToSubscribedIndexIds[channel]:
self.indexIdToSubscribedChannels[fieldAndIv].discard(channel)
self.channelToSubscribedIndexIds.pop(channel)
if channel in self.channelToSubscribedOids:
for oid in self.channelToSubscribedOids[channel]:
self.oidToSubscribedChannels[oid].discard(channel)
if not self.oidToSubscribedChannels[oid]:
self.oidToSubscribedChannels.pop(oid)
self.channelToSubscribedOids.pop(channel)
if channel in self.channelSubscriptions:
self.channelSubscriptions.pop(channel)
if channel in self.channelToPendingTransactions:
self.channelToPendingTransactions.pop(channel)
if channel in self.channelToPendingSubscriptions:
for subsKey in self.channelToPendingSubscriptions[channel]:
self.subscriptionsPendingSubscriptionOnServer[
subsKey.schema, subsKey.typename
].pop((channel, subsKey))
self.channelToPendingSubscriptions.pop(channel)
if channel in self.channelToLazilySubscribedFieldIds:
self.channelToLazilySubscribedFieldIds.pop(channel)
if channel in self.channelToLazilySubscribedIndexIds:
self.channelToLazilySubscribedIndexIds.pop(channel)
def addSubscription(self, channel, subscriptionKey: SubscriptionKey):
self.channelSubscriptions.setdefault(channel).add(subscriptionKey)
if (
makeNamedTuple(schema=subscriptionKey.schema, typename=subscriptionKey.typename)
in self.completedTypes
):
self.sendDataForSubscription(channel, subscriptionKey)
else:
self.subscriptionsPendingSubscriptionOnServer.setdefault(
(subscriptionKey.schema, subscriptionKey.typename)
).add((channel, subscriptionKey))
self.channelToPendingSubscriptions.setdefault(channel).add(subscriptionKey)
def sendDataForSubscription(self, channel, key: SubscriptionKey):
# get the set of affected objects
oids = self.objectIndentitiesForSubscriptionKey(key)
if key.fieldname_and_value is not None:
fieldname, indexValue = key.fieldname_and_value
if fieldname != "_identity":
fieldId = self.schemaTypeAndNameToFieldId[key.schema][key.typename][fieldname]
self.indexIdToSubscribedChannels.setdefault((fieldId, indexValue)).add(channel)
self.channelToSubscribedIndexIds.setdefault(channel).add((fieldId, indexValue))
if key.isLazy:
self.channelToLazilySubscribedIndexIds.setdefault(channel).add(
IndexId(fieldId=fieldId, indexValue=indexValue)
)
# and also mark the specific values its subscribed to
self.channelToSubscribedOids[channel] = oids
for oid in oids:
self.oidToSubscribedChannels.setdefault(oid).add(channel)
else:
# subscribe this channel to all the values in this type
for fieldId in self.schemaTypeAndNameToFieldId[key.schema][key.typename].values():
self.fieldIdToSubscribedChannels.setdefault(fieldId).add(channel)
self.channelToSubscribedFieldIds.setdefault(channel).add(fieldId)
if key.isLazy:
self.channelToLazilySubscribedFieldIds.setdefault(channel).add(fieldId)
if key.isLazy:
channel.sendMessage(
ServerToClient.LazySubscriptionData(
schema=key.schema,
typename=key.typename,
fieldname_and_value=key.fieldname_and_value,
identities=oids,
index_values=self.indexValuesForOids(key.schema, key.typename, oids),
)
)
else:
channel.sendMessage(
ServerToClient.SubscriptionData(
schema=key.schema,
typename=key.typename,
fieldname_and_value=key.fieldname_and_value,
values=self.objectValuesForOids(key.schema, key.typename, oids),
index_values=self.indexValuesForOids(key.schema, key.typename, oids),
identities=None if key.fieldname_and_value is None else oids,
)
)
channel.sendMessage(
ServerToClient.SubscriptionComplete(
schema=key.schema,
typename=key.typename,
fieldname_and_value=key.fieldname_and_value,
tid=self.transactionId,
)
)
def objectIndentitiesForSubscriptionKey(self, key: SubscriptionKey) -> Set(ObjectId):
oids = Set(ObjectId)()
if key.fieldname_and_value is not None:
if key.fieldname_and_value[0] == "_identity":
# this is an 'identity' subscription, which subscribes to a single object
oids.add(deserialize(ObjectBase, key.fieldname_and_value[1])._identity)
return oids
if key.schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[key.schema]
if key.typename in typenameToFieldMap:
if key.fieldname_and_value is None:
for fieldId in typenameToFieldMap[key.typename].values():
if fieldId in self.objectValues:
oids.update(self.objectValues[fieldId])
else:
fieldname, indexValue = key.fieldname_and_value
if fieldname in typenameToFieldMap[key.typename]:
fieldId = typenameToFieldMap[key.typename][fieldname]
if (
fieldId in self.reverseIndexValues
and indexValue in self.reverseIndexValues[fieldId]
):
oids.update(self.reverseIndexValues[fieldId][indexValue])
return oids
def objectValuesForOids(self, schema, typename, oids):
res = Dict(ObjectFieldId, OneOf(None, bytes))()
if schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[schema]
if typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[typename].values():
if fieldId in self.objectValues:
oidToVal = self.objectValues[fieldId]
for oid in oids:
if oid in oidToVal:
res[
ObjectFieldId(
objId=oid, fieldId=fieldId, isIndexValue=False
)
] = oidToVal[oid]
return ConstDict(ObjectFieldId, OneOf(None, bytes))(res)
def indexValuesForOids(self, schema, typename, oids):
res = Dict(ObjectFieldId, OneOf(None, IndexValue))()
if schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[schema]
if typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[typename].values():
if fieldId in self.indexValues:
oidToVal = self.indexValues[fieldId]
for oid in oids:
if oid in oidToVal:
res[
ObjectFieldId(
objId=oid, fieldId=fieldId, isIndexValue=True
)
] = oidToVal[oid]
return ConstDict(ObjectFieldId, OneOf(None, IndexValue))(res)
def objectValuesForSubscriptionKey(self, subscriptionKey):
res = Dict(ObjectFieldId, OneOf(None, bytes))()
if subscriptionKey.schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[subscriptionKey.schema]
if subscriptionKey.typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[subscriptionKey.typename].values():
for objectId, value in self.objectValues.setdefault(fieldId).items():
res[
ObjectFieldId(objId=objectId, fieldId=fieldId, isIndexValue=False)
] = value
return ConstDict(ObjectFieldId, OneOf(None, bytes))(res)
def indexValuesForSubscriptionKey(self, subscriptionKey):
res = Dict(ObjectFieldId, OneOf(None, bytes))()
if subscriptionKey.schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[subscriptionKey.schema]
if subscriptionKey.typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[subscriptionKey.typename].values():
for objectId, value in self.indexValues.setdefault(fieldId).items():
res[
ObjectFieldId(objId=objectId, fieldId=fieldId, isIndexValue=True)
] = value
return ConstDict(ObjectFieldId, OneOf(None, bytes))(res)
def mapSchema(self, schemaName, schemaDef: ConstDict(FieldDefinition, FieldId)):
self.schemaDefs[schemaName] = schemaDef
for fieldDef, fieldId in schemaDef.items():
self.schemaTypeAndNameToFieldId.setdefault(fieldDef.schema).setdefault(
fieldDef.typename
)[fieldDef.fieldname] = fieldId
self.fieldIdToDef[fieldId] = fieldDef
def handleSubscriptionData(
self, schema, typename, fieldnameAndValue, values, indexValues, identities
):
def update(dictlike, key, valueOrNone):
if valueOrNone is None:
if key in dictlike:
del dictlike[key]
else:
dictlike[key] = valueOrNone
# this will always be for an entire schema
for key, valueData in values.items():
assert not key.isIndexValue
update(self.objectValues.setdefault(key.fieldId), key.objId, valueData)
for key, indexData in indexValues.items():
assert key.isIndexValue
update(self.indexValues.setdefault(key.fieldId), key.objId, indexData)
indexValueToObjects = self.reverseIndexValues.setdefault(key.fieldId)
if indexData is not None:
indexValueToObjects.setdefault(indexData).add(key.objId)
def getChannelsForSchemaAndTypename(self, schema, typename):
channels = set()
if schema not in self.schemaTypeAndNameToFieldId:
return channels
if typename not in self.schemaTypeAndNameToFieldId[schema]:
return channels
for fieldId in self.schemaTypeAndNameToFieldId[schema][typename].values():
if fieldId in self.fieldIdToSubscribedChannels:
channels.update(self.fieldIdToSubscribedChannels[fieldId])
return channels
    def handleSubscriptionComplete(self, schema, typename, fieldnameAndValue, tid):
        """Mark (schema, typename) complete and flush any work queued behind it.

        Returns a Dict(channel -> list of deferred client messages) the caller
        should replay now that the subscription's data has fully arrived.
        """
        if tid > self.transactionId:
            self.transactionId = tid
        channelsToMessageToSend = Dict(ServerToClientChannel, ListOf(ServerToClient))()
        # this will always be for an entire schema
        self.completedTypes.add(makeNamedTuple(schema=schema, typename=typename))
        if (schema, typename) in self.subscriptionsPendingSubscriptionOnServer:
            for channel, subscriptionKey in self.subscriptionsPendingSubscriptionOnServer.pop(
                (schema, typename)
            ):
                self.channelToPendingSubscriptions[channel].discard(subscriptionKey)
                self.sendDataForSubscription(channel, subscriptionKey)
                # once a channel has no pending subscriptions left, its buffered
                # transactions can be released
                if not self.channelToPendingSubscriptions[channel]:
                    self.channelToPendingSubscriptions.pop(channel)
                    if channel in self.channelToPendingTransactions:
                        channelsToMessageToSend[
                            channel
                        ] = self.channelToPendingTransactions.pop(channel)
        return channelsToMessageToSend
    def _increaseBroadcastTransactionToInclude(
        self, indexId, writes, set_adds, set_removes, newOids
    ):
        """Update the transaction data in 'writes', 'set_adds', 'set_removes' to contain
        all the definitions of the objects contained in newOids.
        """
        # figure out what kind of objects these are. They all came from
        # the same index id
        indexFieldDef = self.fieldIdToDef[indexId.fieldId]
        fieldnameToFieldId = self.schemaTypeAndNameToFieldId[indexFieldDef.schema][
            indexFieldDef.typename
        ]
        typeDefinition = self._definedSchemas[indexFieldDef.schema][indexFieldDef.typename]
        # pull the cached value of every regular field for each new object
        for fieldname in typeDefinition.fields:
            fieldId = fieldnameToFieldId[fieldname]
            if fieldId in self.objectValues:
                for oid in newOids:
                    if oid in self.objectValues[fieldId]:
                        writes[ObjectFieldId(objId=oid, fieldId=fieldId)] = self.objectValues[
                            fieldId
                        ][oid]
        # NOTE(review): index fields are looked up in self.objectValues here,
        # presumably because index source values are cached alongside regular
        # field values -- TODO confirm.
        for indexname in typeDefinition.indices:
            fieldId = fieldnameToFieldId[indexname]
            if fieldId in self.objectValues:
                for oid in newOids:
                    if oid in self.objectValues[fieldId]:
                        fieldVal = self.objectValues[fieldId][oid]
                        set_adds.setdefault(IndexId(fieldId=fieldId, indexValue=fieldVal)).add(
                            oid
                        )
    def handleTransaction(self, writes, set_adds, set_removes, transaction_id):
        """Apply a committed transaction to the cache and broadcast it.

        Updates objectValues/indexValues/reverseIndexValues, sends
        SubscriptionIncrease messages to channels whose index subscriptions now
        cover new objects, sends LazyTransactionPriors to lazily-subscribed
        channels, then forwards the (possibly enlarged) transaction to every
        affected channel.
        """
        # we may have to modify the transaction values
        writes = Dict(ObjectFieldId, OneOf(None, bytes))(writes)
        # snapshot current values before mutation, for lazy subscribers
        priorValues = Dict(ObjectFieldId, OneOf(None, bytes))()
        for ofi, value in writes.items():
            priorValues[ofi] = self.objectValues.setdefault(ofi.fieldId).get(ofi.objId)
        set_adds = Dict(IndexId, Set(ObjectId))(
            {k: Set(ObjectId)(v) for k, v in set_adds.items()}
        )
        set_removes = Dict(IndexId, Set(ObjectId))(
            {k: Set(ObjectId)(v) for k, v in set_removes.items()}
        )
        fieldIds = Set(FieldId)()
        oidsMentioned = Set(ObjectId)()
        # all channels that need to get the prior values of each thing
        # being written before they receive the transaction (because of
        # laziness)
        channelsTriggeredForPriors = Set(ServerToClientChannel)()
        # apply the raw writes (None deletes the value)
        for objectFieldId, val in writes.items():
            oidsMentioned.add(objectFieldId.objId)
            fieldIds.add(objectFieldId.fieldId)
            oidMap = self.objectValues.setdefault(objectFieldId.fieldId)
            if val is None:
                oidMap.pop(objectFieldId.objId, b"")
            else:
                oidMap[objectFieldId.objId] = val
        # remove objects from indices, pruning empty reverse-index buckets
        for indexId, oids in set_removes.items():
            vals = self.indexValues.setdefault(indexId.fieldId)
            for oid in oids:
                oidsMentioned.add(oid)
                if oid in vals:
                    vals.pop(oid)
            objectsWithThisIndexVal = self.reverseIndexValues.setdefault(
                indexId.fieldId
            ).setdefault(indexId.indexValue)
            for oid in oids:
                objectsWithThisIndexVal.discard(oid)
            if not objectsWithThisIndexVal:
                self.reverseIndexValues[indexId.fieldId].pop(indexId.indexValue)
        idsToAddToTransaction = Dict(IndexId, Set(ObjectId))()
        for indexId, oids in set_adds.items():
            vals = self.indexValues.setdefault(indexId.fieldId)
            # each channel subscribed to this indexid may need a 'SubscriptionIncrease'
            # message.
            if indexId in self.indexIdToSubscribedChannels:
                for channel in self.indexIdToSubscribedChannels[indexId]:
                    # if this channel is lazily subscribed to this index then we need to send
                    # priors for every value we're updating. We're not being careful about
                    # tracking this on a per-object basis, so in theory we could do better
                    if indexId in self.channelToLazilySubscribedIndexIds.setdefault(channel):
                        channelsTriggeredForPriors.add(channel)
                    if channel not in self.channelToSubscribedOids:
                        newOids = oids
                    else:
                        existingSet = self.channelToSubscribedOids[channel]
                        newOids = [o for o in oids if o not in existingSet]
                    if newOids:
                        self.channelToSubscribedOids.setdefault(channel).update(newOids)
                        for n in newOids:
                            self.oidToSubscribedChannels.setdefault(n).add(channel)
                        fieldDef = self.fieldIdToDef[indexId.fieldId]
                        channel.sendMessage(
                            ServerToClient.SubscriptionIncrease(
                                schema=fieldDef.schema,
                                typename=fieldDef.typename,
                                fieldname_and_value=(fieldDef.fieldname, indexId.indexValue),
                                identities=newOids,
                                transaction_id=transaction_id,
                            )
                        )
                        idsToAddToTransaction.setdefault(indexId).update(newOids)
            objectsWithThisIndexVal = self.reverseIndexValues.setdefault(
                indexId.fieldId
            ).setdefault(indexId.indexValue)
            for oid in oids:
                oidsMentioned.add(oid)
                vals[oid] = indexId.indexValue
                objectsWithThisIndexVal.add(oid)
        # enlarge the broadcast so newly-subscribed objects arrive fully defined
        for indexId, oids in idsToAddToTransaction.items():
            self._increaseBroadcastTransactionToInclude(
                indexId, writes, set_adds, set_removes, oids
            )
        for indexId in set_adds:
            fieldIds.add(indexId.fieldId)
        for indexId in set_removes:
            fieldIds.add(indexId.fieldId)
        # determine which channels are affected
        channels = set()
        for f in fieldIds:
            if f in self.fieldIdToSubscribedChannels:
                channels.update(self.fieldIdToSubscribedChannels[f])
                for c in self.fieldIdToSubscribedChannels[f]:
                    if f in self.channelToLazilySubscribedFieldIds.setdefault(c):
                        channelsTriggeredForPriors.add(c)
        for oid in oidsMentioned:
            if oid in self.oidToSubscribedChannels:
                channels.update(self.oidToSubscribedChannels[oid])
        if transaction_id > self.transactionId:
            self.transactionId = transaction_id
        # priors must reach lazy subscribers before the transaction itself
        for channel in channelsTriggeredForPriors:
            channel.sendMessage(ServerToClient.LazyTransactionPriors(writes=priorValues))
        if channels:
            msg = ServerToClient.Transaction(
                writes=writes,
                set_adds=ConstDict(IndexId, TupleOf(ObjectId))(
                    {k: TupleOf(ObjectId)(v) for k, v in set_adds.items()}
                ),
                set_removes=ConstDict(IndexId, TupleOf(ObjectId))(
                    {k: TupleOf(ObjectId)(v) for k, v in set_removes.items()}
                ),
                transaction_id=transaction_id,
            )
            for c in channels:
                c.sendMessage(msg)
    def increaseSubscriptionIfNecessary(self, channel, set_adds, transaction_id):
        """Mark any new objects we need to track based on contents of 'set_adds'.
        When a client creates new objects, it needs to track them regardless of
        whether it's explicitly subscribed to the object.
        So we check whether any new objects are being created (set_adds with field ' exists')
        and if we're not subscribed to the type we increase the subscription.
        """
        for indexId, oids in set_adds.items():
            fieldDef = self.fieldIdToDef[indexId.fieldId]
            # ' exists' (leading space is intentional) marks object creation
            if fieldDef.fieldname == " exists" and (
                channel not in self.fieldIdToSubscribedChannels.setdefault(indexId.fieldId)
            ):
                newIds = [
                    x
                    for x in oids
                    if x not in self.channelToSubscribedOids.setdefault(channel)
                ]
                if newIds:
                    self.channelToSubscribedOids[channel].update(newIds)
                    for oid in newIds:
                        self.oidToSubscribedChannels.setdefault(oid).add(channel)
                    # tell the client its subscription now covers these objects
                    channel.sendMessage(
                        ServerToClient.SubscriptionIncrease(
                            schema=fieldDef.schema,
                            typename=fieldDef.typename,
                            fieldname_and_value=(fieldDef.fieldname, indexId.indexValue),
                            identities=newIds,
                            transaction_id=transaction_id,
                        )
                    )
    def lazyLoadObject(self, channel, schema, typename, identity):
        """Send the full field values for one lazily-loaded object to 'channel'."""
        # NOTE(review): uses channel.write where the rest of this class uses
        # channel.sendMessage -- confirm both are equivalent on the channel API.
        channel.write(
            ServerToClient.LazyLoadResponse(
                identity=identity,
                values=self.objectValuesForOids(schema, typename, [identity]),
            )
        )
class ProxyServer:
    """Multiplexes many downstream client channels over one upstream channel
    to the main object-database server.

    Tracks per-channel authentication, schema definitions and mappings,
    subscription state, and guid-translation tables so that flushes and
    transactions from different downstream channels can share the single
    upstream connection. All public entry points take self._lock.
    """

    def __init__(self, upstreamChannel: ClientToServerChannel, authToken):
        self._channelToMainServer = upstreamChannel
        self._authToken = authToken
        self._logger = logging.getLogger(__name__)
        self._downstreamChannels = set()
        self._authenticatedDownstreamChannels = set()
        self._connectionIdentity = None
        self._identityRoot = None
        self._transactionNum = None
        self._lock = threading.RLock()
        # messages that arrived before we were authenticated upstream;
        # replayed once Initialize is received.
        self._deferredMessagesAndEndpoints = []
        self._channelToMainServer.setServerToClientHandler(self.handleServerToClientMessage)
        self._guidToChannelRequestingIdentity = {}
        self._channelToMissedHeartbeatCount = Dict(ServerToClientChannel, int)()
        self._channelToConnectionId = Dict(ServerToClientChannel, ObjectId)()
        # dictionary from (channel, schemaName) -> SchemaDefinition
        self._channelSchemas = Dict(Tuple(ServerToClientChannel, str), SchemaDefinition)()
        # map from schema name to iteration number to ConstDict(FieldDefinition, int)
        self._mappedSchemas = Dict(str, Dict(int, ConstDict(FieldDefinition, FieldId)))()
        self._requestedSchemaIteration = Dict(str, int)()
        self._receivedSchemaIteration = Dict(str, int)()
        # for each requested (schema, iteration), the set of channels waiting for it
        self._unmappedSchemasToChannels = {}
        self._fieldIdToDefMapping = FieldIdToDefMapping()
        self._subscriptionState = SubscriptionState()
        # right now, we only subscribe to entire types
        self._subscribedTypes = Set(NamedTuple(schema=str, typename=str))()
        # state machine for tracking the flush guids we're getting
        # from each channel
        self._flushGuidIx = 0
        self._outgoingFlushGuidToChannelAndFlushGuid = Dict(
            int, Tuple(ServerToClientChannel, int)
        )()
        # state machine for managing the transactions we have pending
        # on each channel
        self._transactionGuidIx = 0
        self._channelAndTransactionGuidToOutgoingTransactionGuid = Dict(
            Tuple(ServerToClientChannel, int), int
        )()
        self._outgoingTransactionGuidToChannelAndTransactionGuid = Dict(
            int, Tuple(ServerToClientChannel, int)
        )()

    @property
    def authToken(self):
        return self._authToken

    def authenticate(self):
        """Authenticate ourselves with the upstream main server."""
        self._channelToMainServer.sendMessage(
            ClientToServer.Authenticate(token=self.authToken)
        )

    def addConnection(self, channel: ServerToClientChannel):
        """An incoming connection is being made."""
        with self._lock:
            self._downstreamChannels.add(channel)
            self._channelToMissedHeartbeatCount[channel] = 0
            channel.setClientToServerHandler(
                lambda msg: self.handleClientToServerMessage(channel, msg)
            )

    def dropConnection(self, channel: ServerToClientChannel):
        """An incoming connection has dropped."""
        with self._lock:
            if channel not in self._downstreamChannels:
                return
            self._subscriptionState.dropConnection(channel)
            self._downstreamChannels.discard(channel)
            del self._channelToMissedHeartbeatCount[channel]
            self._authenticatedDownstreamChannels.discard(channel)
            # release the dependent connection id upstream, if one was issued
            if channel in self._channelToConnectionId:
                connId = self._channelToConnectionId.pop(channel)
                self._channelToMainServer.sendMessage(
                    ClientToServer.DropDependentConnectionId(connIdentity=connId)
                )
            channel.close()

    def handleClientToServerMessage(self, channel, msg: ClientToServer):
        """Entry point for all downstream traffic; serialized by self._lock."""
        with self._lock:
            self._handleClientToServerMessage(channel, msg)

    def checkForDeadConnections(self):
        """Drop any downstream channel that has missed too many heartbeats."""
        with self._lock:
            for c in list(self._channelToMissedHeartbeatCount):
                self._channelToMissedHeartbeatCount[c] += 1
                if self._channelToMissedHeartbeatCount[c] >= 4:
                    # use the instance logger for consistency with the rest of
                    # the class
                    self._logger.info(
                        "Connection %s has not heartbeat in a long time. Killing it.",
                        self._channelToConnectionId.get(c),
                    )
                    c.close()
                    self.dropConnection(c)

    def _handleClientToServerMessage(self, channel, msg: ClientToServer):
        """Dispatch one downstream message; caller must hold self._lock."""
        if channel not in self._downstreamChannels:
            # this channel disconnected
            return
        if self._connectionIdentity is None:
            # we are not authenticated yet.
            self._deferredMessagesAndEndpoints.append((channel, msg))
            return
        if msg.matches.Authenticate:
            if channel in self._authenticatedDownstreamChannels:
                # the channel is already authenticated
                # (warning, not the deprecated Logger.warn alias)
                self._logger.warning("Channel attempted to re-authenticate")
                self.dropConnection(channel)
                return
            if msg.token != self._authToken:
                self._logger.warning("Channel attempted to authenticate with invalid token.")
                self.dropConnection(channel)
                return
            self._authenticatedDownstreamChannels.add(channel)
            # we can request a new connection ID for this worker
            guid = str(uuid.uuid4())
            self._guidToChannelRequestingIdentity[guid] = channel
            self._channelToMainServer.sendMessage(
                ClientToServer.RequestDependentConnectionId(
                    parentId=self._connectionIdentity, guid=guid
                )
            )
            return
        # ensure that we're connected
        if channel not in self._authenticatedDownstreamChannels:
            # don't worry about heartbeats
            if msg.matches.Heartbeat:
                return
            self._logger.warning(
                "Channel attempted to communicate without authenticating: %s", type(msg)
            )
            self.dropConnection(channel)
            return
        self._handleAuthenticatedMessage(channel, msg)

    def _handleAuthenticatedMessage(self, channel, msg: ClientToServer):
        """Handle a message from an authenticated downstream channel."""
        if msg.matches.DefineSchema:
            self._channelSchemas[channel, msg.name] = msg.definition
            if msg.name not in self._subscriptionState._definedSchemas:
                # first definition of this schema: forward it verbatim
                self._requestedSchemaIteration[msg.name] = 0
                self._channelToMainServer.sendMessage(
                    ClientToServer.DefineSchema(name=msg.name, definition=msg.definition)
                )
                self._subscriptionState._definedSchemas[msg.name] = msg.definition
            else:
                if msg.definition != self._subscriptionState._definedSchemas[msg.name]:
                    biggerSchema = mergeSchemaDefinitions(
                        self._subscriptionState._definedSchemas[msg.name], msg.definition
                    )
                    # if the schema contains new fields we need to send this message and
                    # enlarge the schema definition
                    if biggerSchema != self._subscriptionState._definedSchemas[msg.name]:
                        self._requestedSchemaIteration[msg.name] += 1
                        self._channelToMainServer.sendMessage(
                            ClientToServer.DefineSchema(name=msg.name, definition=biggerSchema)
                        )
                        self._subscriptionState._definedSchemas[msg.name] = biggerSchema
            schemaIteration = self._requestedSchemaIteration[msg.name]
            if (
                msg.name in self._mappedSchemas
                and schemaIteration in self._mappedSchemas[msg.name]
            ):
                # the mapping for this iteration already arrived; answer now
                channel.sendMessage(
                    ServerToClient.SchemaMapping(
                        schema=msg.name, mapping=self._mappedSchemas[msg.name][schemaIteration]
                    )
                )
            else:
                # queue this channel until the mapping comes back upstream
                self._unmappedSchemasToChannels.setdefault(
                    (msg.name, schemaIteration), set()
                ).add(channel)
            return
        if msg.matches.SubscribeNone:
            schemaAndTypename = makeNamedTuple(schema=msg.schema, typename=msg.typename)
            if schemaAndTypename not in self._subscribedTypes:
                self._channelToMainServer.sendMessage(
                    ClientToServer.Subscribe(
                        schema=msg.schema,
                        typename=msg.typename,
                        fieldname_and_value=None,
                        isLazy=False,
                    )
                )
                self._subscribedTypes.add(schemaAndTypename)
            return
        if msg.matches.Subscribe:
            schemaAndTypename = makeNamedTuple(schema=msg.schema, typename=msg.typename)
            if (channel, msg.schema) not in self._channelSchemas:
                raise Exception(
                    f"Can't subscribe to schema {msg.schema} that we don't have "
                    f"a definition for."
                )
            subscription = SubscriptionKey(
                schema=msg.schema,
                typename=msg.typename,
                fieldname_and_value=msg.fieldname_and_value,
                isLazy=msg.isLazy,
            )
            # upstream we always subscribe to the whole type, non-lazily;
            # the narrower subscription is tracked locally
            if schemaAndTypename not in self._subscribedTypes:
                self._channelToMainServer.sendMessage(
                    ClientToServer.Subscribe(
                        schema=subscription.schema,
                        typename=subscription.typename,
                        fieldname_and_value=None,
                        isLazy=False,
                    )
                )
                self._subscribedTypes.add(schemaAndTypename)
            self._subscriptionState.addSubscription(channel, subscription)
            return
        if msg.matches.Flush:
            # translate the channel-local flush guid into a unique outgoing one
            self._flushGuidIx += 1
            guid = self._flushGuidIx
            self._outgoingFlushGuidToChannelAndFlushGuid[guid] = (channel, msg.guid)
            self._channelToMainServer.sendMessage(ClientToServer.Flush(guid=guid))
            return
        if msg.matches.LoadLazyObject:
            if (
                makeNamedTuple(schema=msg.schema, typename=msg.typename)
                not in self._subscriptionState.completedTypes
            ):
                self._logger.error("Client tried to lazy load for a type we're not subscribed to")
                self.dropConnection(channel)
                return
            self._subscriptionState.lazyLoadObject(
                channel, msg.schema, msg.typename, msg.identity
            )
            return
        if msg.matches.TransactionData:
            if channel in self._subscriptionState.channelToPendingSubscriptions:
                # buffer transactions until the channel's subscriptions complete
                assert self._subscriptionState.channelToPendingSubscriptions[channel]
                self._subscriptionState.channelToPendingTransactions.setdefault(
                    channel
                ).append(msg)
                return
            if (
                channel,
                msg.transaction_guid,
            ) in self._channelAndTransactionGuidToOutgoingTransactionGuid:
                guid = self._channelAndTransactionGuidToOutgoingTransactionGuid[
                    channel, msg.transaction_guid
                ]
            else:
                self._transactionGuidIx += 1
                guid = self._transactionGuidIx
                self._outgoingTransactionGuidToChannelAndTransactionGuid[guid] = (
                    channel,
                    msg.transaction_guid,
                )
                self._channelAndTransactionGuidToOutgoingTransactionGuid[
                    channel, msg.transaction_guid
                ] = guid
            self._subscriptionState.increaseSubscriptionIfNecessary(
                channel, msg.set_adds, self._transactionNum
            )
            self._channelToMainServer.sendMessage(
                ClientToServer.TransactionData(
                    writes=msg.writes,
                    set_adds=msg.set_adds,
                    set_removes=msg.set_removes,
                    key_versions=msg.key_versions,
                    index_versions=msg.index_versions,
                    transaction_guid=guid,
                )
            )
            return
        if msg.matches.CompleteTransaction:
            if channel in self._subscriptionState.channelToPendingSubscriptions:
                assert self._subscriptionState.channelToPendingSubscriptions[channel]
                self._subscriptionState.channelToPendingTransactions.setdefault(
                    channel
                ).append(msg)
                return
            if (
                channel,
                msg.transaction_guid,
            ) not in self._channelAndTransactionGuidToOutgoingTransactionGuid:
                self._logger.error(
                    "Received unexpected CompleteTransaction message: %s", msg.transaction_guid
                )
                return
            guid = self._channelAndTransactionGuidToOutgoingTransactionGuid.pop(
                (channel, msg.transaction_guid)
            )
            self._channelToMainServer.sendMessage(
                ClientToServer.CompleteTransaction(
                    as_of_version=msg.as_of_version, transaction_guid=guid
                )
            )
            return
        if msg.matches.Heartbeat:
            if channel in self._downstreamChannels:
                self._channelToMissedHeartbeatCount[channel] = 0
            return
        raise Exception("Don't know how to handle ", msg)

    def handleServerToClientMessage(self, msg: ServerToClient):
        """Handle a message arriving from the upstream main server."""
        with self._lock:
            if msg.matches.Initialize:
                self._connectionIdentity = msg.connIdentity
                self._identityRoot = msg.identity_root
                self._transactionNum = msg.transaction_num
                # process any messages we received while we were not yet
                # authenticated. (distinct loop names avoid shadowing 'msg')
                for deferredChannel, deferredMsg in self._deferredMessagesAndEndpoints:
                    self._handleClientToServerMessage(deferredChannel, deferredMsg)
                self._deferredMessagesAndEndpoints.clear()
                return
            if msg.matches.DependentConnectionId:
                guid = msg.guid
                channel = self._guidToChannelRequestingIdentity.pop(guid, None)
                if channel is None or channel not in self._downstreamChannels:
                    # the channel was disconnected before we processed the message.
                    # just send the drop back.
                    self._channelToMainServer.sendMessage(
                        ClientToServer.DropDependentConnectionId(connIdentity=msg.connIdentity)
                    )
                    return None
                self._channelToConnectionId[channel] = msg.connIdentity
                channel.sendMessage(
                    ServerToClient.Initialize(
                        transaction_num=self._transactionNum,
                        connIdentity=msg.connIdentity,
                        identity_root=msg.identity_root,
                    )
                )
                return
            if msg.matches.SchemaMapping:
                assert msg.schema in self._requestedSchemaIteration
                schemaIteration = self._receivedSchemaIteration.get(msg.schema, -1) + 1
                self._receivedSchemaIteration[msg.schema] = schemaIteration
                self._subscriptionState.mapSchema(msg.schema, msg.mapping)
                self._mappedSchemas.setdefault(msg.schema)[schemaIteration] = msg.mapping
                # forward the mapping to any of our channels who need it
                for channel in self._unmappedSchemasToChannels.pop(
                    (msg.schema, schemaIteration), set()
                ):
                    channel.sendMessage(
                        ServerToClient.SchemaMapping(schema=msg.schema, mapping=msg.mapping)
                    )
                return
            if msg.matches.SubscriptionData:
                self._subscriptionState.handleSubscriptionData(
                    msg.schema,
                    msg.typename,
                    msg.fieldname_and_value,
                    msg.values,
                    msg.index_values,
                    msg.identities,
                )
                return
            if msg.matches.SubscriptionComplete:
                channelsToMessageToSend = self._subscriptionState.handleSubscriptionComplete(
                    msg.schema, msg.typename, msg.fieldname_and_value, msg.tid
                )
                # replay any transactions that were buffered behind the
                # subscription (distinct name avoids shadowing 'msg')
                for channel, pendingMessages in channelsToMessageToSend.items():
                    for pendingMsg in pendingMessages:
                        self.handleClientToServerMessage(channel, pendingMsg)
                return
            if msg.matches.Transaction:
                self._subscriptionState.handleTransaction(
                    msg.writes, msg.set_adds, msg.set_removes, msg.transaction_id
                )
                return
            if msg.matches.FlushResponse:
                if msg.guid not in self._outgoingFlushGuidToChannelAndFlushGuid:
                    self._logger.error("Received unexpected flush guid: %s", msg.guid)
                    return
                channel, guid = self._outgoingFlushGuidToChannelAndFlushGuid.pop(msg.guid)
                channel.sendMessage(ServerToClient.FlushResponse(guid=guid))
                return
            if msg.matches.TransactionResult:
                if (
                    msg.transaction_guid
                    not in self._outgoingTransactionGuidToChannelAndTransactionGuid
                ):
                    self._logger.error(
                        "Received unexpected TransactionResult message: %s",
                        msg.transaction_guid,
                    )
                    return
                channel, guid = self._outgoingTransactionGuidToChannelAndTransactionGuid.pop(
                    msg.transaction_guid
                )
                channel.sendMessage(
                    ServerToClient.TransactionResult(
                        transaction_guid=guid, success=msg.success, badKey=msg.badKey
                    )
                )
                return
            raise Exception("Don't know how to handle ", msg)
| 1.921875 | 2 |
eka/core/data.py | viswanc/eka | 1 | 12761203 | <reponame>viswanc/eka
r"""
Eka - Data.

Discovers Eka plugin classes registered under the ``eka.plugins.classes``
setuptools entry point and exposes them by name.
"""
import pkg_resources
# Exports
pluginsEntryPoint = 'eka.plugins.classes'
# Map entry-point name -> EntryPoint (not yet loaded; call .load() to import).
Plugins = {EntryPoint.name: EntryPoint for EntryPoint in pkg_resources.iter_entry_points(pluginsEntryPoint)} # #Note: A dictionary is maintained, as there wasn't a way to load a plugin without knowing its distro.
| 1.554688 | 2 |
src/utils/tts.py | barthofu/jarvis | 0 | 12761204 | import tempfile
import os
# import pygame
import config
from playsound import playsound
from gtts import gTTS
# from pygame import mixer
class TTS:
    """Small text-to-speech helper built on gTTS (synthesis) and playsound
    (playback)."""

    def playMP3(self, fileName, filePath = config.SOUNDS_DIR, blocking = False):
        """Play an audio file located at filePath/fileName."""
        playsound(os.path.join(filePath, fileName))

    def speak(self, text, showText = True):
        """Synthesize 'text' with Google TTS, play it, then delete the temp file."""
        if showText:
            print(text)
        try:
            synthesized = gTTS(text = text)
            # delete=False: we close the file first, then play and remove it
            # ourselves.
            with tempfile.NamedTemporaryFile(mode='wb', suffix='.mp3',
                                             delete=False) as handle:
                (tempPath, tempName) = os.path.split(handle.name)
                synthesized.write_to_fp(handle)
            self.playMP3(tempName, tempPath)
            os.remove(os.path.join(tempPath, tempName))
        except Exception as e:
            print('Unknown Google TTS issue: ' + str(e))
compliance_checker/tests/test_ioos_profile.py | neumannd/compliance-checker | 0 | 12761205 | import os
import numpy as np
from netCDF4 import Dataset
from compliance_checker.ioos import (
IOOS0_1Check,
IOOS1_1Check,
IOOS1_2_PlatformIDValidator,
IOOS1_2Check,
NamingAuthorityValidator,
)
from compliance_checker.tests import BaseTestCase
from compliance_checker.tests.helpers import MockTimeSeries, MockVariable
from compliance_checker.tests.resources import STATIC_FILES
from compliance_checker.tests.test_cf import get_results
class TestIOOS0_1(BaseTestCase):
    """
    Tests for the IOOS Inventory Metadata v0.1
    """

    def setUp(self):
        # Use the NCEI Gold Standard Point dataset for IOOS checks
        self.ds = self.load_dataset(STATIC_FILES["ncei_gold_point_1"])
        self.ioos = IOOS0_1Check()

    def test_cc_meta(self):
        # checker identifies itself as the ioos/0.1 spec
        assert self.ioos._cc_spec == "ioos"
        assert self.ioos._cc_spec_version == "0.1"

    def test_global_attributes(self):
        """
        Tests that all global attributes checks are working
        """
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        results = self.ioos.check_global_attributes(nc_obj)
        for result in results:
            self.assert_result_is_bad(result)
        attrs = [
            "acknowledgement",
            "publisher_email",
            "institution",
            "publisher_name",
            "Conventions",
        ]
        for attr in attrs:
            setattr(nc_obj, attr, "test")
        results = self.ioos.check_global_attributes(nc_obj)
        for result in results:
            self.assert_result_is_good(result)

    def test_variable_attributes(self):
        """
        Tests that the platform variable attributes check is working
        """
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.createVariable("platform", "S1", ())
        platform = nc_obj.variables["platform"]
        results = self.ioos.check_variable_attributes(nc_obj)
        for result in results:
            self.assert_result_is_bad(result)
        platform.long_name = "platform"
        platform.short_name = "platform"
        platform.source = "glider"
        platform.ioos_name = "urn:ioos:station:glos:leorgn"
        platform.wmo_id = "1234"
        platform.comment = "test"
        results = self.ioos.check_variable_attributes(nc_obj)
        for result in results:
            self.assert_result_is_good(result)

    def test_variable_units(self):
        """
        Tests that the variable units test is working
        """
        # this check tests that units attribute is present on EVERY variable
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.createVariable("sample_var", "d", ("time",))
        sample_var = nc_obj.variables["sample_var"]
        results = self.ioos.check_variable_units(nc_obj)
        self.assert_result_is_bad(results)
        sample_var.units = "m"
        sample_var.short_name = "sample_var"
        results = self.ioos.check_variable_units(nc_obj)
        self.assert_result_is_good(results)

    def test_altitude_units(self):
        """
        Tests that the altitude variable units test is working
        """
        results = self.ioos.check_altitude_units(self.ds)
        self.assert_result_is_good(results)
        # Now test an nc file with a 'z' variable without units
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.createVariable("z", "d", ("time",))
        z = nc_obj.variables["z"]
        z.short_name = "sample_var"
        results = self.ioos.check_variable_units(nc_obj)
        self.assert_result_is_bad(results)
class TestIOOS1_1(BaseTestCase):
    """
    Tests for the compliance checker implementation of IOOS Metadata Profile
    for NetCDF, Version 1.1
    """

    def setUp(self):
        # Use the IOOS 1_1 dataset for testing
        self.ds = self.load_dataset(STATIC_FILES["ioos_gold_1_1"])
        self.ioos = IOOS1_1Check()

    def test_cc_meta(self):
        # checker identifies itself as the ioos/1.1 spec
        assert self.ioos._cc_spec == "ioos"
        assert self.ioos._cc_spec_version == "1.1"

    def test_required_attributes(self):
        """
        Tests that required attributes test is working properly
        """
        results = self.ioos.check_high(self.ds)
        for result in results:
            self.assert_result_is_good(result)

    def test_recomended_attributes(self):
        """
        Tests that recommended attributes test is working properly
        """
        results = self.ioos.check_recommended(self.ds)
        for result in results:
            self.assert_result_is_good(result)

    def test_bad_platform_variables(self):
        """
        Tests that the platform variable attributes check is working
        """
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.platform = "platform"
        # global attribute 'platform' points to variable that does not exist in dataset
        results = self.ioos.check_platform_variables(nc_obj)
        for result in results:
            self.assert_result_is_bad(result)

    def test_good_platform_variables(self):
        """
        Tests that the platform variable attributes check is working
        """
        results = self.ioos.check_platform_variables(self.ds)
        for result in results:
            self.assert_result_is_good(result)

    def test_bad_geophysical_vars_fill_value(self):
        """
        Tests that the geophysical variable _FillValue check is working
        """
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.createVariable("sample_var", "d", ("time",))
        # Define some variable attributes but don't specify _FillValue
        sample_var = nc_obj.variables["sample_var"]
        sample_var.units = "m"
        sample_var.short_name = "temp"
        # global attribute 'platform' points to variable that does not exist in dataset
        results = self.ioos.check_geophysical_vars_fill_value(nc_obj)
        for result in results:
            self.assert_result_is_bad(result)

    def test_good_geophysical_vars_fill_value(self):
        """
        Tests that the geophysical variable _FillValue check is working
        """
        results = self.ioos.check_geophysical_vars_fill_value(self.ds)
        for result in results:
            self.assert_result_is_good(result)

    def test_bad_geophysical_vars_standard_name(self):
        """
        Tests that the geophysical variable standard_name check is working
        """
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.createVariable("sample_var", "d", ("time",))
        # Define some variable attributes but don't specify _FillValue
        sample_var = nc_obj.variables["sample_var"]
        sample_var.units = "m"
        sample_var.short_name = "temp"
        # global attribute 'platform' points to variable that does not exist in dataset
        results = self.ioos.check_geophysical_vars_standard_name(nc_obj)
        for result in results:
            self.assert_result_is_bad(result)

    def test_good_geophysical_vars_standard_name(self):
        """
        Tests that the geophysical variable standard_name check is working
        """
        results = self.ioos.check_geophysical_vars_standard_name(self.ds)
        for result in results:
            self.assert_result_is_good(result)

    def test_bad_units(self):
        """
        Tests that the valid units check is working
        """
        # Create an empty dataset that writes to /dev/null This acts as a
        # temporary netCDF file in-memory that never gets written to disk.
        nc_obj = Dataset(os.devnull, "w", diskless=True)
        self.addCleanup(nc_obj.close)
        # The dataset needs at least one variable to check that it's missing
        # all the required attributes.
        nc_obj.createDimension("time", 1)
        nc_obj.createVariable("temperature", "d", ("time",))
        # Define some variable attributes but don't specify _FillValue
        sample_var = nc_obj.variables["temperature"]
        sample_var.units = "degC"  # Not valid units
        sample_var.short_name = "temp"
        # NOTE(review): this calls check_geophysical_vars_standard_name, not
        # check_units -- looks like a copy-paste from the test above; confirm
        # which check was intended.
        # global attribute 'platform' points to variable that does not exist in dataset
        results = self.ioos.check_geophysical_vars_standard_name(nc_obj)
        for result in results:
            self.assert_result_is_bad(result)

    def test_good_units(self):
        """
        Tests that the valid units check is working
        """
        results = self.ioos.check_units(self.ds)
        for result in results:
            self.assert_result_is_good(result)
class TestIOOS1_2(BaseTestCase):
"""
Tests for the compliance checker implementation of IOOS Metadata Profile
    for NetCDF, Version 1.2
"""
    def setUp(self):
        # Each test builds its own MockTimeSeries dataset; only the checker
        # instance is shared.
        self.ioos = IOOS1_2Check()
    def test_check_geophysical_vars_have_attrs(self):
        """Geophysical variables missing required attrs fail; fully attributed
        variables pass."""
        # create geophysical variable
        ds = MockTimeSeries()  # time, lat, lon, depth
        temp = ds.createVariable("temp", np.float64, dimensions=("time",))
        # should fail here
        results = self.ioos.check_geophysical_vars_have_attrs(ds)
        scored, out_of, messages = get_results(results)
        self.assertLess(scored, out_of)
        # set the necessary attributes
        ds = MockTimeSeries(default_fill_value=9999999999.0)  # time, lat, lon, depth
        temp = ds.createVariable(
            "temp", np.float64, fill_value=9999999999.0
        )  # _FillValue
        temp.setncattr("missing_value", 9999999999.0)
        temp.setncattr("standard_name", "sea_surface_temperature")
        temp.setncattr(
            "standard_name_url",
            "http://cfconventions.org/Data/cf-standard-names/64/build/cf-standard-name-table.html",
        )
        temp.setncattr("units", "degree_C")
        temp.setncattr("platform", "myPlatform")
        results = self.ioos.check_geophysical_vars_have_attrs(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
    def test_check_geospatial_vars_have_attrs(self):
        """
        A dataset whose coordinate variables lack the required attributes
        fails; supplying standard_name, standard_name_url, units,
        missing_value (and _FillValue via default_fill_value) makes it pass.
        """
        # dataset whose variables lack all required attributes
        ds = MockTimeSeries()  # time, lat, lon, depth
        temp = ds.createVariable("temp", np.float64, dimensions=("time",))
        # should fail here
        results = self.ioos.check_geospatial_vars_have_attrs(ds)
        scored, out_of, messages = get_results(results)
        self.assertLess(scored, out_of)
        # should pass - default_fill_value sets _FillValue attr
        ds = MockTimeSeries(default_fill_value=9999999999.0)  # time, lat, lon, depth
        ds.variables["time"].setncattr("standard_name", "time")
        ds.variables["time"].setncattr(
            "standard_name_url",
            "http://cfconventions.org/Data/cf-standard-names/64/build/cf-standard-name-table.html",
        )
        ds.variables["time"].setncattr("units", "hours since 1970-01-01T00:00:00")
        ds.variables["time"].setncattr("missing_value", 9999999999.0)
        results = self.ioos.check_geospatial_vars_have_attrs(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
    def test_check_contributor_role_and_vocabulary(self):
        """
        Walk every combination of good/bad contributor_role and
        contributor_role_vocabulary global attributes.  The check returns
        two results: results[0] covers the role, results[1] the vocabulary.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no contributor_role or vocab, fail both
        results = self.ioos.check_contributor_role_and_vocabulary(ds)
        self.assertFalse(all(r.value for r in results))
        # bad contributor_role and vocab
        ds.setncattr("contributor_role", "bad")
        ds.setncattr("contributor_role_vocabulary", "bad")
        results = self.ioos.check_contributor_role_and_vocabulary(ds)
        self.assertFalse(all(r.value for r in results))
        # good role, bad vocab
        ds.setncattr("contributor_role", "contributor")
        results = self.ioos.check_contributor_role_and_vocabulary(ds)
        self.assertTrue(results[0].value)
        self.assertEqual(results[0].msgs, [])
        self.assertFalse(results[1].value)
        # bad role, good vocab
        ds.setncattr("contributor_role", "bad")
        ds.setncattr(
            "contributor_role_vocabulary",
            "http://vocab.nerc.ac.uk/collection/G04/current/",
        )
        results = self.ioos.check_contributor_role_and_vocabulary(ds)
        self.assertFalse(results[0].value)
        self.assertTrue(results[1].value)
        self.assertEqual(results[1].msgs, [])
        # good role, good vocab
        ds.setncattr("contributor_role", "contributor")
        ds.setncattr(
            "contributor_role_vocabulary",
            "http://vocab.nerc.ac.uk/collection/G04/current/",
        )
        results = self.ioos.check_contributor_role_and_vocabulary(ds)
        self.assertTrue(results[0].value)
        self.assertEqual(results[0].msgs, [])
        self.assertTrue(results[1].value)
        self.assertEqual(results[1].msgs, [])
        # a role from the NOAA/ISO CI_RoleCode vocabulary is also accepted
        ds.setncattr("contributor_role", "resourceProvider")
        ds.setncattr(
            "contributor_role_vocabulary",
            "https://www.ngdc.noaa.gov/wiki/index.php?title=ISO_19115_and_19115-2_CodeList_Dictionaries#CI_RoleCode",
        )
        results = self.ioos.check_contributor_role_and_vocabulary(ds)
        self.assertTrue(results[0].value)
        self.assertEqual(results[0].msgs, [])
        self.assertTrue(results[1].value)
        self.assertEqual(results[1].msgs, [])
    def test_check_creator_and_publisher_type(self):
        """
        Checks the creator_type and publisher_type global attributes with
        the following values:
        Empty: Valid, defaults to "person" when not specified, which is
        contained in the list of valid values.
        Bad values: Invalid, not contained in list of valid values.
        Good values: Valid, contained in list.
        """
        ds = MockTimeSeries()
        # values which are not set/specified default to person, which is valid
        result_list = self.ioos.check_creator_and_publisher_type(ds)
        self.assertTrue(all(res.value for res in result_list))
        # create invalid values for attribute
        ds.setncattr("creator_type", "PI")
        ds.setncattr("publisher_type", "Funder")
        result_list = self.ioos.check_creator_and_publisher_type(ds)
        # both failure messages must cite the allowed value list
        err_regex = (
            r"^If specified, \w+_type must be in value list "
            r"\(\['group', 'institution', 'person', 'position'\]\)$"
        )
        for res in result_list:
            self.assertFalse(res.value)
            self.assertRegex(res.msgs[0], err_regex)
        # good values
        ds.setncattr("creator_type", "person")
        ds.setncattr("publisher_type", "institution")
        result_list = self.ioos.check_creator_and_publisher_type(ds)
        self.assertTrue(all(res.value for res in result_list))
    def test_check_gts_ingest_global(self):
        """
        The global gts_ingest attribute is optional; when present it must be
        the string "true" or "false".
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no gts_ingest_requirements, should pass
        result = self.ioos.check_gts_ingest_global(ds)
        self.assertTrue(result.value)
        self.assertEqual(result.msgs, [])
        # passing value
        ds.setncattr("gts_ingest", "true")
        result = self.ioos.check_gts_ingest_global(ds)
        self.assertTrue(result.value)
        self.assertEqual(result.msgs, [])
        # "false" is equally valid
        ds.setncattr("gts_ingest", "false")
        result = self.ioos.check_gts_ingest_global(ds)
        self.assertTrue(result.value)
        # any other string fails
        ds.setncattr("gts_ingest", "notgood")
        result = self.ioos.check_gts_ingest_global(ds)
        self.assertFalse(result.value)
    def test_check_gts_ingest_requirements(self):
        """
        A variable qualifies for NDBC/GTS ingest only when it is flagged
        with gts_ingest, has a standard_name, has an ancillary variable
        whose standard_name is aggregate_quality_flag, and has parseable
        time units.  Each prerequisite is added in turn.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # NOTE: this check will always have a "failing" result; see
        # https://github.com/ioos/compliance-checker/issues/759#issuecomment-625356938
        # and subsequent discussion
        # no gts_ingest_requirements, should pass
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        # flag for ingest, no variables flagged - default pass
        ds.setncattr("gts_ingest", "true")
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        # give one variable the gts_ingest attribute
        # no standard_name or ancillary vars, should fail
        ds.variables["time"].setncattr("gts_ingest", "true")
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        # no ancillary vars, should fail
        ds.variables["time"].setncattr("gts_ingest", "true")
        ds.variables["time"].setncattr("standard_name", "time")
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        self.assertIn(
            "The following variables did not qualify for NDBC/GTS Ingest: time\n",
            result.msgs,
        )
        # set ancillary var with bad standard name
        tmp = ds.createVariable("tmp", np.byte, ("time",))
        tmp.setncattr("standard_name", "bad")
        ds.variables["time"].setncattr("ancillary_variables", "tmp")
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        self.assertIn(
            "The following variables did not qualify for NDBC/GTS Ingest: time\n",
            result.msgs,
        )
        # good ancillary var standard name, time units are bad
        tmp.setncattr("standard_name", "aggregate_quality_flag")
        ds.variables["time"].setncattr("units", "bad since bad")
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        self.assertIn(
            "The following variables did not qualify for NDBC/GTS Ingest: time\n",
            result.msgs,
        )
        # good ancillary var stdname, good units; variable now qualifies
        # (result.value stays False by design -- see NOTE above)
        tmp.setncattr("standard_name", "aggregate_quality_flag")
        ds.variables["time"].setncattr("units", "seconds since 1970-01-01T00:00:00Z")
        result = self.ioos.check_gts_ingest_requirements(ds)
        self.assertFalse(result.value)
        self.assertIn(
            "The following variables qualified for NDBC/GTS Ingest: time\n", result.msgs
        )
    def test_check_instrument_variables(self):
        """
        Instrument variables may carry string "component" and
        "discriminant" attributes; a non-string component fails.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no instrument variable, should pass
        results = self.ioos.check_instrument_variables(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
        # geophysical variable that references an instrument variable
        temp = ds.createVariable("temp", np.float64, dimensions=("time",))
        temp.setncattr("cf_role", "timeseries")
        temp.setncattr("standard_name", "sea_surface_temperature")
        temp.setncattr("units", "degree_C")
        temp.setncattr("axis", "Y")
        temp.setncattr("instrument", "myInstrument")
        temp[:] = 45.0
        instr = ds.createVariable("myInstrument", np.float64, dimensions=("time",))
        # give instrument variable with component
        instr.setncattr("component", "someComponent")
        results = self.ioos.check_instrument_variables(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
        # give discriminant
        instr.setncattr("discriminant", "someDiscriminant")
        results = self.ioos.check_instrument_variables(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
        # bad component: an int is not an acceptable value
        instr.setncattr("component", 45)
        results = self.ioos.check_instrument_variables(ds)
        scored, out_of, messages = get_results(results)
        self.assertLess(scored, out_of)
    def test_check_wmo_platform_code(self):
        """
        wmo_platform_code is optional; when present it must be an
        alphanumeric string of length 5, or a numeric string of length 7.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no wmo_platform_code, pass
        result = self.ioos.check_wmo_platform_code(ds)
        self.assertTrue(result.value)
        self.assertEqual(result.msgs, [])
        # valid 5-digit code
        ds.setncattr("wmo_platform_code", "12345")
        result = self.ioos.check_wmo_platform_code(ds)
        self.assertTrue(result.value)
        # valid 7-digit code
        ds.setncattr("wmo_platform_code", "7654321")
        result = self.ioos.check_wmo_platform_code(ds)
        self.assertTrue(result.value)
        # alphanumeric of length 5, valid
        ds.setncattr("wmo_platform_code", "abcd1")
        result = self.ioos.check_wmo_platform_code(ds)
        self.assertTrue(result.value)
        # invalid length, fail
        ds.setncattr("wmo_platform_code", "123")
        result = self.ioos.check_wmo_platform_code(ds)
        self.assertFalse(result.value)
        # alphanumeric of length 7 is not allowed, fail
        ds.setncattr("wmo_platform_code", "1a2b3c7")
        result = self.ioos.check_wmo_platform_code(ds)
        self.assertFalse(result.value)
    def test_check_standard_name(self):
        """
        Every variable (including QARTOD flag variables) needs a valid
        CF standard_name; missing or unknown names fail the check.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no standard names
        results = self.ioos.check_standard_name(ds)
        scored, out_of, messages = get_results(results)
        self.assertLess(scored, out_of)
        # give standard names to all variables
        ds.variables["time"].setncattr("standard_name", "time")
        ds.variables["lon"].setncattr("standard_name", "longitude")
        ds.variables["lat"].setncattr("standard_name", "latitude")
        ds.variables["depth"].setncattr("standard_name", "depth")
        results = self.ioos.check_standard_name(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
        # add a QARTOD variable, no standard name - should fail
        qr = ds.createVariable("depth_qc", np.byte)
        qr.setncattr("flag_meanings", "blah")
        results = self.ioos.check_standard_name(ds)
        scored, out_of, messages = get_results(results)
        self.assertLess(scored, out_of)
        # bad standard name
        qr.setncattr("standard_name", "blah")
        results = self.ioos.check_standard_name(ds)
        scored, out_of, messages = get_results(results)
        self.assertLess(scored, out_of)
        # good standard name
        qr.setncattr("standard_name", "spike_test_quality_flag")
        results = self.ioos.check_standard_name(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
def test_naming_authority_validation(self):
test_attr_name = "naming_authority"
validator = NamingAuthorityValidator()
# check URL - should pass
self.assertTrue(validator.validate(test_attr_name, "https://ioos.us")[0])
# check reverse DNS - should pass
self.assertTrue(validator.validate(test_attr_name, "edu.ucar.unidata")[0])
# email address is neither of the above, so should fail
bad_result = validator.validate(test_attr_name, "<EMAIL>")
self.assertFalse(bad_result[0])
self.assertEqual(
bad_result[1],
[
"naming_authority should either be a URL or a "
'reversed DNS name (e.g "edu.ucar.unidata")'
],
)
def test_platform_id_validation(self):
attn = "platform_id"
attv = "alphaNum3R1C"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "alpha"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "311123331112"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "---fail---"
v = IOOS1_2_PlatformIDValidator()
self.assertFalse(v.validate(attn, attv)[0])
def test_check_platform_cf_role(self):
"""
Check that cf_role inside platform variables only allows certain
values, namely "profile_id", "timeseries_id", or "trajectory_id"
"""
ds = MockTimeSeries()
plat_var = ds.createVariable("platform", np.int8, ())
ds.variables["depth"].platform = "platform"
self.ioos.setup(ds)
results = self.ioos.check_platform_variable_cf_role(ds)
# don't set attribute, should raise error about attribute not
# existing
self.assertEqual(len(results), 1)
score, out_of = results[0].value
self.assertLess(score, out_of)
# set to invalid value
plat_var.setncattr("cf_role", "bad_value")
results = self.ioos.check_platform_variable_cf_role(ds)
self.assertLess(score, out_of)
expected_vals = {"profile_id", "timeseries_id", "trajectory_id"}
expect_msg = (
'Platform variable "platform" must have a cf_role attribute '
"with one of the values {}".format(sorted(expected_vals))
)
self.assertEqual(results[0].msgs, [expect_msg])
# set to valid value
plat_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_platform_variable_cf_role(ds)
score, out_of = results[0].value
self.assertEqual(score, out_of)
    def test_check_platform_global(self):
        """
        The global platform attribute must be present and be a single
        token with no whitespace.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no global attr, fail
        self.assertFalse(self.ioos.check_platform_global(ds).value)
        # bad global attr (embedded space), fail
        ds.setncattr("platform", "bad value")
        self.assertFalse(self.ioos.check_platform_global(ds).value)
        # another bad value (leading whitespace)
        ds.setncattr("platform", " bad")
        self.assertFalse(self.ioos.check_platform_global(ds).value)
        # good value
        ds.setncattr("platform", "single_string")
        res = self.ioos.check_platform_global(ds)
        self.assertTrue(res.value)
        self.assertEqual(res.msgs, [])
    def test_check_single_platform(self):
        """
        A dataset must declare exactly one platform variable, and the
        global platform attribute and per-variable platform references
        must be consistent with each other.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no global attr but also no platform variables, should pass
        result = self.ioos.check_single_platform(ds)
        self.assertTrue(result.value)
        self.assertEqual(result.msgs, [])
        # give platform global, no variables, fail
        ds.setncattr("platform", "buoy")
        result = self.ioos.check_single_platform(ds)
        self.assertFalse(result.value)
        # global platform, one platform variable, pass
        temp = ds.createVariable("temp", "d", ("time"))
        temp.setncattr("platform", "platform_var")
        plat = ds.createVariable("platform_var", np.byte)
        result = self.ioos.check_single_platform(ds)
        self.assertTrue(result.value)
        self.assertEqual(result.msgs, [])
        # two platform variables, fail
        temp2 = ds.createVariable("temp2", "d", ("time"))
        temp2.setncattr("platform", "platform_var2")
        plat = ds.createVariable("platform_var2", np.byte)
        result = self.ioos.check_single_platform(ds)
        self.assertFalse(result.value)
        # no global attr, one variable, fail
        ds = MockTimeSeries()  # time, lat, lon, depth
        temp = ds.createVariable("temp", "d", ("time"))
        temp.setncattr("platform", "platform_var")
        plat = ds.createVariable("platform_var", np.byte)
        result = self.ioos.check_single_platform(ds)
        self.assertFalse(result.value)
def test_check_cf_dsg(self):
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("platform", "single_string")
# correct cf_role & featureType, pass
ds.setncattr("featureType", "profile")
ds.createDimension("profile", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("profile",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertTrue(all(r.value for r in results))
self.assertTrue(all(r.msgs == [] for r in results))
# correct featureType, incorrect cf_role var dimension
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("featureType", "trajectoryprofile")
ds.createDimension("trajectory", 2) # should only be 1
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==timeSeries, cf_role=timeseries_id
ds = MockTimeSeries()
ds.setncattr("featureType", "timeSeries")
ds.createDimension("station", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
# check should pass with no results
self.assertEqual(results, [])
# featureType==timeSeriesProfile, cf_role==timeseries_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "timeSeriesProfile")
ds.createDimension("station", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==timeSeriesProfile, cf_role==timeseries_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("platform", "platform")
ds.setncattr("featureType", "timeSeriesProfile")
ds.createDimension("station", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==trajectory, cf_role==trajectory_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectory")
ds.createDimension("trajectory", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==trajectory, cf_role==trajectory, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectory")
ds.createDimension("trajectory", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==trajectoryProfile, cf_role==trajectory_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectoryProfile")
ds.createDimension("trajectoryprof", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectoryprof",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==trajectoryProfile, cf_role==trajectory_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectoryProfile")
ds.createDimension("trajectoryprof", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectoryprof",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==profile, cf_role==profile_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "profile")
ds.createDimension("prof", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("prof",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==profile, cf_role==profile_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "profile")
ds.createDimension("prof", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("prof",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==point -- do nothing
ds = MockTimeSeries()
ds.setncattr("featureType", "point")
ds.createDimension("blah", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("blah",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
def test_check_platform_vocabulary(self):
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("platform_vocabulary", "http://google.com")
result = self.ioos.check_platform_vocabulary(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
ds.setncattr("platform_vocabulary", "bad")
self.assertFalse(self.ioos.check_platform_vocabulary(ds).value)
    def test_check_qartod_variables_flags(self):
        """
        QARTOD variables need matching flag_values (byte array) and
        flag_meanings (space-separated words) attributes; the two results
        per variable cover flag_values and flag_meanings respectively.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no QARTOD variables
        results = self.ioos.check_qartod_variables_flags(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
        # QARTOD variable without flag_values, flag_meanings (fail)
        qr = ds.createVariable("depth_qc", np.byte)
        qr.setncattr("standard_name", "spike_test_quality_flag")
        results = self.ioos.check_qartod_variables_flags(ds)
        self.assertTrue(not any(r.value for r in results))  # all False
        # QARTOD variable with flag_values, but still without flag_meanings
        qr.setncattr("flag_values", np.array([0, 1, 2], dtype=np.byte))
        results = self.ioos.check_qartod_variables_flags(ds)
        self.assertEqual(results[0].value[0], results[0].value[1])  # should pass
        self.assertFalse(results[1].value)  # still fail
        # QARTOD variable with flag_meanings and flag_values
        qr.setncattr("flag_meanings", "x y z")  # alphanumeric, space-separated
        results = self.ioos.check_qartod_variables_flags(ds)
        self.assertEqual(results[0].value[0], results[0].value[1])  # pass
        self.assertEqual(results[1].value[0], results[1].value[1])  # pass
        # flag_values array not equal to length of flag_meanings
        qr.setncattr("flag_values", np.array([0, 1], dtype=np.byte))
        results = self.ioos.check_qartod_variables_flags(ds)
        self.assertLess(results[0].value[0], results[0].value[1])  # should fail
        self.assertEqual(results[1].value[0], results[1].value[1])  # pass
        # flag_values right length, wrong type (float instead of byte)
        qr.setncattr("flag_values", np.array([0, 1, 2], dtype=np.float64))
        results = self.ioos.check_qartod_variables_flags(ds)
        self.assertLess(results[0].value[0], results[0].value[1])  # should fail
        self.assertEqual(results[1].value[0], results[1].value[1])  # pass
    def test_check_qartod_variables_references(self):
        """
        QARTOD variables must carry a references attribute containing a
        valid URL.
        """
        ds = MockTimeSeries()  # time, lat, lon, depth
        # no QARTOD variables
        results = self.ioos.check_qartod_variables_references(ds)
        scored, out_of, messages = get_results(results)
        self.assertEqual(scored, out_of)
        # QARTOD variable without references (fail)
        qr = ds.createVariable("depth_qc", np.byte)
        qr.setncattr("flag_meanings", "blah")
        qr.setncattr("standard_name", "spike_test_quality_flag")
        results = self.ioos.check_qartod_variables_references(ds)
        self.assertFalse(all(r.value for r in results))
        # QARTOD variable with references (pass)
        qr.setncattr("references", "http://services.cormp.org/quality.php")
        results = self.ioos.check_qartod_variables_references(ds)
        self.assertTrue(all(r.value for r in results))
        self.assertEqual(results[0].msgs, [])  # only one Result to test
        # QARTOD variable with bad references (not a URL; fail)
        qr.setncattr(
            "references", r"p9q384ht09q38@@####???????////??//\/\/\/\//\/\74ht"
        )
        results = self.ioos.check_qartod_variables_references(ds)
        self.assertFalse(all(r.value for r in results))
    def test_check_ioos_ingest(self):
        """
        The global ioos_ingest attribute is optional; when present it must
        be a case-insensitive "true" or "false" string.  Non-string values
        and any other text fail.
        """
        ds = MockTimeSeries()
        # no value, pass
        res = self.ioos.check_ioos_ingest(ds)
        self.assertTrue(res.value)
        self.assertEqual(res.msgs, [])
        # value false
        ds.setncattr("ioos_ingest", "false")
        self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
        # value true
        ds.setncattr("ioos_ingest", "true")
        self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
        # case insensitive
        ds.setncattr("ioos_ingest", "True")
        self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
        ds.setncattr("ioos_ingest", "False")
        self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
        # anything else fails, including a non-string 0
        ds.setncattr("ioos_ingest", "badval")
        self.assertFalse(self.ioos.check_ioos_ingest(ds).value)
        ds.setncattr("ioos_ingest", 0)
        self.assertFalse(self.ioos.check_ioos_ingest(ds).value)
    def test_vertical_dimension(self):
        """
        The vertical coordinate must use positive "up"/"down" and a length
        unit convertible to one of the accepted depth units; every alias
        in accepted_units must pass.
        """
        # MockTimeSeries has a depth variable, with axis of 'Z', units of 'm',
        # and positive = 'down'
        nc_obj = MockTimeSeries()
        result = self.ioos.check_vertical_coordinates(nc_obj)[0]
        self.assertEqual(*result.value)
        # "upwards" is not an accepted spelling of positive
        nc_obj.variables["depth"].positive = "upwards"
        result = self.ioos.check_vertical_coordinates(nc_obj)[0]
        self.assertNotEqual(*result.value)
        nc_obj.variables["depth"].positive = "up"
        result = self.ioos.check_vertical_coordinates(nc_obj)[0]
        self.assertEqual(*result.value)
        # test units: a non-length unit must fail with the expected message
        nc_obj.variables["depth"].units = "furlong"
        result = self.ioos.check_vertical_coordinates(nc_obj)[0]
        expected_msg = (
            "depth's units attribute furlong is not equivalent to "
            "one of ('meter', 'inch', 'foot', 'yard', "
            "'US_survey_foot', 'mile', 'fathom')"
        )
        self.assertEqual(result.msgs[0], expected_msg)
        self.assertNotEqual(*result.value)
        # every accepted spelling/alias of the permitted length units
        accepted_units = (
            "meter",
            "meters",
            "inch",
            "foot",
            "yard",
            "mile",
            "miles",
            "US_survey_foot",
            "US_survey_feet",
            "fathom",
            "fathoms",
            "international_inch",
            "international_inches",
            "international_foot",
            "international_feet",
            "international_yard",
            "international_yards",
            "international_mile",
            "international_miles",
            "inches",
            "in",
            "feet",
            "ft",
            "yd",
            "mi",
        )
        for units in accepted_units:
            nc_obj.variables["depth"].units = units
            result = self.ioos.check_vertical_coordinates(nc_obj)[0]
            self.assertEqual(*result.value)
| 2.1875 | 2 |
crnn/__init__.py | shunj-g/detectocr | 1 | 12761206 | import crnn.crnn_torch as models | 1.039063 | 1 |
gram/urls.py | nderituliz/Instagram-App | 0 | 12761207 | <filename>gram/urls.py
from django.conf import settings
from django.urls import path,re_path
from django.conf.urls.static import static
from . import views
# URL routes for the gram app.
urlpatterns = [
    path('', views.home, name='home'),
    path('search/', views.search_results, name='search_results'),
    path('image/', views.add_image, name='upload_image'),
    path('profile/', views.profile_info, name='profile'),
    # BUG FIX: routes that capture a numeric id must use re_path --
    # ``path()`` treats "(\d+)" as literal text and would never match,
    # so the follow/unfollow routes were unreachable.
    re_path(r'comment/(\d+)', views.comment, name='comment'),
    re_path(r'follow/(\d+)', views.follow, name='follow'),
    re_path(r'unfollow/(\d+)', views.unfollow, name='unfollow'),
    re_path(r'likes/(\d+)/', views.like_images, name='likes'),
]

if settings.DEBUG:
    # Serve user-uploaded media from Django itself during development only.
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
tests/integration/check_exists.py | BenGardiner/strictdoc | 47 | 12761208 | import argparse
import os.path
# https://stackoverflow.com/a/19476216/598057
import sys
# CLI helper used by integration tests: verify that a path exists (or,
# with --invert, does not exist) and optionally that it is a file or a
# directory.  Exit code 0 on success, 1 with a message on stderr otherwise.
main_parser = argparse.ArgumentParser()
main_parser.add_argument(
    "input_path", type=str, help="One or more folders with *.sdoc files"
)
main_parser.add_argument(
    "--file",
    action="store_true",
    default=False,
    help="Enforce checking that input_path is a file",
)
main_parser.add_argument(
    "--dir",
    action="store_true",
    default=False,
    help="Enforce checking that input_path is a directory",
)
main_parser.add_argument(
    "--invert",
    action="store_true",
    default=False,
    # BUG FIX: the help text was a copy-paste of --file's help.
    help="Invert the checks: require that input_path does NOT exist",
)
args = main_parser.parse_args()
invert: bool = args.invert


def _fail(message: str) -> None:
    """Report *message* on stderr and terminate with exit code 1."""
    # sys.exit is preferred over the site-provided exit() builtin, which
    # is not guaranteed to be available in all execution contexts.
    print(message, file=sys.stderr)
    sys.exit(1)


if not invert:
    if not os.path.exists(args.input_path):
        _fail("error: path does not exist: {}".format(args.input_path))
    if args.file and not os.path.isfile(args.input_path):
        _fail("error: path is not a file: {}".format(args.input_path))
    if args.dir and not os.path.isdir(args.input_path):
        _fail("error: path is not a directory: {}".format(args.input_path))
else:
    if os.path.exists(args.input_path):
        _fail(
            "error: expected path to not exist, but it does: {}".format(
                args.input_path
            )
        )
    if args.file and os.path.isfile(args.input_path):
        _fail(
            "error: expected path to not exist, but is a file: {}".format(
                args.input_path
            )
        )
    if args.dir and os.path.isdir(args.input_path):
        _fail(
            "error: expected path to not exist, but is a directory: {}".format(
                args.input_path
            )
        )
sys.exit(0)
| 3.203125 | 3 |
lib/galaxy/webapps/tool_shed/api/categories.py | KyleL1998/galaxy | 0 | 12761209 | <reponame>KyleL1998/galaxy
import logging
import tool_shed.util.shed_util_common as suc
from galaxy import (
exceptions,
util,
web
)
from galaxy.web import (
_future_expose_api as expose_api,
_future_expose_api_anonymous_and_sessionless as expose_api_anonymous_and_sessionless,
require_admin as require_admin
)
from galaxy.web.base.controller import BaseAPIController
from tool_shed.util import repository_util
log = logging.getLogger(__name__)
class CategoriesController(BaseAPIController):
    """RESTful controller for interactions with categories in the Tool Shed."""

    def __get_repository_count(self, trans, category_name):
        # Number of viewable repositories/suites registered under the named
        # category; 0 when the category is unknown.
        # NOTE(review): not referenced inside this controller -- presumably
        # kept for external use; confirm before removing.
        return self.app.repository_registry.viewable_repositories_and_suites_by_category.get(category_name, 0)

    def __get_value_mapper(self, trans):
        # Serialize database ids in their encoded (public) form.
        value_mapper = {'id': trans.security.encode_id}
        return value_mapper

    @expose_api
    @require_admin
    def create(self, trans, payload, **kwd):
        """
        POST /api/categories
        Return a dictionary of information about the created category.
        The following parameters are included in the payload:

        :param name (required): the name of the category
        :param description (optional): the description of the category (if not provided, the name will be used)

        Example: POST /api/categories/?key=XXXYYYXXXYYY
        Content-Disposition: form-data; name="name" Category_Name
        Content-Disposition: form-data; name="description" Category_Description

        :raises exceptions.Conflict: if a category with the same name exists
        :raises exceptions.RequestParameterMissingException: if "name" is absent
        """
        category_dict = dict(message='', status='ok')
        name = payload.get('name', '')
        if name:
            description = payload.get('description', '')
            if not description:
                # Default the description to the name.
                description = name
            if suc.get_category_by_name(self.app, name):
                raise exceptions.Conflict('A category with that name already exists.')
            else:
                # Create the category
                category = self.app.model.Category(name=name, description=description)
                trans.sa_session.add(category)
                trans.sa_session.flush()
                category_dict = category.to_dict(view='element',
                                                 value_mapper=self.__get_value_mapper(trans))
                category_dict['message'] = "Category '%s' has been created" % str(category.name)
                category_dict['url'] = web.url_for(controller='categories',
                                                   action='show',
                                                   id=trans.security.encode_id(category.id))
        else:
            raise exceptions.RequestParameterMissingException('Missing required parameter "name".')
        return category_dict

    @expose_api_anonymous_and_sessionless
    def get_repositories(self, trans, category_id, **kwd):
        """
        GET /api/categories/{encoded_category_id}/repositories
        Return information about the provided category and the repositories in that category.

        :param id: the encoded id of the Category object
        :param sort_key: the field by which the repositories should be sorted
        :param sort_order: ascending or descending sort
        :param page: the page number to return

        Example: GET localhost:9009/api/categories/f9cad7b01a472135/repositories
        """
        installable = util.asbool(kwd.get('installable', 'false'))
        sort_key = kwd.get('sort_key', 'name')
        sort_order = kwd.get('sort_order', 'asc')
        page = kwd.get('page', None)
        category = suc.get_category(self.app, category_id)
        if category is None:
            # BUG FIX: the original interpolated the *builtin* ``id``
            # function here ("<built-in function id>") instead of the
            # requested category id.
            category_dict = dict(message='Unable to locate category record for id %s.' % (str(category_id)),
                                 status='error')
            return category_dict
        category_dict = category.to_dict(view='element',
                                         value_mapper=self.__get_value_mapper(trans))
        category_dict['repository_count'] = suc.count_repositories_in_category(self.app, category_id)
        category_dict['url'] = web.url_for(controller='categories',
                                           action='show',
                                           id=trans.security.encode_id(category.id))
        repositories = repository_util.get_repositories_by_category(self.app,
                                                                    category.id,
                                                                    installable=installable,
                                                                    sort_order=sort_order,
                                                                    sort_key=sort_key,
                                                                    page=page)
        category_dict['repositories'] = repositories
        return category_dict

    @expose_api_anonymous_and_sessionless
    def index(self, trans, deleted=False, **kwd):
        """
        GET /api/categories
        Return a list of dictionaries that contain information about each Category.

        :param deleted: flag used to include deleted categories (admin only)

        Example: GET localhost:9009/api/categories
        """
        category_dicts = []
        deleted = util.asbool(deleted)
        if deleted and not trans.user_is_admin:
            raise exceptions.AdminRequiredException('Only administrators can query deleted categories.')
        for category in trans.sa_session.query(self.app.model.Category) \
                                        .filter(self.app.model.Category.table.c.deleted == deleted) \
                                        .order_by(self.app.model.Category.table.c.name):
            category_dict = category.to_dict(view='collection',
                                             value_mapper=self.__get_value_mapper(trans))
            category_dict['url'] = web.url_for(controller='categories',
                                               action='show',
                                               id=trans.security.encode_id(category.id))
            category_dict['repositories'] = self.app.repository_registry.viewable_repositories_and_suites_by_category.get(category.name, 0)
            category_dicts.append(category_dict)
        return category_dicts

    @expose_api_anonymous_and_sessionless
    def show(self, trans, id, **kwd):
        """
        GET /api/categories/{encoded_category_id}
        Return a dictionary of information about a category.

        :param id: the encoded id of the Category object

        Example: GET localhost:9009/api/categories/f9cad7b01a472135
        """
        category = suc.get_category(self.app, id)
        if category is None:
            category_dict = dict(message='Unable to locate category record for id %s.' % (str(id)),
                                 status='error')
            return category_dict
        category_dict = category.to_dict(view='element',
                                         value_mapper=self.__get_value_mapper(trans))
        category_dict['url'] = web.url_for(controller='categories',
                                           action='show',
                                           id=trans.security.encode_id(category.id))
        return category_dict
| 2.140625 | 2 |
tests/integration/test_storage_s3/s3_mocks/echo.py | pdv-ru/ClickHouse | 15,577 | 12761210 | import http.server
import sys
class RequestHandler(http.server.BaseHTTPRequestHandler):
    """Tiny echo server for tests: ``GET /get-my-path/<rest>`` answers
    ``/<rest>``, ``GET /`` answers ``OK``, anything else is a 404."""

    def _send_status(self, code):
        # Emit the status line plus a plain-text content type header.
        self.send_response(code)
        self.send_header("Content-Type", "text/plain")
        self.end_headers()

    def do_HEAD(self):
        if self.path.startswith("/get-my-path/"):
            self._send_status(200)
        elif self.path == "/":
            self._send_status(200)
        else:
            self._send_status(404)

    def do_GET(self):
        # Reuse HEAD for the headers, then write the matching body.
        self.do_HEAD()
        if self.path.startswith("/get-my-path/"):
            self.wfile.write(b'/' + self.path.split('/', maxsplit=2)[2].encode())
        elif self.path == "/":
            self.wfile.write(b"OK")
# Listen on all interfaces, on the port passed as the first CLI argument,
# and serve until the process is killed.
httpd = http.server.HTTPServer(("0.0.0.0", int(sys.argv[1])), RequestHandler)
httpd.serve_forever()
| 3.125 | 3 |
utils/remove_duplicates.py | data-rock/semantic-segmentation-editor | 0 | 12761211 | <filename>utils/remove_duplicates.py<gh_stars>0
import pymongo
from collections import Counter
# Meteor's bundled MongoDB (development default is port 3001, db 'meteor').
conn_url = 'mongodb://127.0.0.1:3001/meteor'
print(conn_url)
conn = pymongo.MongoClient(conn_url)
db = 'meteor'
print(conn[db].list_collection_names())
# Collection holding the semantic-segmentation-editor samples.
coll = conn[db]['SseSamples']
def remove_dups(folder):
    """Delete duplicate SseSamples documents in *folder*.

    For every file name that appears more than once, the first document
    returned by the cursor is kept and all later ones are removed.
    """
    occurrences = Counter(doc['file'] for doc in coll.find({'folder': folder}))
    duplicated = [name for name, count in occurrences.items() if count > 1]
    print(folder, 'dups: %d' % len(duplicated))
    for name in duplicated:
        for idx, doc in enumerate(coll.find({'folder': folder, 'file': name})):
            if idx > 0:
                coll.delete_one({'_id': doc['_id']})
# Clean up each prediction folder once.
remove_dups('/SPF-129_pred')
remove_dups('/SPF-157_pred')
remove_dups('/STR-06-030C_pred')
# NOTE: '/SPF-157_pred' was previously listed twice; the redundant second
# pass was removed (a repeat run finds no remaining duplicates and is a no-op).
| 3 | 3 |
feffery_antd_components/AntdInput.py | Beddow/feffery-antd-components | 0 | 12761212 | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class AntdInput(Component):
    """An AntdInput component.
Keyword arguments:
- id (string; optional)
- addonAfter (string; optional)
- addonBefore (string; optional)
- allowClear (boolean; optional)
- bordered (boolean; optional)
- className (string; optional)
- defaultValue (string; optional)
- disabled (boolean; optional)
- loading_state (dict; optional)
    `loading_state` is a dict with keys:
    - component_name (string; optional):
        Holds the name of the component that is loading.
    - is_loading (boolean; optional):
        Determines if the component is loading or not.
    - prop_name (string; optional):
        Holds which property is loading.
- maxLength (number; optional)
- mode (string; default 'default')
- nClicksSearch (number; default 0)
- nSubmit (number; default 0)
- placeholder (string; optional)
- showCount (boolean; optional)
- size (string; optional)
- style (dict; optional)
- value (string; optional)"""
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, loading_state=Component.UNDEFINED, mode=Component.UNDEFINED, placeholder=Component.UNDEFINED, size=Component.UNDEFINED, addonBefore=Component.UNDEFINED, addonAfter=Component.UNDEFINED, allowClear=Component.UNDEFINED, bordered=Component.UNDEFINED, defaultValue=Component.UNDEFINED, disabled=Component.UNDEFINED, maxLength=Component.UNDEFINED, value=Component.UNDEFINED, showCount=Component.UNDEFINED, nSubmit=Component.UNDEFINED, nClicksSearch=Component.UNDEFINED, **kwargs):
        # Auto-generated Dash boilerplate: declare the props accepted by the
        # underlying React component, then forward only the explicitly
        # supplied arguments to the base Component.
        self._prop_names = ['id', 'addonAfter', 'addonBefore', 'allowClear', 'bordered', 'className', 'defaultValue', 'disabled', 'loading_state', 'maxLength', 'mode', 'nClicksSearch', 'nSubmit', 'placeholder', 'showCount', 'size', 'style', 'value']
        self._type = 'AntdInput'
        self._namespace = 'feffery_antd_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'addonAfter', 'addonBefore', 'allowClear', 'bordered', 'className', 'defaultValue', 'disabled', 'loading_state', 'maxLength', 'mode', 'nClicksSearch', 'nSubmit', 'placeholder', 'showCount', 'size', 'style', 'value']
        self.available_wildcard_properties = []
        # _explicit_args is injected by the @_explicitize_args decorator and
        # names the keyword arguments the caller actually passed.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component declares no required props, so the loop is empty.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(AntdInput, self).__init__(**args)
| 2.359375 | 2 |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/structural/flyweight_with_metaclass.py | eengineergz/Lambda | 0 | 12761213 | import weakref
class FlyweightMeta(type):
    """Metaclass implementing the flyweight pattern: calling a class with
    arguments already seen returns the existing (weakly held) instance."""

    def __new__(mcs, name, parents, dct):
        """Create the class with a per-class weak-value instance pool.

        :param name: class name
        :param parents: class parents
        :param dct: class namespace (attributes, methods, ...)
        :return: the new class, with a ``pool`` attribute attached
        """
        # WeakValueDictionary: pooled instances disappear once no strong
        # reference to them remains.
        dct["pool"] = weakref.WeakValueDictionary()
        return super().__new__(mcs, name, parents, dct)

    @staticmethod
    def _serialize_params(cls, *args, **kwargs):
        """Build a pool key by stringifying the call arguments and class name."""
        parts = [str(arg) for arg in args]
        parts.append(str(kwargs))
        parts.append(cls.__name__)
        return "".join(parts)

    def __call__(cls, *args, **kwargs):
        """Return the pooled instance for these arguments, creating it on demand."""
        key = FlyweightMeta._serialize_params(cls, *args, **kwargs)
        pool = getattr(cls, "pool", {})
        obj = pool.get(key)
        if obj is None:
            obj = super().__call__(*args, **kwargs)
            pool[key] = obj
        return obj
class Card2(metaclass=FlyweightMeta):
    # Demonstration flyweight class: FlyweightMeta.__call__ interns
    # instances, so equal constructor arguments yield the same object.
    def __init__(self, *args, **kwargs):
        # print('Init {}: {}'.format(self.__class__, (args, kwargs)))
        pass
if __name__ == "__main__":
    instances_pool = getattr(Card2, "pool")
    cm1 = Card2("10", "h", a=1)
    cm2 = Card2("10", "h", a=1)
    cm3 = Card2("10", "h", a=2)
    # Equal arguments share one instance; a differing kwarg creates a new one.
    assert (cm1 == cm2) and (cm1 != cm3)
    assert (cm1 is cm2) and (cm1 is not cm3)
    assert len(instances_pool) == 2
    # The pool holds weak references: dropping the last strong reference to
    # an instance removes its pool entry (cm1 and cm2 are the same object,
    # so the first del leaves the count unchanged).
    del cm1
    assert len(instances_pool) == 2
    del cm2
    assert len(instances_pool) == 1
    del cm3
    assert len(instances_pool) == 0
| 2.859375 | 3 |
python_api/tests/testapi.py | PappaArty/HWF | 0 | 12761214 | import flatbuffers
import sys
from websocket import create_connection
sys.path.append("../../.") #Append project root directory so importing from schema works
import schema.GetHardwarePool as GetHardwarePool
import schema.GetResult as GetResult
import schema.Message as Message
import schema.Task as Task
import schema.Stage as Stage
# Scratch file for serialized flatbuffer messages.
binFile = "dataToSend.bin"
# Sample payload used by the (commented-out) file-sending helpers below.
ImgFile = "hellgo.png"
# Agent id targeted by the test messages; assignment is hard-coded for now.
targetAgentId = 1
headers = {
    "Content-Type": "application/octet-stream",
}
# Creates a HWFMessage with param info, returns builder output
def createAndSendBuffer():
    """Build a two-stage Task message with flatbuffers and send it to the hub.

    Flatbuffers are built bottom-up: child objects (strings, vectors,
    Stages) must be finished before the parent (Task, Message) that refers
    to them, and vector elements are prepended in reverse order.
    """
    stage_amount = 2
    cmd_amount = 1
    cmdo = "I'm the first stage"
    builder = flatbuffers.Builder(1024)
    # First stage: a one-command cmd_list.
    cmd = builder.CreateString(cmdo)
    Stage.StartCmdListVector(builder, cmd_amount)
    builder.PrependUOffsetTRelative(cmd)
    cmdVector = builder.EndVector()
    Stage.Start(builder)
    Stage.AddCmdList(builder, cmdVector)
    stage = Stage.End(builder)
    # Second stage, built the same way.
    cmdo = "I'm the second stage"
    cmd = builder.CreateString(cmdo)
    Stage.StartCmdListVector(builder, cmd_amount)
    builder.PrependUOffsetTRelative(cmd)
    cmdVector = builder.EndVector()
    Stage.Start(builder)
    Stage.AddCmdList(builder, cmdVector)
    stage2 = Stage.End(builder)
    # Stages vector: prepended in reverse so the final order is [stage, stage2].
    Task.StartStagesVector(builder, stage_amount)
    builder.PrependUOffsetTRelative(stage2)
    builder.PrependUOffsetTRelative(stage)
    stages = builder.EndVector()
    Task.Start(builder)
    Task.AddStages(builder, stages)
    task = Task.End(builder)
    # Wrap the task in the top-level Message (type 1 = task message;
    # presumably matches the schema's message-type enum -- TODO confirm).
    Message.Start(builder)
    Message.AddType(builder, 1)
    Message.AddTask(builder, task)
    message = Message.End(builder)
    builder.Finish(message)
    buf = builder.Output()
    # Ship the finished buffer to the hub over a websocket.
    ws = create_connection("ws://localhost:3001")
    ws.send_binary(buf)
    return 0
# def build_binary_message(_agentId, _cmd, _srcFile):
# fbb = flatbuffers.Builder(1024)
# # create cmd string
# cmd = fbb.CreateString(_cmd)
# # create srcfile byte arr
# with open(_srcFile, "rb") as bin:
# readBytes = bin.read()
# byteVector = fbb.CreateByteVector(readBytes)
# HWFMessage.MessageStart(fbb)
# # agent id is temporary since server doesn't assign tasks yet
# HWFMessage.MessageAddAgentId(fbb, _agentId)
# HWFMessage.MessageAddCmd(fbb, cmd)
# HWFMessage.MessageAddData(fbb, byteVector)
# readyMsg = HWFMessage.MessageEnd(fbb)
# fbb.Finish(readyMsg)
# return fbb.Output()
"""""
# Creates a bin file containing a target agent id and a string.
def CreateBinary(destFile):
fbb = flatbuffers.Builder(1024)
cmd = fbb.CreateString("find / -name secretpasswordsdontlook.txt")
HWFMessage.Start(fbb)
HWFMessage.AddAgentId(fbb, targetAgentId)
HWFMessage.AddCmd(fbb, cmd)
readyMsg = HWFMessage.End(fbb)
fbb.Finish(readyMsg)
buf = fbb.Output()
with open(destFile, "wb") as bin:
bin.write(buf)
#Reads a file, saves its bytes in the vector "Data", then sends them to the hub together with an agent id
def SendBinaryFromSourceFile(srcFile):
with open(srcFile, "rb") as bin:
readBytes = bin.read()
fbb = flatbuffers.Builder(1024)
byteVector = fbb.CreateByteVector(readBytes)
HWFMessage.Start(fbb)
HWFMessage.AddAgentId(fbb, targetAgentId)
HWFMessage.AddData(fbb, byteVector)
readyMsg = HWFMessage.End(fbb)
fbb.Finish(readyMsg)
buf = fbb.Output()
ws = create_connection("ws://localhost:3001")
ws.send_binary(buf)
#Sends a binary file to the hub
def SendBinary(srcFile):
with open(srcFile, "rb") as bin:
buf = bin.read()
ws = create_connection("ws://localhost:3001")
ws.send_binary(buf)
"""
# def send_request(cmd, filename):
# temp_agent_id = 1
# buf = createBuffer()
# #buf = build_binary_message(temp_agent_id, cmd, filename)
# # create bin file from message
# global binFile
# with open(binFile, "wb") as bin:
# bin.write(buf)
# ws = create_connection("ws://localhost:3001")
# ws.send_binary(buf)
if __name__ == "__main__":
    # Build and send the sample two-stage task message.
    createAndSendBuffer()
    # CreateBinary(binFile)
    # SendBinary(binFile)
    # SendBinaryFromSourceFile(ImgFile)
    # what cmd command to run?
    # what file to send?
    #send_request("echo hello world", "hellgo.png")
| 2.171875 | 2 |
dragg/redis_client.py | apigott/dra | 2 | 12761215 | <reponame>apigott/dra
import os
import redis
class Singleton(type):
    """Metaclass that caches one shared instance per class."""

    _instances = {}

    def __call__(cls):
        # Instantiate lazily on the first call; reuse the cached object after.
        registry = Singleton._instances
        if cls not in registry:
            registry[cls] = super(Singleton, cls).__call__()
        return registry[cls]
class RedisClient(metaclass=Singleton):
    """Process-wide Redis accessor backed by one shared connection pool.

    The Singleton metaclass guarantees a single pool per process; the
    actual connection is created lazily on first use of ``conn``.
    """

    def __init__(self):
        host = os.environ.get('REDIS_HOST', 'localhost')
        self.pool = redis.ConnectionPool(host=host, decode_responses=True, db=0)

    @property
    def conn(self):
        # Create the connection on first access, then reuse it.
        if not hasattr(self, '_conn'):
            self.getConnection()
        return self._conn

    def getConnection(self):
        self._conn = redis.Redis(connection_pool=self.pool)
| 2.75 | 3 |
alipay/aop/api/response/AlipayInsSceneFamilydoctorItemBatchqueryResponse.py | antopen/alipay-sdk-python-all | 0 | 12761216 | <filename>alipay/aop/api/response/AlipayInsSceneFamilydoctorItemBatchqueryResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.HealthServiceFamilyDoctorDrugDTO import HealthServiceFamilyDoctorDrugDTO
class AlipayInsSceneFamilydoctorItemBatchqueryResponse(AlipayResponse):
    """Response model for the family-doctor drug batch-query API.

    Exposes a single ``drugs`` list whose entries are coerced to
    HealthServiceFamilyDoctorDrugDTO objects on assignment.
    """

    def __init__(self):
        super(AlipayInsSceneFamilydoctorItemBatchqueryResponse, self).__init__()
        self._drugs = None

    @property
    def drugs(self):
        return self._drugs

    @drugs.setter
    def drugs(self, value):
        # Non-list values are ignored; list items that are not already DTOs
        # are converted from their dict form.
        if isinstance(value, list):
            self._drugs = [
                item if isinstance(item, HealthServiceFamilyDoctorDrugDTO)
                else HealthServiceFamilyDoctorDrugDTO.from_alipay_dict(item)
                for item in value
            ]

    def parse_response_content(self, response_content):
        response = super(AlipayInsSceneFamilydoctorItemBatchqueryResponse, self).parse_response_content(response_content)
        if 'drugs' in response:
            self.drugs = response['drugs']
| 2.296875 | 2 |
grtk/grtk/expression/preprocessing.py | mdozmorov/genome_runner | 11 | 12761217 | import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
from ctypes import c_bool, c_double
import numpy as np
import pandas as pd
def standardize(X):
    """
    Standardize each row in X to mean = 0 and SD = 1.

    NaN/inf entries are masked out when computing the per-row statistics,
    but remain NaN in the returned array.
    """
    masked = np.ma.masked_invalid(X)
    row_means = masked.mean(axis=1)
    row_stds = masked.std(axis=1)
    return ((X.T - row_means) / row_stds).T.data
# Module-level state shared with multiprocessing pool workers; populated in
# each worker process by knn_init (the shared-memory buffers are re-wrapped
# as numpy views there).
mask = None
X_s = None
X = None
k = None
def knn_init(k_, mask_, X_, X_s_):
    """Pool initializer: unpack shared-memory arrays into worker globals.

    Each argument is a (RawArray, dtype, shape) triple produced by
    to_shared; from_shared wraps it as a numpy view without copying.
    """
    global k, mask, X_s, X
    k = k_
    mask = from_shared(mask_)
    X = from_shared(X_)
    X_s = from_shared(X_s_)
def knn_work(i):
    # Worker task: fill the NaNs of row *i* of the shared matrix X in place.
    print(i)  # progress trace; one line per row (noisy for large matrices)
    # Similarity of every row to row i: dot product of standardized rows,
    # normalized by the number of positions observed in *both* rows.
    dx = X_s.dot(X_s[i,:]) / ((~ mask) & (~ mask[i,:])).sum(axis=1)
    # Row indices sorted from most to least similar.
    ix = (-dx).argsort()
    for j in np.isnan(X[i,:]).nonzero()[0]:
        # Values of column j ordered by row similarity, NaNs dropped.
        v = X[ix,j]
        v = v[np.invert(np.isnan(v))]
        # Impute with the mean over the k nearest rows that observed column j.
        X[i,j] = v[:k].mean()
def ctype_to_dtype(ctype):
    """Map a ctypes scalar type to the matching numpy scalar type.

    Args:
        ctype: one of ``ctypes.c_double`` or ``ctypes.c_bool``.

    Returns:
        ``np.float64`` or ``np.bool_`` respectively.

    Raises:
        ValueError: if *ctype* is not a supported type (previously a bare
        ``Exception`` with no message).
    """
    if ctype == c_double:
        return np.float64
    elif ctype == c_bool:
        # np.bool_ replaces the np.bool alias, which was removed in NumPy 1.24.
        return np.bool_
    else:
        raise ValueError("Unsupported ctype: %r" % (ctype,))
def to_shared(arr, type=c_double):
    """Copy *arr* into shared memory.

    Returns a (RawArray, numpy dtype, shape) triple that from_shared can
    re-wrap as a numpy view in another process.
    """
    return (RawArray(type, arr.flat), ctype_to_dtype(type), arr.shape)
def from_shared(args):
    """Wrap a (RawArray, dtype, shape) triple as a numpy array view (no copy)."""
    buffer, dtype, shape = args
    return np.frombuffer(buffer, dtype=dtype).reshape(shape)
class KNNImputer(object):
    """Impute missing (NaN) values from the k most similar rows.

    Row similarity is the dot product of standardized row profiles,
    normalized by the number of positions observed in both rows.  The
    per-row imputation is distributed over a multiprocessing pool that
    reads/writes the matrices through shared memory (see knn_init/knn_work).
    """
    def __init__(self, k=50):
        # Number of nearest neighbors averaged for each missing entry.
        self._k = k

    def fit_transform(self, X, axis=0):
        # axis selects whether neighbors are found among rows (1) or
        # columns (0, handled by transposing and recursing).
        assert(axis in (0,1))
        if isinstance(X, pd.DataFrame):
            # Drop all-NaN rows and columns with fewer than k observations,
            # impute the underlying matrix, and rebuild the frame.
            # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
            # modern pandas needs .values / .to_numpy() -- confirm target version.
            X = X.dropna(axis=0, how="all").dropna(axis=1, thresh=self._k)
            return pd.DataFrame(
                self.fit_transform(X.as_matrix(), axis=axis),
                index=X.index,
                columns=X.columns)
        if axis==0:
            return self.fit_transform(X.T, axis=1).T
        # Standardized copy used only for similarity; NaNs zeroed so the
        # dot products in knn_work ignore unobserved positions.
        X_s = standardize(X)
        mask = np.ma.masked_invalid(X_s).mask
        X_s[np.isnan(X_s)] = 0
        # Move everything into shared memory so pool workers can mutate X.
        mask_shared = to_shared(mask, c_bool)
        X_shared = to_shared(X)
        X_s_shared = to_shared(X_s)
        pool = mp.Pool(initializer=knn_init,
                       initargs=(self._k, mask_shared, X_shared, X_s_shared))
        # NOTE(review): the pool is never close()d/join()ed; worker processes
        # are left to the garbage collector -- consider closing explicitly.
        pool.map(knn_work, range(X.shape[0]))
        return from_shared(X_shared)
| 2.515625 | 3 |
djfw/tinymce/admin.py | kozzztik/tulius | 1 | 12761218 | <reponame>kozzztik/tulius
from django import forms
from django.contrib import admin
from .models import FileUpload
class FileUploadForm(forms.ModelForm):
    # Plain ModelForm exposing every FileUpload field in the admin.
    class Meta:
        model = FileUpload
        fields = '__all__'
class FileUploadAdmin(admin.ModelAdmin):
    # Admin view for TinyMCE uploads: uploads are created through the editor,
    # so the admin only lists/edits them (see has_add_permission below).
    form = FileUploadForm
    list_display = (
        'preview_image_url',
        'user',
        'filename',
        'get_absolute_url',
        'mime',
        'file_size',
    )
    list_display_links = (
        'preview_image_url',
    )
    list_editable = (
        'filename',
    )

    def has_add_permission(self, request):
        # Disallow creating uploads from the admin UI.
        return False
# Make FileUpload manageable through the Django admin.
admin.site.register(FileUpload, FileUploadAdmin)
| 2.21875 | 2 |
helpers/sett/resolvers/StrategyCurveGaugeResolver.py | EchoDao-BSC/badger-system | 99 | 12761219 | from helpers.sett.resolvers.StrategyCoreResolver import StrategyCoreResolver
class StrategyCurveGaugeResolver(StrategyCoreResolver):
    """Resolver for Curve gauge strategies.

    Reports the external contracts the strategy interacts with: the Curve
    gauge and the CRV minter.
    """

    def get_strategy_destinations(self):
        # Read the destination addresses off the managed strategy contract.
        strategy = self.manager.strategy
        destinations = dict(gauge=strategy.gauge(), mintr=strategy.mintr())
        return destinations
| 1.984375 | 2 |
src/einsteinpy/symbolic/vacuum_metrics.py | michiboo/einsteinpy | 1 | 12761220 | <reponame>michiboo/einsteinpy
import sympy
from einsteinpy.symbolic.metric import MetricTensor
def SchwarzschildMetric(symbolstr="t r theta phi"):
    """
    Returns Metric Tensor of symbols of Schwarzschild Metric.

    Parameters
    ----------
    symbolstr : string
        symbols to be used to define schwarzschild space, defaults to 't r theta phi'

    Returns
    -------
    ~einsteinpy.symbolic.metric.MetricTensor
        Metric Tensor for Schwarzschild space-time
    """
    syms = sympy.symbols(symbolstr)
    c, a = sympy.symbols("c a")
    r, theta = syms[1], syms[2]
    # Schwarzschild factor (1 - a/r), with `a` playing the role of the
    # Schwarzschild radius.
    f = 1 - (a / r)
    metric = [[0 for _ in range(4)] for _ in range(4)]
    metric[0][0] = f
    metric[1][1] = -1 / (f * (c ** 2))
    metric[2][2] = -1 * (r ** 2) / (c ** 2)
    metric[3][3] = -1 * (r ** 2) * (sympy.sin(theta) ** 2) / (c ** 2)
    return MetricTensor(metric, syms)
| 2.984375 | 3 |
vebio/WidgetFunctions.py | NREL/VirtualEngineering | 3 | 12761221 | from ipywidgets import *
from vebio.Utilities import dict_to_yaml, yaml_to_dict
#================================================================
class WidgetCollection:
    """A ``WidgetCollection`` object collects any number of different iPyWidgets.
    This object can be used to organize all the widgets pertaining to a single
    unit model or step of the overall conversion process. New widgets should be
    accumulated after initialization using an approach like::
        fs_options = wf.WidgetCollection()
        fs_options.xylan_solid_fraction = widgets.BoundedFloatText(...)
        fs_options.glucan_solid_fraction = widgets.BoundedFloatText(...)
        fs_options.initial_porosity = widgets.Checkbox(...)
    where widget options and values can be accessed using, for example,
    ``fs_options.initial_porosity.value``
    """
    def __init__(self):
        pass
    def display_all_widgets(self):
        """Displays all the collected widgets in the Notebook.
        This method displays all the widgets collected in the object
        to the Jupyter Notebook interface.
        Args:
            None
        Returns:
            None
        """
        # Set default viewing options (all sizes in pixels)
        widget_width = 350
        description_width = 125
        html_width = 350
        padding = 5
        # Define display options
        default_widget_layout = {'width': '%dpx' % (widget_width)}
        widget_style = {'description_width': '%dpx' % (description_width)}
        html_layout = {'width':'%dpx' % (html_width), 'margin': '0px 0px 0px %dpx' % (2*padding)}
        box_layout = {'padding': '0px %dpx 0px %dpx' % (padding, padding), 'align_items': 'center'}
        # For every widget (each attribute attached to this collection)
        for widget_name, widget in self.__dict__.items():
            widget_layout = default_widget_layout.copy()
            if hasattr(widget, 'contains_sub_widgets'):
                # ValueRangeWidget: lay the lower/upper fields side by side,
                # splitting the value area equally between them.
                widget.lower.style = widget_style
                sub_width = int((widget_width - description_width)/2.0 - 2.0)
                widget.lower.layout = {'width': '%dpx' % (description_width + sub_width)}
                widget.upper.layout = {'width': '%dpx' % (sub_width)}
                html_label = widgets.HTMLMath(
                    value = widget.lower.description_tooltip,
                    layout = html_layout
                )
                hbox = HBox([widget.lower, widget.upper, html_label], layout = box_layout)
            else:
                # Set this widget's style and layout
                widget.style = widget_style
                if type(widget) == Checkbox:
                    # Checkboxes render left of their description; shift them
                    # so the box lines up with the other value fields.
                    shift_amt = (widget_width - description_width) - 22
                    widget_layout.update({'padding': '0px 0px 0px %dpx ' % (shift_amt)})
                elif type(widget) == RadioButtons:
                    # Grow the layout with the number of radio options.
                    height = (len(widget.options)-2)*20 + 2*24
                    widget_layout.update({'height': '%dpx' % (height)})
                if hasattr(widget, 'custom_layout'):
                    widget_layout.update(widget.custom_layout)
                widget.layout = widget_layout
                html_label = widgets.HTMLMath(
                    value = widget.description_tooltip,
                    layout = html_layout
                )
                # Organize this widget with more layout options
                hbox = HBox([widget, html_label], layout = box_layout)
            display(hbox)
    def export_widgets_to_dict(self, parent_name=None):
        """Store all widget values in dictionary.
        This method allows the values of each widget to be saved
        in a dictionary with the pattern ``{"widget_1_name": widget_1_value, ...}``.
        If the widget was created with a scaling function, the scaled
        version of the value is calculated and stored in the dictionary.
        This scaled value is the one that will be referenced by subsequent operations
        accessing the VE parameter file.
        Args:
            parent_name (str, optional):
                At the end of the dictionary creation, all the ``name: value``
                entries can be nested under a single "parent" keyword. For example::
                    {parent_name: {"widget_1_name": widget_1_value,
                                   "widget_2_name": widget_2_value}}
                Defaults to ``None``, i.e., do not nest under a parent name.
        Returns:
            dict:
                The name and value of each widget
                collected in a Python dictionary.
        """
        #Start with a blank dictionary
        widget_dict = {}
        # NOTE(review): ValueRangeWidget entries (contains_sub_widgets) have
        # no .value attribute; exporting a collection that contains one would
        # raise AttributeError -- confirm intended handling.
        for widget_name, widget in self.__dict__.items():
            # Get the name and current state of each widget
            widget_value = widget.value
            if hasattr(widget, 'scaling_fn'):
                # print('pre-scaling value = %f' % (widget_value))
                widget_value = widget.scaling_fn(widget_value)
                # print('post-scaling value = %f' % (widget_value))
            # Create a dictionary with name : value pairs
            widget_dict['%s' % (widget_name)] = widget_value
        if parent_name is not None:
            widget_dict = {'%s' % (parent_name): widget_dict}
        return widget_dict
#================================================================
class ValueRangeWidget:
    """A pair of linked ``BoundedFloatText`` widgets for value ranges.

    Couples two ``BoundedFloatText`` fields so the user can enter a
    [lower, upper] range; whenever the entries cross, they are swapped
    back into order automatically.
    """

    def __init__(self, description, tooltip, bounds, init_vals, step_size):
        """Create the linked lower/upper input fields.

        Args:
            description (str): short label for the widget pair,
                e.g. ``Porosity Range``.
            tooltip (str): longer explanation shown next to the fields
                (background, units, suggested values, ...).
            bounds (list(float)): ``[min, max]`` enforced on both fields.
            init_vals (list(float)): initial ``[lower, upper]`` values.
            step_size (float): increment applied by the spinner buttons.

        Returns:
            None
        """
        # Marker consumed by WidgetCollection.display_all_widgets, which
        # lays the two sub-widgets out side by side.
        self.contains_sub_widgets = True
        field_min, field_max = bounds[0], bounds[1]
        self.lower = widgets.BoundedFloatText(
            value=init_vals[0],
            min=field_min,
            max=field_max,
            step=step_size,
            description=description,
            description_tooltip=tooltip,
            disabled=False,
        )
        self.upper = widgets.BoundedFloatText(
            value=init_vals[1],
            min=field_min,
            max=field_max,
            step=step_size,
            disabled=False,
        )

        def _reorder_on_cross(change):
            # Swap the two values whenever upper drops below lower.
            low = self.lower.value
            high = self.upper.value
            if high < low:
                self.lower.value = high
                self.upper.value = low

        self.lower.observe(_reorder_on_cross, names='value')
        self.upper.observe(_reorder_on_cross, names='value')
| 2.796875 | 3 |
tests/test_builder_security_requirements.py | tabebqena/flask-open-spec | 0 | 12761222 | <filename>tests/test_builder_security_requirements.py
from ..open_oas.builder.builder import OasBuilder
from unittest import TestCase
from ..open_oas.decorators import (
Deferred,
path_security_requirements,
security_requirements,
)
class TestSetSecurityRequirements(TestCase):
    # Unit tests for OasBuilder._set_security_requirements.  Per the OpenAPI
    # model, the security list is OR-of-ANDs: each dict in the list is one
    # alternative requirement, and schemes inside a dict must all be
    # satisfied.  OR=True inserts a new alternative at *index*; AND=True
    # merges the scheme into the requirement at *index*; with both False the
    # old list is replaced outright.
    def test_and_or_false_empty_old(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=False,
            OR=False,
            index=-1,
            old=[],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(res, [{"BasicAuth": []}])
    def test_and_or_false_value_old(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=False,
            OR=False,
            index=-1,
            old=[{"OldAuth": []}],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(res, [{"BasicAuth": []}])
    def test_and_false_or_true_empty_old_default_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=False,
            OR=True,
            index=-1,
            old=[],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(res, [{"BasicAuth": []}])
    def test_and_false_or_true_value_old_default_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=False,
            OR=True,
            index=-1,
            old=[{"OldAuth": []}],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(res, [{"OldAuth": []}, {"BasicAuth": []}])
    def test_and_false_or_true_value_old_0_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=False,
            OR=True,
            index=0,
            old=[{"OldAuth": []}],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(
            res,
            [
                {"BasicAuth": []},
                {"OldAuth": []},
            ],
        )
    def test_and_false_or_true_value_old_random_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=False,
            OR=True,
            index=1,
            old=[{"VeryOldAuth": []}, {"OldAuth": []}],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(
            res,
            [
                {"VeryOldAuth": []},
                {"BasicAuth": []},
                {"OldAuth": []},
            ],
        )
    def test_and_true_or_false_empty_old_default_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=True,
            OR=False,
            index=-1,
            old=[],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(res, [{"BasicAuth": []}])
    def test_and_true_or_false_value_old_default_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=True,
            OR=False,
            index=-1,
            old=[{"OldAuth": []}],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(res, [{"BasicAuth": [], "OldAuth": []}])
    def test_and_true_or_false_value_old_non_default_index(self):
        builder = OasBuilder()
        data = dict(
            security="BasicAuth",
            scopes=[],
            AND=True,
            OR=False,
            index=0,
            old=[
                {"VeryOldAuth": []},
                {"OldAuth": []},
            ],
        )
        res = builder._set_security_requirements(**data)  # type: ignore
        self.assertEqual(
            res,
            [
                {"BasicAuth": [], "VeryOldAuth": []},
                {"OldAuth": []},
            ],
        )
class TestRootSecurityRequirements(TestCase):
    # The security_requirements decorator registers document-level (root)
    # requirements; successive calls append alternatives to the list.
    def test_decorator(self):
        security_requirements(
            "BasicAuth",
            [],
        )
        data = OasBuilder().get_data()
        self.assertEqual(
            data.get(
                "security",
            ),
            [{"BasicAuth": []}],
        )
        #
        security_requirements(
            "BearerAuth",
            [],
        )
        data = OasBuilder().get_data()
        self.assertEqual(
            data.get(
                "security",
            ),
            [{"BasicAuth": []}, {"BearerAuth": []}],
        )
    def tearDown(self) -> None:
        # Decorator calls accumulate in Deferred; reset between tests.
        Deferred._deferred = []
        return super().tearDown()
class TestPathSecurityRequirements(TestCase):
    # Path-level security: verifies that raw data (test_data) and the
    # path_security_requirements decorator (test_decorator) produce the
    # same per-operation security lists.
    def run_tests(self, builder: OasBuilder):
        # Shared assertions: compare the built /admin and /user GET security
        # against the expectations prepared in setUp.
        data = builder.get_data()
        self.assertEqual(
            data.get("paths", {})
            .get("/admin", {})
            .get("get", {})
            .get("security", {}),
            self.admin_auth,
        )
        self.assertEqual(
            data.get("paths", {})
            .get("/user", {})
            .get("get", {})
            .get("security", {}),
            self.user_auth,
        )
    # [] = or
    # {} = and
    def setUp(self) -> None:
        # /admin: Oath2 with admin scope OR an admin API key.
        self.admin_auth = [
            {
                "Oath2": ["user"],
            },
            {
                "AdminApiKey": [],
            },
        ]
        self.user_auth = [
            {
                "Oath2": ["user"],
                "BasicAuth": [],
            },
        ]
        return super().setUp()
    def test_data(self):
        data = {
            "paths": {
                "/admin": {
                    "get": {
                        "security": self.admin_auth,
                    },
                },
                "/user": {
                    "get": {
                        "security": self.user_auth,
                    },
                },
            }
        }
        builder = OasBuilder(data)
        self.run_tests(builder)
    def test_decorator(self):
        path_security_requirements(
            ["/admin"],
            ["get"],
            "Oath2",
            ["admin"],
        )
        path_security_requirements(
            ["/admin"],
            ["get"],
            "AdminApiKey",
            [],
        )
        #
        path_security_requirements(
            ["/user"],
            ["get"],
            "Oath2",
            ["user"],
        )
        # AND=True merges BasicAuth into the existing Oath2 requirement.
        path_security_requirements(
            ["/user"], ["get"], "BasicAuth", [], AND=True, OR=False
        )
        builder = OasBuilder()
        self.run_tests(builder)
    def tearDown(self) -> None:
        # Decorator calls accumulate in Deferred; reset between tests.
        Deferred._deferred = []
        return super().tearDown()
| 2.515625 | 3 |
dace/transformation/dataflow/tiling.py | gronerl/dace | 0 | 12761223 | <reponame>gronerl/dace<gh_stars>0
""" This module contains classes and functions that implement the orthogonal
tiling transformation. """
from dace import registry
from dace.properties import make_properties, Property, ShapeProperty
from dace.graph import nodes, nxutil
from dace.transformation import pattern_matching
@registry.autoregister_params(singlestate=True)
@make_properties
class MapTiling(pattern_matching.Transformation):
    """ Implements the orthogonal tiling transformation.

        Orthogonal tiling is a type of nested map fission that creates tiles
        in every dimension of the matched Map.
    """

    # Pattern node: any MapEntry matches (see can_be_applied below).
    _map_entry = nodes.MapEntry(nodes.Map("", [], []))

    # Properties
    prefix = Property(
        dtype=str, default="tile", desc="Prefix for new range symbols")
    tile_sizes = ShapeProperty(
        dtype=tuple, default=(128, 128, 128), desc="Tile size per dimension")
    strides = ShapeProperty(
        dtype=tuple,
        default=tuple(),
        desc="Tile stride (enables overlapping tiles). If empty, matches tile")
    divides_evenly = Property(
        dtype=bool,
        default=False,
        desc="Tile size divides dimension length evenly")

    @staticmethod
    def annotates_memlets():
        # Memlets are re-derived by the nested StripMining applications.
        return False

    @staticmethod
    def expressions():
        return [nxutil.node_path_graph(MapTiling._map_entry)]

    @staticmethod
    def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
        # Tiling is always legal on a matched map.
        return True

    @staticmethod
    def match_to_str(graph, candidate):
        map_entry = graph.nodes()[candidate[MapTiling._map_entry]]
        return map_entry.map.label + ': ' + str(map_entry.map.params)

    def apply(self, sdfg):
        """Tile the matched map: strip-mine each dimension, then collapse
        the resulting outer (tile) maps into a single multi-dimensional map.
        """
        graph = sdfg.nodes()[self.state_id]

        # Use explicit strides when they match the tiling rank; otherwise the
        # stride equals the tile size (non-overlapping tiles).
        tile_strides = self.tile_sizes
        if self.strides is not None and len(self.strides) == len(tile_strides):
            tile_strides = self.strides

        # Retrieve map entry and exit nodes.
        map_entry = graph.nodes()[self.subgraph[MapTiling._map_entry]]

        # Imported here to avoid circular imports between transformations.
        from dace.transformation.dataflow.map_collapse import MapCollapse
        from dace.transformation.dataflow.strip_mining import StripMining

        stripmine_subgraph = {
            StripMining._map_entry: self.subgraph[MapTiling._map_entry]
        }
        sdfg_id = sdfg.sdfg_list.index(sdfg)
        last_map_entry = None
        for dim_idx in range(len(map_entry.map.params)):
            # Fewer tile sizes than dimensions: reuse the last entry.
            if dim_idx >= len(self.tile_sizes):
                tile_size = self.tile_sizes[-1]
                tile_stride = tile_strides[-1]
            else:
                tile_size = self.tile_sizes[dim_idx]
                tile_stride = tile_strides[dim_idx]

            # Strip-mine this dimension, producing a new outer (tile) map.
            stripmine = StripMining(sdfg_id, self.state_id, stripmine_subgraph,
                                    self.expr_index)
            stripmine.dim_idx = dim_idx
            stripmine.new_dim_prefix = self.prefix
            stripmine.tile_size = str(tile_size)
            stripmine.tile_stride = str(tile_stride)
            stripmine.divides_evenly = self.divides_evenly
            stripmine.apply(sdfg)

            # Collapse the newly created outer map into the previous one so
            # all tile dimensions end up in a single map.
            if last_map_entry:
                new_map_entry = graph.in_edges(map_entry)[0].src
                mapcollapse_subgraph = {
                    MapCollapse._outer_map_entry:
                    graph.node_id(last_map_entry),
                    MapCollapse._inner_map_entry: graph.node_id(new_map_entry)
                }
                mapcollapse = MapCollapse(sdfg_id, self.state_id,
                                          mapcollapse_subgraph, 0)
                mapcollapse.apply(sdfg)
            last_map_entry = graph.in_edges(map_entry)[0].src
| 2.234375 | 2 |
monitoringHisto/setup.py | juliozinga/FIWARELab-monitoringAPI | 0 | 12761224 | <reponame>juliozinga/FIWARELab-monitoringAPI<filename>monitoringHisto/setup.py<gh_stars>0
from setuptools import setup
# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(
    install_requires=required,
    name='monitoringHisto',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1',
    description='FIWARE Historical monitoring collector',
    long_description='',
    # The project's main homepage.
    url='https://github.com/SmartInfrastructures/FIWARELab-monitoringAPI',
    # Author details
    author='<NAME>',
    author_email='',
    # Choose your license
    license='Apache v2.0',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=['monitoringHisto'],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={'console_scripts': [
        'monitoringHisto=monitoringHisto.monitoringHisto:main',
    ],
    },
)
| 1.617188 | 2 |
practice/binary_search.py | HiAwesome/python-algorithm | 1 | 12761225 | <gh_stars>1-10
# https://leetcode-cn.com/problems/binary-search/
def binary_search(nums, target):
    """Return the index of target in the sorted list nums, or -1 if absent."""
    # Fast exits: empty list, or target outside the value range of nums.
    if not nums or not (nums[0] <= target <= nums[-1]):
        return -1
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if nums[mid] < target:
            lo = mid + 1
        elif nums[mid] > target:
            hi = mid - 1
        else:
            return mid
    return -1
# Ad-hoc smoke test: target 5 is the last element, so index 4 is printed.
if __name__ == '__main__':
    print(binary_search([1, 2, 3, 4, 5], 5))
| 3.734375 | 4 |
flask_blog/forms.py | ronan7796/flask-blog | 0 | 12761226 | <gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_blog.models import User
class RegistrationForm(FlaskForm):
    """Sign-up form: collects credentials and rejects duplicate users.

    The validate_username / validate_email methods are picked up by WTForms'
    inline-validator convention (validate_<fieldname>).
    """
    username = StringField('Username', [DataRequired(), Length(min=6, max=25)])
    email = StringField('Email', [DataRequired(), Email(), Length(min=6, max=25)])
    password = PasswordField('Password', [DataRequired(), Length(min=6, max=25)])
    confirm_password = PasswordField('Confirm Password', [DataRequired(), Length(min=6, max=25),
                                                          EqualTo('password', message='Password is not match')])
    submit = SubmitField('Register')
    def validate_username(form, username):
        # Reject usernames that already exist in the database.
        user = User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError('Username already taken. Choose a different one')
    def validate_email(form, email):
        # Reject e-mail addresses that already exist in the database.
        user = User.query.filter_by(email=email.data).first()
        if user:
            raise ValidationError('Email already taken. Choose a different one')
class LoginForm(FlaskForm):
    """Login form with a remember-me flag.

    NOTE(review): the new_password/confirm_password fields below look like
    they belong to a separate change-password form; confirm_password compares
    against 'password' (not 'new_password') and its label still contains a
    '<PASSWORD>' placeholder -- confirm intent before relying on them.
    """
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Confirm')
    new_password = PasswordField('New Password', [DataRequired(), Length(min=6, max=25)])
    confirm_password = PasswordField('<PASSWORD> New Password', [DataRequired(), Length(min=6, max=25),
                                                                 EqualTo('password', message='Password is not match')])
| 2.9375 | 3 |
lynx/auth.py | aethersoft/lynx | 3 | 12761227 | import typing
from flask_httpauth import HTTPTokenAuth, HTTPBasicAuth, MultiAuth
class Auth:
    """Combined HTTP Basic + bearer-token authentication built on flask_httpauth."""

    def __init__(self):
        """Create the basic and token handlers and chain them via MultiAuth."""
        self._basic_auth = HTTPBasicAuth()
        self._token_auth = HTTPTokenAuth()
        self._auth = MultiAuth(self._basic_auth, self._token_auth)
        self._resources = {}

    def error_handler(self, f: typing.Callable) -> typing.NoReturn:
        """Register *f* as the error handler on both authentication schemes.

        :param f: error handler.
        :return: NoReturn
        """
        self._token_auth.error_handler(f)
        self._basic_auth.error_handler(f)

    def verify_password(self, f: typing.Callable) -> typing.Any:
        """Register *f* as the username/password verification callback.

        :param f: function defining verification process.
        :return: Any
        """
        return self._basic_auth.verify_password(f)

    def verify_token(self, f: typing.Callable) -> typing.Any:
        """Register *f* as the token verification callback.

        :param f: function defining verification process.
        :return: Any
        """
        return self._token_auth.verify_token(f)

    def login_required(self, f: typing.Callable = None, role: typing.Text = None) -> typing.Any:
        """Mark *f* as requiring login (optionally with a role), via MultiAuth.

        :param f: input function.
        :param role: user role
        :return: func
        """
        return self._auth.login_required(f, role)
| 3.09375 | 3 |
amadeus/airport/__init__.py | akshitsingla/amadeus-python | 125 | 12761228 | from ._predictions import AirportOnTime
# Public names re-exported by this subpackage.
__all__ = ['AirportOnTime']
| 1 | 1 |
branching.py | FriendOfDorothy/twilio-quest | 8 | 12761229 | <filename>branching.py
import sys
def add(x, y):
    """Convert x and y to int, print a themed message for the sum, return it."""
    total = int(x) + int(y)
    if total <= 0:
        print("You have chosen the path of destitution.")
    elif 1 <= total <= 100:
        print("You have chosen the path of plenty.")
    else:
        print("You have chosen the path of excess.")
    return total
# Script entry: add the two command-line arguments and print the result.
print(add(sys.argv[1], sys.argv[2]))
| 3.65625 | 4 |
pyglslify.py | anentropic/pyglslify | 0 | 12761230 | import subprocess
import sys
def glslify(shader_path, *transforms):
    """Compile a shader through the glslify CLI, optionally applying transforms.

    Returns the tool's stdout decoded with the interpreter's default encoding.
    """
    cmd = ["glslify", shader_path]
    if transforms:
        # A single -t flag precedes the whole list of transform names.
        cmd += ["-t", *transforms]
    return subprocess.check_output(cmd, encoding=sys.getdefaultencoding())
# CLI entry: argv[1] is the shader path, remaining args are glslify transforms.
if __name__ == "__main__":
    shader = glslify(sys.argv[1], *sys.argv[2:])
    print(shader)
| 2.53125 | 3 |
python/API_operaciones/files.py | josemonsalve2/PIMC | 1 | 12761231 | <reponame>josemonsalve2/PIMC<gh_stars>1-10
import os
from werkzeug.utils import secure_filename
from API_operaciones.mysql_connection import app, PIMC_ALLOWED_EXTENSIONS
from API_operaciones.mysql_connection import mysql2 as mysql
from API_operaciones.bd_descripcion import pimcBD
from API_operaciones.consulta import consultarElemento
def allowed_file(filename):
    """Return True when *filename* has an extension on the app's whitelist.

    NOTE(review): this reads app.config['ALLOWED_EXTENSIONS'] while the
    module imports PIMC_ALLOWED_EXTENSIONS -- confirm that config key is
    actually populated at startup.
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
def file_extension_changed(filename, new_filename):
    """Return True when renaming *filename* to *new_filename* would change
    the (case-insensitive) file extension.

    Names without a dot are treated as having an empty extension instead of
    raising IndexError: the original ``rsplit('.', 1)[1]`` crashed on them,
    and renombrarArchivoElementoRelacional calls this function before
    allowed_file() has validated the new name.
    """
    def _ext(name):
        # rsplit yields [name] when there is no dot, [stem, ext] otherwise.
        parts = name.rsplit('.', 1)
        return parts[1].lower() if len(parts) == 2 else ''
    return _ext(filename) != _ext(new_filename)
def cargarArchivos(elementoRelacional, parametrosPOST):
    """Store an uploaded file under the given relational element.

    Expects a multipart POST whose 'file' part holds the upload and whose
    form fields identify the element. The file is saved under
    UPLOAD_FOLDER/<elemento>/<element id>/<sanitized name>.
    Raises ValueError on any validation failure (messages are user-facing,
    in Spanish, and must stay unchanged).
    """
    # check if the post request has the file part
    if not parametrosPOST.files or 'file' not in parametrosPOST.files:
        raise ValueError('No se envió archivo adjunto')
    file = parametrosPOST.files['file']
    # if user does not select file, browser also
    # submit a empty part without filename
    if file.filename == '':
        raise ValueError('Nombre archivo incorrecto')
    if file and allowed_file(file.filename):
        # Verify the element exists in the database
        idElementoRelacional = pimcBD.obtenerTablaId(elementoRelacional)
        elementoBD = consultarElemento(elementoRelacional, parametrosPOST.form.to_dict())
        if not elementoBD:
            raise ValueError('Elemento no existente')
        filename = secure_filename(file.filename)
        pathCompleto = os.path.join(app.config['UPLOAD_FOLDER'], elementoRelacional, str(elementoBD[0][idElementoRelacional]), filename)
        # Create the destination folders if necessary
        directory = os.path.dirname(pathCompleto)
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Persist the file to disk
        file.save(pathCompleto)
        return {"status": "Success",
                "message": "Archivo agregado satisfactoriamente"}
    else:
        raise ValueError('Nombre archivo incorrecto')
def archivosElementoRelacional(elementoRelacional, parametrosJSON):
    """Return the list of file names stored for the given element.

    Returns [] when the element has no upload directory yet.
    Raises ValueError if the element does not exist.
    """
    # Verify the element exists in the database
    idElementoRelacional = pimcBD.obtenerTablaId(elementoRelacional)
    elementoBD = consultarElemento(elementoRelacional, parametrosJSON)
    if not elementoBD:
        raise ValueError('Elemento no existente')
    # Build the element's upload directory path
    idElemento = elementoBD[0][idElementoRelacional]
    pathCompleto = os.path.join(app.config['UPLOAD_FOLDER'], elementoRelacional, str(idElemento))
    if os.path.exists(pathCompleto):
        listaArchivos = [f for f in os.listdir(pathCompleto) if os.path.isfile(os.path.join(pathCompleto, f))]
        return listaArchivos
    else:
        return [];
    # NOTE(review): unreachable trailing return kept as-is.
    return
def descargarAchivoElementoRelacional(elementoRelacional, parametrosJSON):
    """Resolve a stored file for download.

    parametrosJSON must identify the element and include 'fileName'.
    Returns {'directorio': ..., 'nombreArchivo': ...} for the file on disk.
    Raises ValueError when parameters are wrong, the element is missing,
    or the file does not exist.
    """
    # Verify the element exists in the database
    idElementoRelacional = pimcBD.obtenerTablaId(elementoRelacional)
    elementoBD = consultarElemento(elementoRelacional, parametrosJSON)
    if 'fileName' not in parametrosJSON:
        raise ValueError('Parametros Incorrectos' + str(parametrosJSON))
    if not elementoBD:
        raise ValueError('Elemento no existente')
    # Sanitize the requested file name and build its full path
    fileName = secure_filename(parametrosJSON['fileName'])
    idElemento = elementoBD[0][idElementoRelacional]
    pathCompleto = os.path.join(app.config['UPLOAD_FOLDER'], elementoRelacional, str(idElemento), fileName)
    if os.path.exists(pathCompleto) and os.path.isfile(pathCompleto):
        return {'directorio': os.path.dirname(pathCompleto), 'nombreArchivo': os.path.basename(pathCompleto)}
    raise ValueError('El archivo no existe')
def eliminarArchivoElementoRelacional(elementoRelacional, parametrosJSON):
    """Delete a stored file belonging to the given element.

    parametrosJSON must identify the element and include 'fileName'.
    Raises ValueError when parameters are wrong, the element is missing,
    or the file does not exist / cannot be removed.
    """
    # Verify the element exists in the database
    idElementoRelacional = pimcBD.obtenerTablaId(elementoRelacional)
    elementoBD = consultarElemento(elementoRelacional, parametrosJSON)
    if 'fileName' not in parametrosJSON:
        raise ValueError('Parametros Incorrectos' + str(parametrosJSON))
    if not elementoBD:
        raise ValueError('Elemento no existente')
    # Sanitize the file name and build its full path
    fileName = secure_filename(parametrosJSON['fileName'])
    idElemento = elementoBD[0][idElementoRelacional]
    pathCompleto = os.path.join(app.config['UPLOAD_FOLDER'],
                                elementoRelacional, str(idElemento), fileName)
    if os.path.exists(pathCompleto) and os.path.isfile(pathCompleto):
        try:
            os.remove(pathCompleto)
        except OSError:
            raise ValueError('Elemento incorrecto')
        return {"status": "Success",
                "message": "Archivo eliminado satisfactoriamente"}
    raise ValueError('El archivo no existe')
def renombrarArchivoElementoRelacional(elemento_relacional, parametros_JSON):
    """Rename a stored file belonging to the given element.

    parametros_JSON must identify the element and include 'fileName' and
    'newFileName'. The rename must preserve the file extension and the new
    name must have an allowed extension. Raises ValueError on any failure.

    NOTE(review): file_extension_changed() runs before allowed_file() checks
    new_file_name, so a new name without a dot raises IndexError here --
    confirm the intended validation order.
    """
    # Verify the element exists in the database
    id_elemento_relacional = pimcBD.obtenerTablaId(elemento_relacional)
    elemento_BD = consultarElemento(elemento_relacional, parametros_JSON)
    if 'fileName' not in parametros_JSON:
        raise ValueError('Parametros Incorrectos' + str(parametros_JSON))
    if not elemento_BD:
        raise ValueError('Elemento no existente')
    # Sanitize both names and build the old/new full paths
    file_name = secure_filename(parametros_JSON['fileName'])
    new_file_name = secure_filename(parametros_JSON['newFileName'])
    id_elemento = elemento_BD[0][id_elemento_relacional]
    path_completo = os.path.join(app.config['UPLOAD_FOLDER'],
                                 elemento_relacional, str(id_elemento), file_name)
    new_path_completo = os.path.join(app.config['UPLOAD_FOLDER'],
                                     elemento_relacional, str(id_elemento), new_file_name)
    # Make sure the rename does not change the file extension
    if file_extension_changed(file_name, new_file_name):
        raise ValueError('No se puede cambiar la extension de un archivo')
    # Rename the file on disk
    if (os.path.exists(path_completo) and
        os.path.isfile(path_completo) and
        allowed_file(new_file_name)):
        try:
            os.rename(path_completo, new_path_completo)
        except OSError:
            raise ValueError('Elemento incorrecto')
        return {"status": "Success",
                "message": "Archivo renombrado satisfactoriamente"}
    raise ValueError('El archivo no existe')
| 2.46875 | 2 |
homeassistant/components/geocaching/sensor.py | liangleslie/core | 30,023 | 12761232 | """Platform for sensor integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import cast
from geocachingapi.models import GeocachingStatus
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import GeocachingDataUpdateCoordinator
@dataclass
class GeocachingRequiredKeysMixin:
    """Mixin holding the keys every Geocaching sensor description must supply."""
    # Extracts this sensor's native value from a GeocachingStatus snapshot.
    value_fn: Callable[[GeocachingStatus], str | int | None]
@dataclass
class GeocachingSensorEntityDescription(
    SensorEntityDescription, GeocachingRequiredKeysMixin
):
    """Sensor entity description that also carries a value extractor."""
# Descriptions of every sensor created per Geocaching account; each value_fn
# maps the coordinator's GeocachingStatus to the sensor's native value.
SENSORS: tuple[GeocachingSensorEntityDescription, ...] = (
    GeocachingSensorEntityDescription(
        key="find_count",
        name="Total finds",
        icon="mdi:notebook-edit-outline",
        native_unit_of_measurement="caches",
        value_fn=lambda status: status.user.find_count,
    ),
    GeocachingSensorEntityDescription(
        key="hide_count",
        name="Total hides",
        icon="mdi:eye-off-outline",
        native_unit_of_measurement="caches",
        entity_registry_visible_default=False,
        value_fn=lambda status: status.user.hide_count,
    ),
    GeocachingSensorEntityDescription(
        key="favorite_points",
        name="Favorite points",
        icon="mdi:heart-outline",
        native_unit_of_measurement="points",
        entity_registry_visible_default=False,
        value_fn=lambda status: status.user.favorite_points,
    ),
    GeocachingSensorEntityDescription(
        key="souvenir_count",
        name="Total souvenirs",
        icon="mdi:license",
        native_unit_of_measurement="souvenirs",
        value_fn=lambda status: status.user.souvenir_count,
    ),
    GeocachingSensorEntityDescription(
        key="awarded_favorite_points",
        name="Awarded favorite points",
        icon="mdi:heart",
        native_unit_of_measurement="points",
        entity_registry_visible_default=False,
        value_fn=lambda status: status.user.awarded_favorite_points,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up a Geocaching sensor entry."""
    # The update coordinator was stored under this entry id at integration setup.
    coordinator = hass.data[DOMAIN][entry.entry_id]
    # Create one sensor entity per description in SENSORS.
    async_add_entities(
        GeocachingSensor(coordinator, description) for description in SENSORS
    )
class GeocachingSensor(
    CoordinatorEntity[GeocachingDataUpdateCoordinator], SensorEntity
):
    """Representation of a Sensor."""
    # Narrowed type: descriptions always carry a value_fn extractor.
    entity_description: GeocachingSensorEntityDescription
    def __init__(
        self,
        coordinator: GeocachingDataUpdateCoordinator,
        description: GeocachingSensorEntityDescription,
    ) -> None:
        """Initialize the Geocaching sensor."""
        super().__init__(coordinator)
        self.entity_description = description
        # Name and unique id are namespaced by the account's username /
        # reference code so multiple Geocaching accounts can coexist.
        self._attr_name = (
            f"Geocaching {coordinator.data.user.username} {description.name}"
        )
        self._attr_unique_id = (
            f"{coordinator.data.user.reference_code}_{description.key}"
        )
        # All sensors of one account share a single service-type device.
        self._attr_device_info = DeviceInfo(
            name=f"Geocaching {coordinator.data.user.username}",
            identifiers={(DOMAIN, cast(str, coordinator.data.user.reference_code))},
            entry_type=DeviceEntryType.SERVICE,
            manufacturer="Groundspeak, Inc.",
        )
    @property
    def native_value(self) -> str | int | None:
        """Return the state of the sensor."""
        # Delegate to the description's extractor over the latest status.
        return self.entity_description.value_fn(self.coordinator.data)
| 1.960938 | 2 |
xija/get_model_spec.py | jzuhone/xija | 2 | 12761233 | <gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Get Chandra model specifications
"""
import json
import tempfile
import contextlib
import shutil
import platform
import os
import re
import warnings
from pathlib import Path
from typing import List, Optional, Union
import git
import requests
from Ska.File import get_globfiles
__all__ = ['get_xija_model_spec', 'get_xija_model_names', 'get_repo_version',
'get_github_version']
REPO_PATH = Path(os.environ['SKA'], 'data', 'chandra_models')
MODELS_PATH = REPO_PATH / 'chandra_models' / 'xija'
CHANDRA_MODELS_LATEST_URL = 'https://api.github.com/repos/sot/chandra_models/releases/latest'
def _models_path(repo_path=REPO_PATH) -> Path:
    """Return the directory holding the xija model specs in a checkout."""
    return Path(repo_path).joinpath('chandra_models', 'xija')
@contextlib.contextmanager
def temp_directory():
    """Get name of a temporary directory that is deleted at the end.

    Like tempfile.TemporaryDirectory but without the bug that it fails to
    remove read-only files within the temp dir. Git repos can have read-only
    files. https://bugs.python.org/issue26660.

    The removal now runs in a ``finally`` block, so the directory is also
    cleaned up when the ``with`` body raises (previously it leaked).
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
def get_xija_model_spec(model_name, version=None, repo_path=None,
                        check_version=False, timeout=5) -> tuple:
    """
    Get Xija model specification for the specified ``model_name``.

    Supported model names include (but are not limited to): ``'aca'``,
    ``'acisfp'``, ``'dea'``, ``'dpa'``, ``'psmc'``, ``'minusyz'``, and
    ``'pftank2t'``.

    Use ``get_xija_model_names()`` for the full list.

    Examples
    --------
    Get the latest version of the ``acisfp`` model spec from the local Ska data
    directory ``$SKA/data/chandra_models``, checking that the version matches
    the latest release tag on GitHub.

    >>> import xija
    >>> from xija.get_model_spec import get_xija_model_spec
    >>> model_spec, version = get_xija_model_spec('acisfp', check_version=True)
    >>> model = xija.XijaModel('acisfp', model_spec=model_spec,
    ...                        start='2020:001', stop='2020:010')
    >>> model.make()
    >>> model.calc()

    Get the ``aca`` model spec from release version 3.30 of chandra_models from
    GitHub.

    >>> repo_path = 'https://github.com/sot/chandra_models.git'
    >>> model_spec, version = get_xija_model_spec('aca', version='3.30',
    ...                                           repo_path=repo_path)

    Parameters
    ----------
    model_name : str
        Name of model
    version : str
        Tag, branch or commit of chandra_models to use (default=latest tag from
        repo)
    repo_path : str, Path
        Path to directory or URL containing chandra_models repository (default
        is $SKA/data/chandra_models)
    check_version : bool
        Check that ``version`` matches the latest release on GitHub
    timeout : int, float
        Timeout (sec) for querying GitHub for the expected chandra_models version.
        Default = 5 sec.

    Returns
    -------
    tuple of dict, str
        Xija model specification dict, chandra_models version
    """
    if repo_path is None:
        repo_path = REPO_PATH
    # Clone into a throwaway directory so the source repo is never mutated
    # (this also makes a remote URL behave the same as a local path).
    with temp_directory() as repo_path_local:
        repo = git.Repo.clone_from(repo_path, repo_path_local)
        if version is not None:
            repo.git.checkout(version)
        model_spec, version = _get_xija_model_spec(model_name, version, repo_path_local,
                                                   check_version, timeout)
    return model_spec, version
def _get_xija_model_spec(model_name, version=None, repo_path=REPO_PATH,
                         check_version=False, timeout=5) -> tuple:
    """Load ``<model_name>_spec.json`` from a chandra_models checkout.

    Returns (model_spec_dict, version). Raises FileNotFoundError when the
    models directory is missing, and ValueError for an unknown model name
    or a local/GitHub version mismatch.
    """
    models_path = _models_path(repo_path)

    if not models_path.exists():
        raise FileNotFoundError(f'xija models directory {models_path} does not exist')

    file_glob = str(models_path / '*' / f'{model_name.lower()}_spec.json')
    try:
        # get_globfiles() default requires exactly one file match and returns a list
        file_name = get_globfiles(file_glob)[0]
    except ValueError:
        names = get_xija_model_names(repo_path)
        raise ValueError(f'no models matched {model_name}. Available models are: '
                         f'{", ".join(names)}')

    model_spec = json.load(open(file_name, 'r'))

    # Get version and ensure that repo is clean and tip is at latest tag
    if version is None:
        version = get_repo_version(repo_path)

    if check_version:
        gh_version = get_github_version(timeout=timeout)
        if gh_version is None:
            warnings.warn('Could not verify GitHub chandra_models release tag '
                          f'due to timeout ({timeout} sec)')
        elif version != gh_version:
            raise ValueError(f'version mismatch: local repo {version} vs '
                             f'github {gh_version}')

    return model_spec, version
def get_xija_model_names(repo_path=REPO_PATH) -> List[str]:
    """Return list of available xija model names.

    Examples
    --------
    >>> from xija.get_model_spec import get_xija_model_names
    >>> names = get_xija_model_names()
    ['aca',
     'acisfp',
     'dea',
     'dpa',
     '4rt700t',
     'minusyz',
     'pm1thv2t',
     'pm2thv1t',
     'pm2thv2t',
     'pftank2t',
     'pline03t_model',
     'pline04t_model',
     'psmc',
     'tcylaft6']

    Parameters
    ----------
    repo_path : str, Path
        Path to directory containing chandra_models repository (default is
        $SKA/data/chandra_models)

    Returns
    -------
    list
        List of available xija model names
    """
    models_path = _models_path(repo_path)

    # Zero or many matches allowed; each spec file maps to one model name.
    fns = get_globfiles(str(models_path / '*' / '*_spec.json'), minfiles=0, maxfiles=None)
    # Strip the trailing "_spec.json" to recover the model name.
    names = [re.sub(r'_spec\.json', '', Path(fn).name) for fn in sorted(fns)]

    return names
def get_repo_version(repo_path: Path = REPO_PATH) -> str:
    """Return version (most recent tag) of models repository.

    Raises ValueError if the repo is dirty or HEAD is not at the latest tag.

    Returns
    -------
    str
        Version (most recent tag) of models repository
    """
    with temp_directory() as repo_path_local:
        if platform.system() == 'Windows':
            # On Windows, work on a clone (presumably to avoid file locking
            # on the live repo -- TODO confirm).
            repo = git.Repo.clone_from(repo_path, repo_path_local)
        else:
            repo = git.Repo(repo_path)
        if repo.is_dirty():
            raise ValueError('repo is dirty')

        # The most recent tag (by commit date) must also be the current HEAD.
        tags = sorted(repo.tags, key=lambda tag: tag.commit.committed_datetime)
        tag_repo = tags[-1]
        if tag_repo.commit != repo.head.commit:
            raise ValueError(f'repo tip is not at tag {tag_repo}')

    return tag_repo.name
def get_github_version(url: str = CHANDRA_MODELS_LATEST_URL,
                       timeout: Union[int, float] = 5) -> Optional[str]:
    """Get latest chandra_models GitHub repo release tag (version).

    This queries GitHub for the latest release of chandra_models.

    Parameters
    ----------
    url : str
        URL for latest chandra_models release on GitHub API
    timeout : int, float
        Request timeout (sec, default=5)

    Returns
    -------
    str, None
        Tag name (str) of latest chandra_models release on GitHub.
        None if the request timed out, indicating indeterminate answer.
    """
    try:
        req = requests.get(url, timeout=timeout)
    except (requests.ConnectTimeout, requests.ReadTimeout):
        # Timeout is not an error here: caller treats None as "unknown".
        return None

    if req.status_code != requests.codes.ok:
        req.raise_for_status()

    page_json = req.json()
    return page_json['tag_name']
| 1.898438 | 2 |
gitlit/utils.py | aniruddhadave/gitlit | 0 | 12761234 | """
Utilities
@author: <NAME>
"""
import os
import stat
import sys
import zlib

from bkcharts.stats import stats
from sphinx.util.pycompat import sys_encoding
def read_file(path):
    """Return the entire contents of the file at *path* as bytes."""
    with open(path, 'rb') as stream:
        return stream.read()
def write_file(path, data):
    """Write *data* (bytes) to the file at *path*, replacing any contents."""
    with open(path, 'wb') as stream:
        stream.write(data)
def find_object(sha1_prefix):
    """Locate the single object whose SHA-1 starts with *sha1_prefix*.

    Returns the path of the object file inside .git/objects (relative to
    the current working directory).
    Raises ValueError if the prefix is shorter than two characters, matches
    no object, or matches more than one object.
    """
    if len(sha1_prefix) < 2:
        raise ValueError("Hash prefix must have 2 or more characters")
    # The first two hex digits name the fan-out directory; the rest is the
    # start of the object file name.
    obj_dir = os.path.join('.git', 'objects', sha1_prefix[:2])
    suffix = sha1_prefix[2:]
    matches = [name for name in os.listdir(obj_dir) if name.startswith(suffix)]
    if not matches:
        raise ValueError("Object {} not found".format(sha1_prefix))
    if len(matches) >= 2:
        raise ValueError("Multiple objects ({}) with prefix {}".format(
            len(matches), sha1_prefix))
    return os.path.join(obj_dir, matches[0])
def read_object(sha1_prefix):
    """Read the object with the given SHA-1 prefix from the object store.

    Returns a tuple (object_type, data_bytes). The on-disk format is
    zlib-compressed "<type> <size>\\0<data>".
    Raises ValueError if the object is not found.
    """
    path = find_object(sha1_prefix)
    raw = zlib.decompress(read_file(path))
    # Header ends at the first NUL byte: b"<type> <size>".
    nul = raw.index(b'\x00')
    object_type, size_text = raw[:nul].decode().split()
    data = raw[nul + 1:]
    expected_size = int(size_text)
    assert expected_size == len(data), 'expected size {} but received {} bytes'.format(
        expected_size, len(data))
    return (object_type, data)
def cat_file(mode, sha1_prefix):
    """Write the contents of (or info about) the object with the given SHA-1
    prefix to stdout.

    mode:
      'commit' / 'tree' / 'blob' -> raw data bytes (object type must match)
      'size'   -> object size in bytes
      'type'   -> object type
      'pretty' -> human-readable rendering (tree entries are listed)
    Raises ValueError on a type mismatch or an unknown mode.
    """
    object_type, data = read_object(sha1_prefix)
    if mode in ['commit', 'tree', 'blob']:
        if object_type != mode:
            raise ValueError('Expected object type {} but received {}'.
                             format(mode, object_type))
        # data is bytes: write through the binary buffer, not the text stream.
        sys.stdout.buffer.write(data)
    elif mode == 'type':
        print(object_type)
    elif mode == 'size':
        print(len(data))
    elif mode == 'pretty':
        if object_type in ['commit', 'blob']:
            # Was sys_encoding.stdout.write(data); sys_encoding is a string
            # (from sphinx), not a stream, so that always raised.
            sys.stdout.buffer.write(data)
        elif object_type == 'tree':
            # Renamed loop variable (was 'mode', shadowing the parameter),
            # and actually print each entry (the original computed
            # type_string but emitted nothing).
            # NOTE(review): read_tree is not defined in this module -- confirm
            # where it is expected to come from.
            for entry_mode, path, sha1 in read_tree(data=data):
                type_string = 'tree' if stat.S_ISDIR(entry_mode) else 'blob'
                print('{:06o} {} {}\t{}'.format(entry_mode, type_string, sha1, path))
        else:
            assert False, 'Unhandled object type: {}'.format(object_type)
    else:
        raise ValueError('Unexpected mode type: {}'.format(mode))
| 2.4375 | 2 |
eval/pred_hmd_s.py | Taye310/hmd | 0 | 12761235 | from __future__ import print_function
import torch, PIL.Image, cv2, pickle, sys, argparse
import numpy as np
import openmesh as om
from tqdm import trange
sys.path.append("../src/")
from network import shading_net
import renderer as rd
from utility import subdiv_mesh_x4
from utility import CamPara
from utility import make_trimesh
from utility import flatten_naval
from utility import smpl_detoe
from matplotlib import pyplot as plt
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--num', type = int, required = True,
                    help = 'data_num')
parser.add_argument('--set', type = str, required = True,
                    help = 'recon or syn')
opt = parser.parse_args()
assert opt.set in ["recon", "syn"], \
    "set must be one of [recon, syn]"
# prepare: load the pretrained shading network on GPU 0 and set up the
# renderer / camera used for silhouette and depth projection.
data_num = int(opt.num)
model_file = "../demo/pretrained_model/pretrained_shading.pth"
device = torch.device("cuda:0")
net_shading = shading_net().to(device).eval()
net_shading.load_state_dict(torch.load(model_file, map_location='cuda:0'))
renderer = rd.SMPLRenderer(face_path =
                           "../predef/smpl_faces.npy")
cam_para = CamPara(K = np.array([[1000, 0, 224],
                                 [0, 1000, 224],
                                 [0, 0, 1]]))
# Vertex indices that must never be displaced by the shading refinement.
with open ('../predef/exempt_vert_list.pkl', 'rb') as fp:
    exempt_vert_list = pickle.load(fp)
tr = trange(data_num, desc='Bar desc', leave=True)
for test_num in tr:
    # read mesh
    mesh = om.read_trimesh("./eval_data/%s_set/pred_save/a_%03d.obj" % \
                           (opt.set, test_num))
    proj_sil = renderer.silhouette(verts = mesh.points())
    proj_sil_l = cv2.resize(proj_sil, dsize=(448, 448))
    proj_sil_l[proj_sil_l<0.5] = 0
    proj_sil_l[proj_sil_l>=0.5] = 1
    # load data
    src_img = np.array(PIL.Image.open("./eval_data/%s_set/input_img/%03d_img.png"%\
                                      (opt.set, test_num)))
    src_img_l = cv2.resize(src_img, dsize=(448, 448))
    # HWC -> NCHW float tensor in [0, 1] for the network.
    input_arr = np.rollaxis(src_img_l, 2, 0)
    input_arr = np.expand_dims(input_arr, 0)
    input_arr = torch.tensor(input_arr).float().to(device)
    input_arr = input_arr/255.0
    proj_sil_l = np.expand_dims(proj_sil_l, 0)
    proj_sil_l = np.expand_dims(proj_sil_l, 0)
    proj_sil_l = torch.tensor(proj_sil_l)
    proj_sil_l = proj_sil_l.float().to(device)
    # predict
    pred = net_shading(input_arr, proj_sil_l)
    pred_depth = np.array(pred.data.cpu()[0][0])
    # pred_depth = np.load('/home/zhangtianyi/github/hmd/eval/eval_data/syn_set/pred_depth/' + '%03d_img.npy'%\
    #                       (test_num))
    # pred_depth = pred_depth*5.0
    #show_img_arr(src_img)
    mesh = flatten_naval(mesh)
    # remove toes
    mesh = smpl_detoe(mesh)
    # subdivide the mesh to x4
    subdiv_mesh = subdiv_mesh_x4(mesh)
    # genrate boundary buffering mask: stamp soft-falloff circles along the
    # rendered boundary so depth edits fade out near the silhouette edge.
    sil_img = rd.render_sil(subdiv_mesh)
    bound_img = rd.render_bound(subdiv_mesh)
    radius = 10
    circ_template = np.zeros((radius*2+1, radius*2+1))
    for i in range(radius):
        cv2.circle(img = circ_template,
                   center = (radius, radius),
                   radius = i+2,
                   color = (radius-i)*0.1,
                   thickness = 2)
    img_size = bound_img.shape
    # NOTE(review): np.float is deprecated in modern NumPy; works only on
    # older versions -- confirm the pinned NumPy release.
    draw_img = np.zeros(img_size, dtype=np.float)
    draw_img = np.pad(draw_img, radius, 'edge')
    for y in range(img_size[0]):
        for x in range(img_size[1]):
            if bound_img[y, x] == 0:
                continue
            win = draw_img[y:y+2*radius+1, x:x+2*radius+1]
            win[circ_template>win] = circ_template[circ_template>win]
            draw_img[y:y+2*radius+1, x:x+2*radius+1] = win
    # The hard-coded 10 here matches radius above (the pad width).
    final_mask = sil_img - draw_img[10:10+img_size[0], 10:10+img_size[1]]
    final_mask[sil_img==0] = 0
    # apply bias
    d_max = np.max(pred_depth[pred_depth!=0])
    d_min = np.min(pred_depth[pred_depth!=0])
    bias = -(d_max - d_min)/2.
    pred_depth = pred_depth + bias
    # apply bright scale
    weight_map = np.dot(src_img_l[...,:3], [0.299, 0.587, 0.114])
    pred_depth = pred_depth * weight_map / 255.
    pred_depth = pred_depth * 0.001
    pred_depth = pred_depth * final_mask
    # plt.imshow(pred_depth)
    # plt.show()
    # project mesh to depth and merge with depth difference
    proj_depth, visi_map = rd.render_depth(subdiv_mesh, require_visi = True)
    # get all visible vertex index
    verts = subdiv_mesh.points()
    faces = subdiv_mesh.face_vertex_indices()
    visi_vert_inds = []
    for y in range(visi_map.shape[0]):
        for x in range(visi_map.shape[1]):
            f_ind = visi_map[y, x]
            if f_ind >= len(faces):
                continue
            else:
                fv = faces[f_ind]
                visi_vert_inds.append(fv[0])
                visi_vert_inds.append(fv[1])
                visi_vert_inds.append(fv[2])
    visi_vert_inds = set(visi_vert_inds)
    # filter out exempt version
    visi_vert_inds = list(set(visi_vert_inds).difference(exempt_vert_list))
    # Keep only vertices whose projected depth agrees with the depth map
    # (i.e. vertices that are actually front-facing / unoccluded).
    visi_vert_inds_m = []
    for i in visi_vert_inds:
        xy = cam_para.project(verts[i])
        x = int(round(xy[1]))
        y = int(round(xy[0]))
        if x<0 or y<0 or x>=448 or y>=448:
            continue
        if np.absolute(proj_depth[x, y] - verts[i,2])<0.01:
            visi_vert_inds_m.append(i)
    # Displace each kept vertex along z by the predicted depth delta.
    for i in visi_vert_inds_m:
        xy = cam_para.project(verts[i])
        x = int(round(xy[1]))
        y = int(round(xy[0]))
        depth = proj_depth[x, y] + pred_depth[x, y]
        #print(depth, verts[i])
        if depth>8.:
            continue
        verts[i][2] = depth
    deformed_mesh = make_trimesh(verts, faces)
    om.write_mesh("./eval_data/%s_set/pred_save/s_%03d.obj" % \
                  (opt.set, test_num), deformed_mesh)
| 2.046875 | 2 |
src/tiden_gridgain/apps/__init__.py | mshonichev/tiden_gridgain | 0 | 12761236 | from .hazelcast import Hazelcast, HzException
from .mysql import Mysql
from .webconsole import Webconsole
from .gridgain import Gridgain
# Explicit public API of the apps package.
__all__ = [
    "Hazelcast",
    "HzException",
    "Mysql",
    "Webconsole",
    "Gridgain",
]
| 1.109375 | 1 |
rapidsms/backends/kannel/migrations/0002_auto_20150801_2142.py | catalpainternational/rapidsms | 330 | 12761237 | <reponame>catalpainternational/rapidsms
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter kannel DeliveryReport fields: widen char columns and set the
    delivery-status choices (success/failure/buffered/submit/reject)."""
    dependencies = [
        ('kannel', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='deliveryreport',
            name='message_id',
            field=models.CharField(max_length=255, verbose_name='Message ID'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='deliveryreport',
            name='sms_id',
            field=models.CharField(max_length=36, verbose_name='SMS ID'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='deliveryreport',
            name='smsc',
            field=models.CharField(max_length=255, verbose_name='SMSC'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='deliveryreport',
            name='status',
            field=models.SmallIntegerField(choices=[(1, 'Delivery Success'), (2, 'Delivery Failure'), (4, 'Message Buffered'), (8, 'SMSC Submit'), (16, 'SMSC Reject')]),
            preserve_default=True,
        ),
    ]
| 1.664063 | 2 |
cogs/skill.py | shamimi367/charmy.py | 0 | 12761238 | from discord.ext import commands
import discord
import functions as fnc
from cogs.help import get_help_skill
class Skill(commands.Cog):
    """Cog providing the !skill command: look up a rare skill and its holders.

    User-facing messages are in Japanese and are kept unchanged.
    """
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    # Fired once this cog is loaded and the bot is ready
    async def on_ready(self):
        print("Load Skill module...")
    @commands.command()
    async def skill(self, ctx, *args):
        """Search skills by (partial) name and reply with an embed showing
        the skill's effect and every character that has it."""
        if len(args) == 0 or args[0] == "":
            await ctx.send("調べたいレアスキルの名前を入力してね!")
            return
        search = args[0]
        if args[0] == "-help":
            embed = get_help_skill()
            await ctx.send(embed=embed)
            return
        # Characters LEFT JOIN skills so every holder of the matched skill is
        # returned; the LIKE pattern enables partial-name search.
        sql = "SELECT name, skill, effect FROM characters\
            LEFT OUTER JOIN skills\
            ON characters.skill_cd = skills.id\
            WHERE skill_cd = (SELECT id FROM skills WHERE skill LIKE %s);"
        # First fetch just checks whether anything matched at all.
        result = fnc.select_sql_with_param_fetch(sql, ("%" + str(search) + "%",))
        if result is None:
            await ctx.send("そのレアスキルはまだ未実装か、名前が間違ってるよ!")
            return
        results = fnc.select_sql_with_param_fetchall(sql, ("%" + str(search) + "%",))
        # Build the " / "-separated list of characters holding the skill.
        members = ""
        for i in range(len(results)):
            members += results[i][0] + " / "
        embed = discord.Embed(
            title=results[0][1], description=results[0][2], color=0x7289DA
        )
        embed.set_footer(text="スキル保有者: " + members)
        await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    bot.add_cog(Skill(bot))
| 2.78125 | 3 |
main.py | kirillkuzin/proxydealer | 0 | 12761239 | <reponame>kirillkuzin/proxydealer
import asyncio
from fastapi import FastAPI
from proxybroker import Broker
app = FastAPI()
proxies = asyncio.Queue()
broker = Broker(proxies)
@app.post('/')
async def handler_find_new_proxies():
    """Kick off a proxybroker search that fills the shared proxies queue."""
    # Schedule the search and wait for it to complete before responding.
    task = asyncio.create_task(broker.find(types=['HTTPS'],
                                           limit=1000))
    await task
@app.get('/')
async def handler_get_proxy():
    """Pop one proxy off the queue and return its connection details.

    NOTE(review): Queue.get_nowait raises QueueEmpty when no proxies are
    buffered, which will surface as a server error -- confirm whether an
    empty queue should be handled explicitly.
    """
    proxy = proxies.get_nowait()
    proxy_data = {'host': proxy.host,
                  'port': proxy.port,
                  'types': proxy.types}
    return proxy_data
@app.get('/check')
async def handler_check_proxy_queue():
    """Return the number of proxies currently buffered in the queue."""
    return proxies.qsize()
| 2.265625 | 2 |
eds/openmtc-gevent/futile/src/futile/caching/__init__.py | piyush82/elastest-device-emulator-service | 0 | 12761240 | '''
Created on 17.07.2011
@author: kca
'''
from ..collections import OrderedDict
import futile
class LRUCache(OrderedDict):
    """An OrderedDict-backed cache that evicts its least-recently-used entry.

    Reading or writing a key moves it to the most-recently-used end; once
    more than ``max_items`` entries are stored, the oldest one is dropped.
    Access can optionally be serialized with an RLock (``threadsafe``).
    """

    # Default capacity, used when max_items is not given.
    max_items = 100

    def __init__(self, max_items = None, threadsafe = None, *args, **kw):
        super(LRUCache, self).__init__(*args, **kw)
        if max_items is not None:
            if max_items <= 0:
                raise ValueError(max_items)
            self.max_items = max_items
        if threadsafe is None:
            # Fall back to the library-wide default.
            threadsafe = futile.THREADSAFE
        if threadsafe:
            from threading import RLock
            self.__lock = RLock()
        else:
            self.__lock = None
        # NOTE: the original also rebound self.__getitem__/__setitem__ to the
        # unlocked variants here; that was dead code (special methods are
        # looked up on the type, not the instance) and has been removed.

    def __getitem__(self, k):
        if self.__lock is None:
            return self._getitem(k)
        with self.__lock:
            return self._getitem(k)

    def get(self, k, default = None):
        """Like dict.get(), but still refreshes the LRU position on a hit."""
        try:
            return self[k]
        except KeyError:
            return default

    def _getitem(self, k):
        # Delete and re-insert the key so it becomes the most recent entry.
        v = super(LRUCache, self).__getitem__(k)
        del self[k]
        super(LRUCache, self).__setitem__(k, v)
        return v

    def __iter__(self):
        # Iterate over a snapshot so the cache may be mutated mid-iteration.
        for k in tuple(super(LRUCache, self).__iter__()):
            yield k

    def __setitem__(self, k, v):
        if self.__lock is None:
            return self._setitem(k, v)
        with self.__lock:
            self._setitem(k, v)

    def _setitem(self, k, v):
        super(LRUCache, self).__setitem__(k, v)
        if len(self) > self.max_items:
            # Evict the least recently used (first) entry.
            self.popitem(False)
| 3 | 3 |
app/news/views.py | mzs9540/covid19 | 1 | 12761241 | <gh_stars>1-10
from rest_framework import viewsets, generics
from rest_framework.authentication import TokenAuthentication
from core import models
from core.permissions.permission import PermissionsForStaff
from news import serializers
class WhoNewsViewSet(viewsets.ModelViewSet):
    """Full CRUD endpoint over CovidNews records.

    Token-authenticated and restricted to staff via PermissionsForStaff.
    """
    serializer_class = serializers.WhoNewsSerializer
    queryset = models.CovidNews.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (PermissionsForStaff,)
class UpdatesListView(generics.ListAPIView):
    """Read-only list of India COVID-19 updates, selected by the 'news' URL kwarg."""
    permission_classes = (PermissionsForStaff,)
    authentication_classes = (TokenAuthentication,)
    def get_queryset(self):
        # NOTE(review): only news == 'india' is handled; any other value makes
        # this return None, which DRF rejects at runtime -- confirm the URLconf
        # restricts the 'news' kwarg to 'india'.
        if self.kwargs['news'] == 'india':
            # Stash the model so get_serializer_class can pick it up below.
            self.kwargs['model'] = models.IndiaCovid19Update
            return models.IndiaCovid19Update.objects.all()
    def get_serializer_class(self):
        # Relies on get_queryset() having run first to set kwargs['model'];
        # presumably DRF's list flow guarantees that order -- TODO confirm.
        serializers.UpdatesSerializer.Meta.model = self.kwargs['model']
        return serializers.UpdatesSerializer
| 2.171875 | 2 |
imm/test/test_alphabet.py | EBI-Metagenomics/imm-py | 0 | 12761242 | <gh_stars>0
import pytest
from imm import Alphabet
def test_alphabet():
    """Exercise a DNA alphabet with b'X' as the any-symbol."""
    abc = Alphabet.create(b"ACGT", b"X")
    assert abc.length == 4
    # Each symbol must exist and round-trip through its index.
    for idx, sym in enumerate([b"A", b"C", b"G", b"T"]):
        assert abc.has_symbol(sym)
        assert abc.symbol_idx(sym) == idx
        assert abc.symbol_id(idx) == sym
    assert abc.symbols == b"ACGT"
    assert str(abc) == "{ACGT}"
    assert repr(abc) == "<Alphabet:{ACGT}>"
    # A non-ASCII str is rejected outright...
    with pytest.raises(TypeError):
        Alphabet.create("ACGTç", b"X")
    # ...and its encoded bytes are an invalid alphabet as well.
    with pytest.raises(RuntimeError):
        Alphabet.create("ACGTç".encode(), b"X")
| 2.59375 | 3 |
boj/11966.py | pparkddo/ps | 1 | 12761243 | import math
value = math.log2(int(input()))
print(1 if int(value) == value else 0)
| 3.140625 | 3 |
utilities.py | SalmanBurhan/PyJackett | 0 | 12761244 | def human_readable_size(size, decimal_places=2):
for unit in ['B','KB','MB','GB','TB']:
if size < 1024.0:
break
size /= 1024.0
return f"{size:.{decimal_places}f} {unit}"
| 2.875 | 3 |
00_Tools/SineModel-1.0/SineModel.py | EdwardLin2014/CNN-with-IBM-for-Singing-Voice-Separation | 20 | 12761245 | <filename>00_Tools/SineModel-1.0/SineModel.py
import os, sys
import numpy as np
import math
from scipy.fftpack import fft, ifft
Tool_UtilFunc_DirStr = '../UtilFunc-1.0/'
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), Tool_UtilFunc_DirStr))
tol = 1e-14 # threshold used to compute phase
def dftAnal(x, N, window):
    """Analyze one frame of a signal with the discrete Fourier transform.

    x: input signal frame (same length as ``window``)
    N: DFT size (no constraint on power of 2)
    window: analysis window (as a vector)
    returns X, mX, pX: complex, magnitude and unwrapped-phase spectra of the
    positive-frequency half, DC bin included
    """
    # window the input sound
    xw = x * window
    # zero-phase windowing: split the windowed frame and swap the halves so
    # the window centre lands on sample 0 of the FFT buffer
    M = window.size                                         # window Size
    hM1 = int(math.floor((M+1)/2))                          # half analysis window size by floor
    hM2 = int(math.floor(M/2))                              # half analysis window size by floor
    fftbuffer = np.zeros(N)                                 # initialize buffer for FFT
    if hM2%2 == 1:
        fftbuffer[:hM2] = xw[hM1:]
        fftbuffer[-hM1:] = xw[:hM1]
    else:                                                   # hM1 == hM2
        fftbuffer[:hM1] = xw[hM2:]
        fftbuffer[-hM2:] = xw[:hM2]
    # Compute FFT and keep only the non-negative frequencies
    hN = int(math.floor(N/2))+1                             # size of positive spectrum, it includes sample 0
    X = fft(fftbuffer)
    X = X[:hN]
    # for phase calculation set to 0 the small values (avoids phase noise)
    X.real[np.abs(X.real) < tol] = 0.0
    X.imag[np.abs(X.imag) < tol] = 0.0
    # unwrapped phase spectrum of positive frequencies
    pX = np.unwrap(np.angle(X))
    # compute absolute value of positive side
    mX = abs(X)
    return X, mX, pX
def stft(x, Parm):
    """Short-time Fourier transform analysis of a sound.

    Input:
        x: audio signal as a column vector
        Parm: STFT configuration with attributes
            window: analysis window (as a vector)
            M: window size
            N: DFT size (no constraint on power of 2)
            H: hop size
    Output:
        X, mX, pX: complex / magnitude / phase spectrograms (bins x frames)
        remain: samples between the centre of the last frame and the end of
            the padded signal; needed by istft for resynthesis
        numFrames: number of frames
        numBins: number of frequency bins
    """
    window = Parm.window
    M = Parm.M
    N = Parm.N
    H = Parm.H
    # prepare x
    hM1 = int(math.floor((M+1)/2))                      # half analysis window size by floor
    hM2 = int(math.floor(M/2))                          # half analysis window size by floor
    x = np.append(np.zeros(hM2),x)                      # add zeros at beginning to center first window at sample 0
    x = np.append(x,np.zeros(hM1))                      # add zeros at the end to analyze last sample
    # prepare window
    window = window / sum(window)                       # normalize analysis window
    # prepare stft looping: pin holds the centre sample of every frame
    pin = list(np.arange(hM2, x.size-hM1, H))
    remain = x.size - hM1 - pin[-1] - 1
    # prepare output
    numFrames = len(pin);
    hN = int(math.floor(N/2))+1                         # size of positive spectrum, it includes sample 0
    numBins = hN;
    X = np.zeros((hN, numFrames),dtype=complex)
    mX = np.zeros((hN, numFrames))
    pX = np.zeros((hN, numFrames))
    # Note index diff for odd/even size of analysis window
    t = 0;
    if hM2%2 == 1:
        for i in pin:
            X[:,t], mX[:,t], pX[:,t] = dftAnal(x[(i-hM1+1):i+hM2+1], N, window)
            t = t + 1
    else:
        for i in pin:
            X[:,t], mX[:,t], pX[:,t] = dftAnal(x[(i-hM1):i+hM2], N, window)
            t = t + 1
    return X, mX, pX, remain, numFrames, numBins
def dftSynth(mX, pX, M, N):
    """Synthesize one time-domain frame from DFT magnitude and phase.

    mX: magnitude spectrum (positive half)
    pX: phase spectrum (positive half)
    M: window size
    N: DFT size (no constraint on power of 2)
    returns y: output frame of length M
    """
    hN = mX.size                                        # size of positive spectrum, it includes sample 0
    hM1 = int(math.floor((M+1)/2))                      # half analysis window size by rounding
    hM2 = int(math.floor(M/2))                          # half analysis window size by floor
    y = np.zeros(M)                                     # initialize output array
    Y = np.zeros(N,dtype=complex)                       # clean output spectrum
    Y[:hN] = mX * np.exp(1j*pX)
    # rebuild the negative-frequency half as the conjugate mirror image so
    # the inverse FFT is (numerically) real
    if hN%2 == 1:
        Y[hN:] = mX[-1:1:-1] * np.exp(-1j*pX[-1:1:-1])
    else:
        Y[hN:] = mX[:0:-1] * np.exp(-1j*pX[:0:-1])
    fftbuffer = np.real(ifft(Y))                        # compute inverse FFT
    # undo the zero-phase windowing done in dftAnal (swap halves back)
    if hM2%2 == 1:
        y[:hM1] = fftbuffer[-hM1:]                      # undo zero-phase window
        y[hM1:] = fftbuffer[:hM2]
    else:                                               # hM1 == hM2
        y[:hM2] = fftbuffer[-hM2:]                      # undo zero-phase window
        y[hM2:] = fftbuffer[:hM1]
    return y
def istft(mY, pY, Parm):
    """Inverse short-time Fourier transform (overlap-add resynthesis).

    Input:
        mY: magnitude spectrogram
        pY: phase spectrogram
        Parm: STFT configuration with attributes M (window size), N (DFT
            size), H (hop size) and remain (samples past the last frame
            centre, as returned by stft)
    Output:
        y: resynthesized sound
    """
    remain = Parm.remain
    M = Parm.M
    N = Parm.N
    H = Parm.H
    # prepare istft looping
    hM1 = int(math.floor((M+1)/2))                      # half analysis window size by rounding
    hM2 = int(math.floor(M/2))                          # half analysis window size by floor
    numFrames = mY[0,:].size                            # number of frames
    y = np.zeros(hM2 + 1 + (numFrames-1)*H + remain + hM1)  # initialize output array
    ## run istft
    # Note index diff for odd/even size of analysis window
    pin = list(np.arange(hM2, y.size-hM1, H))
    t = 0
    if hM2%2 == 1:
        for i in pin:
            ytmp = dftSynth(mY[:,t], pY[:,t], M, N)
            y[(i-hM1+1):i+hM2+1] = y[(i-hM1+1):i+hM2+1] + H*ytmp    # overlap-add to generate output sound
            t = t + 1
    else:
        for i in pin:
            ytmp = dftSynth(mY[:,t], pY[:,t], M, N)
            y[(i-hM1):i+hM2] = y[(i-hM1):i+hM2] + H*ytmp            # overlap-add to generate output sound
            t = t + 1
    # delete half of first window and half of the last window which was added in stft
    y = y[hM2:-hM1+1]
    return y
| 2.40625 | 2 |
scripts/benchmarking_ovito.py | hpleva/ai4materials | 23 | 12761246 | import ovito
print("Hello, this is OVITO %i.%i.%i" % ovito.version)
# Import OVITO modules.
from ovito.io import *
from ovito.modifiers import *
from ovito.data import *
from collections import Counter
# Import standard Python and NumPy modules.
import sys
import numpy
import os
from ovito.pipeline import StaticSource, Pipeline
from ovito.io.ase import ase_to_ovito
from ase.atoms import Atoms
from ase.db import connect
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
import itertools
##################
# run with
# conda activate ovito
# ~/apps/ovito-3.0.0-dev284-x86_64/bin/ovitos benchmarking_ovito.py
####################
def read_ase_db(db_path):
    """Load every ASE Atoms object stored in the ASE database at *db_path*.

    .. codeauthor:: <NAME> <<EMAIL>>
    """
    db = connect(db_path)
    loaded = []
    for row_index in range(len(db)):
        # Selections in ASE databases are 1-based.
        atoms = db.get_atoms(selection=row_index + 1, add_additional_information=True)
        # When writing to an SQLite database ASE does not store atoms.info
        # directly; it stashes it under atoms.info['data'].  Move it back to
        # its original place so callers see the usual layout.
        if 'data' in atoms.info.keys():
            atoms.info = atoms.info['data']
        loaded.append(atoms)
    return loaded
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print the confusion matrix *cm* and draw it as a heat map.

    When *normalize* is true, each row is rescaled to sum to 1 before
    printing and plotting.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)

    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White text on dark cells, black on light ones.
        text_color = "white" if cm[row, col] > cutoff else "black"
        plt.text(col, row, format(cm[row, col], cell_fmt),
                 horizontalalignment="center",
                 color=text_color)

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()
#filepath = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/structures_for_paper/four_grains/four_grains_poly.xyz'
#node = import_file(filepath, columns=["Particle Type", "Position.X", "Position.Y", "Position.Z"])
# Pick exactly one dataset below; the others are kept for quick switching.
ase_db_dataset_dir = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/db_ase'
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_pristine' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.6%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-4%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-5%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-8%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-10%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-12%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-20%' + '.db')
ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-30%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-50%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-5%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-10%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-20%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-50%' + '.db')
ase_atoms_list = read_ase_db(db_path=ase_db)
# Per-atom predicted/true labels accumulated over all structures.
y_pred = []
y_true = []
atom_classes_list = []
for idx, atoms in enumerate(ase_atoms_list):
    if idx % 1000 == 0:
        print(idx)
    # Skip diamond (227) and simple-cubic (221) structures, which the
    # Ackland-Jones analysis below cannot label.
    # if str(atoms.info['target']) == '227':
    if str(atoms.info['target']) == '227' or str(atoms.info['target']) == '221':
        pass
    # if False:
    #     pass
    else:
        # atoms = atoms*(2, 2, 2)
        data = ase_to_ovito(atoms)
        node = Pipeline(source=StaticSource(data=data))
        # node.modifiers.append(CommonNeighborAnalysisModifier(mode=CommonNeighborAnalysisModifier.Mode.FixedCutoff))
        # node.modifiers.append(CommonNeighborAnalysisModifier(mode=CommonNeighborAnalysisModifier.Mode.AdaptiveCutoff))
        node.modifiers.append(AcklandJonesModifier())
        # node.modifiers.append(BondAngleAnalysisModifier())
        # node.modifiers.append(PolyhedralTemplateMatchingModifier(rmsd_cutoff=0.0))
        # Let OVITO's data pipeline do the heavy work.
        node.compute()
        # Per-atom integer structure type assigned by the modifier.
        atom_classes = list(node.output.particle_properties['Structure Type'].array)
        #AcklandJonesModifier.Type.OTHER(0)
        #AcklandJonesModifier.Type.FCC(1)
        #AcklandJonesModifier.Type.HCP(2)
        #AcklandJonesModifier.Type.BCC(3)
        #AcklandJonesModifier.Type.ICO(4)
        # CommonNeighborAnalysisModifier.Type.OTHER(0)
        # CommonNeighborAnalysisModifier.Type.FCC(1)
        # CommonNeighborAnalysisModifier.Type.HCP(2)
        # CommonNeighborAnalysisModifier.Type.BCC(3)
        # CommonNeighborAnalysisModifier.Type.ICO(4)
        #
        # Maps modifier type indices to space-group labels used by y_true.
        classes = dict(ack_jones=['None', '225', '194', '229', 'Ic'], cna=['None', '225', '194', '229', 'Ic'],
                       ptm=['None', '225', '194', '229', 'Ic', '221', '227', '227'],
                       baa=['None', '225', '194', '229', 'Ic'])
        # ovito 3.0.0
        # Type.OTHER(0)
        # PolyhedralTemplateMatchingModifier.Type.FCC(1)
        # PolyhedralTemplateMatchingModifier.Type.HCP(2)
        # PolyhedralTemplateMatchingModifier.Type.BCC(3)
        # PolyhedralTemplateMatchingModifier.Type.ICO(4)
        # PolyhedralTemplateMatchingModifier.Type.SC(5)
        # PolyhedralTemplateMatchingModifier.Type.CUBIC_DIAMOND(6)
        # PolyhedralTemplateMatchingModifier.Type.HEX_DIAMOND(7)
        y_pred_i = [classes['cna'][item] for item in atom_classes]
        #y_pred_acna = [acna_classes[item] for item in y_pred]
        # y_pred_baa = [baa_classes[item] for item in y_pred]
        #print(y_pred_this)
        #atoms = atoms * (2, 2, 2)
        # Every atom of a structure shares that structure's true label.
        atom_class_true = [str(atoms.info['target'])] * len(atoms)
        y_true.extend(atom_class_true)
        y_pred.extend(y_pred_i)
        atom_classes_list.extend(atom_classes)
print(len(y_true))
print('y_true', Counter(y_true))
print('y_pred', Counter(y_pred))
#print(Counter(y_true), Counter(y_pred))
print('Accuracy: {}'.format(accuracy_score(y_true, y_pred)))
cnf_matrix = confusion_matrix(y_true, y_pred)
np.set_printoptions(precision=4)
print(cnf_matrix)
# y_pred Counter({'194': 583828, '229': 116999, '225': 115152, 'None': 968})
ack_jones_classes = ['194', '229', '225', 'None']
# plot_confusion_matrix(cnf_matrix, classes=ack_jones_classes,
#                       normalize=False, title='Confusion matrix, without normalization')
# Loop over particles and print their CNA indices.
#for idx_particle, particle_index in enumerate(range(node.output.number_of_particles)):
    #pass
    # Print particle index (1-based).
    #sys.stdout.write("%i " % (particle_index + 1))
#outname = 'BondAngleAnalysis.counts.'
#print(node.output.particle_properties['Structure Type'].array[idx_particle])
# print(y_pred[idx_particle])
# Create local list with CNA indices of the bonds of the current particle.
#bond_index_list = list(bond_enumerator.bonds_of_particle(particle_index))
#local_cna_indices = cna_indices[bond_index_list]
# Count how often each type of CNA triplet occurred.
#unique_triplets, triplet_counts = row_histogram(local_cna_indices)
# Print list of triplets with their respective counts.
#for triplet, count in zip(unique_triplets, triplet_counts):
#    sys.stdout.write("%s:%i " % (triplet, count))
# End of particle line
#sys.stdout.write("\n")
| 2.625 | 3 |
LCD/boot.py | UncleEngineer/MicroPython | 0 | 12761247 | from esp8266_i2c_lcd import I2cLcd
from machine import I2C
from machine import Pin
# i2c = I2C(scl=Pin(22),sda=Pin(21),freq=100000)
# lcd = I2cLcd(i2c, 0x27, 2, 16)
# lcd.clear()
# lcd.putstr('Uncle Engineer\nMicroPython')
i2c = I2C(scl=Pin(22),sda=Pin(21),freq=100000)
lcd = I2cLcd(i2c, 0x27, 2, 16)
import utime as time
text = (' '*16)+ 'Uncle Engineer MicroPython'
count = 0
counttext = len(text)
while True:
lcd.clear()
print(text[count:16+count])
lcd.putstr(text[count:16+count])
time.sleep(0.5)
count += 1
if count > counttext:
count = 0
| 2.625 | 3 |
scripts/getYahooArticles.py | ethanabowen/StockProject | 1 | 12761248 | <gh_stars>1-10
import sys
import pymongo
import urllib.request
import json
import time
from datetime import datetime
from bs4 import BeautifulSoup # To get everything
from pymongo import MongoClient
def main(arg1):
    """Scrape the Yahoo Finance headline page for ticker *arg1* and upsert
    each article into the MongoDB ``Stocks.Articles`` collection."""
    #connect to MongoDB (default localhost connection)
    client = MongoClient()
    db = client.Stocks
    articles = db.Articles
    #Query site for HTML
    ticker = arg1
    sourceSite = 'Yahoo'
    url = 'http://finance.yahoo.com/q/h?s=' + ticker
    #print(url)
    response = urllib.request.urlopen(url)
    #Parse and store Article information from Yahoo
    htmlArticles = []
    soup = BeautifulSoup(response)
    ul = soup.find('div', { 'class' : 'mod yfi_quote_headline withsky'})
    for q in ul.find_all('ul'): #All articles for a day
        #print("q\t",q)
        w = q.find('li') #First li
        while(w is not None): #Iterate through each article
            #print("w\t",w)
            title = w.a.string
            link = w.a['href']
            # Citation text with the embedded date span stripped out.
            cite = w.cite.text.replace(w.cite.span.text, '')
            date = w.cite.span.text
            article = [title, link, cite, date]
            htmlArticles.append(article)
            # NOTE(review): next_sibling can yield whitespace text nodes in
            # some markup, which would break w.a above -- confirm the page
            # keeps <li> elements adjacent.
            w = w.next_sibling #Next Article
    #Setup for storage in Mongodb
    for article in htmlArticles:
        jsonArticle = {}
        jsonArticle['ticker'] = ticker
        jsonArticle['title'] = article[0]
        jsonArticle['url'] = article[1]
        jsonArticle['cite'] = article[2]
        jsonArticle['date'] = article[3]
        jsonArticle['source'] = sourceSite
        jsonArticle['weight'] = "0"
        jsonArticle['createTimeInMillis'] = datetime.now().microsecond
        # Upsert keyed on (ticker, url) so re-runs refresh instead of duplicating.
        articles.update({ 'ticker': ticker, 'url' : jsonArticle['url'] }, jsonArticle , upsert=True) #Insert into Mongo
    #Document, in the form of a Python dictionary....it's just JSON...
    #insertValue = { "url": "http://www.wsj.com/articles/u-s-chinese-universities-to-launch-technology-design-program-1434657616?ru=yahoo?mod=yahoo_itp",
    #"title": "U.S., Chinese Universities to Launch Technology, Design Program",
    #"date": "June 18, 2015 4:00 p.m. ET",
    #"cite": "",
    #"weight": "1"
    #"id": "0"
    #ticker: ""
    #"last_updated": datetime.datetime.utcnow()}
    #print(articles.find_one({'ticker': ticker}))
| 3 | 3 |
ms_deisotope/test/test_memory_source.py | mstim/ms_deisotope | 1 | 12761249 | import unittest
from ms_deisotope.data_source import memory
from ms_deisotope.test.common import datafile
from ms_deisotope.data_source import infer_type
# Precursor scan followed by its two product scans, in acquisition order;
# the iteration test below checks the loader preserves this order.
scan_ids = [
    "controllerType=0 controllerNumber=1 scan=10014",
    "controllerType=0 controllerNumber=1 scan=10015",
    "controllerType=0 controllerNumber=1 scan=10016"
]
class TestMemoryScanSource(unittest.TestCase):
    """Check that MemoryScanLoader reproduces scans read from an mzML file."""
    # On-disk fixture containing one precursor scan and two product scans.
    path = datafile("three_test_scans.mzML")

    @property
    def source_reader(self):
        # Fresh file-backed reader for the fixture (format inferred from content).
        return infer_type.MSFileLoader(self.path)

    @property
    def prepare_source(self):
        # Load every scan from the file reader into an in-memory loader.
        source = self.source_reader
        loader = memory.MemoryScanLoader.build(source)
        return loader

    def test_iteration(self):
        # The first bunch must yield the precursor followed by its products,
        # in the same order as scan_ids.
        g = iter(scan_ids)
        bunch = next(self.prepare_source)
        assert bunch.precursor.id == next(g)
        for product in bunch.products:
            assert product.id == next(g)

    def test_source_file_name_none(self):
        # An in-memory source is not backed by a file path.
        source = self.prepare_source
        assert source.source_file_name is None
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2.203125 | 2 |
classify_real_time.py | taeho0819/classify-real-time | 0 | 12761250 | <filename>classify_real_time.py
import argparse
import os.path
import re
import sys
import tarfile
import cv2
from time import sleep
import numpy as np
from six.moves import urllib
import tensorflow as tf
import time
from gtts import gTTS
import pygame
import os
from threading import Thread
import cv2
# Directory where the Inception model archive is downloaded and unpacked.
model_dir = '/tmp/imagenet'
# Pretrained Inception (2015-12-05) archive hosted by TensorFlow.
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# Threaded class for performance improvement
class VideoStream:
    """Read frames from a cv2.VideoCapture on a background thread.

    The reader thread continuously grabs the newest frame so that
    ``read()`` returns immediately instead of blocking on camera I/O.
    """

    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        """Spawn the reader thread and return self for chaining."""
        # Daemon thread: the previous non-daemon thread kept the process
        # alive after the main loop exited if stop() was never called.
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # Keep overwriting self.frame with the latest grab until stopped.
        while True:
            if self.stopped:
                # Release the camera handle once we are done with it
                # (the original leaked the capture device).
                self.stream.release()
                return
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Return the latest frame
        return self.frame

    def stop(self):
        """Signal the reader thread to exit and release the camera."""
        self.stopped = True
class NodeLookup(object):
    """Maps integer node IDs from the softmax output to human-readable labels."""
    def __init__(self,
                 label_lookup_path=None,
                 uid_lookup_path=None):
        # Default lookup files live next to the downloaded model.
        if not label_lookup_path:
            label_lookup_path = os.path.join(
                model_dir, 'test.pbtxt')
        if not uid_lookup_path:
            uid_lookup_path = os.path.join(
                model_dir, 'test.txt')
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        """Build the {node id: human-readable label} mapping.

        uid_lookup_path maps string UIDs to labels; label_lookup_path maps
        integer node IDs (the softmax indices) to those UIDs.
        """
        if not tf.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)
        # Loads mapping from string UID to human-readable string.
        # NOTE(review): assumes the regex yields the uid at index 0 and the
        # label at index 2 for every line -- confirm against the actual
        # lookup-file format.
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        p = re.compile(r'[n\d]*[ \S,]*')
        for line in proto_as_ascii_lines:
            parsed_items = p.findall(line)
            uid = parsed_items[0]
            human_string = parsed_items[2]
            uid_to_human[uid] = human_string
        # Loads mapping from string UID to integer node ID.
        node_id_to_uid = {}
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                # Strip the surrounding quotes and trailing newline.
                target_class_string = line.split(': ')[1]
                node_id_to_uid[target_class] = target_class_string[1:-2]
        # Loads the final mapping of integer node ID to human-readable string
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            if val not in uid_to_human:
                tf.logging.fatal('Failed to locate: %s', val)
            name = uid_to_human[val]
            node_id_to_name[key] = name
        return node_id_to_name

    def id_to_string(self, node_id):
        # Unknown node IDs map to an empty label rather than raising.
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]
def create_graph():
    """Load the retrained Inception graph definition into the default TF graph."""
    pb_path = os.path.join(model_dir, 'retrained_graph.pb')
    with tf.gfile.FastGFile(pb_path, 'rb') as graph_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_file.read())
    tf.import_graph_def(graph_def, name='')
def maybe_download_and_extract():
    """Download and extract the model tar file into model_dir if not present."""
    dest_directory = model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        # Progress callback used by urlretrieve to show download percentage.
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' %
                (filename,
                 float(
                     count *
                     block_size) /
                 float(total_size) *
                 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # Extract unconditionally; re-extracting an existing archive is harmless.
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
# Download and create graph
maybe_download_and_extract()
create_graph()
# Variables declarations
frame_count = 0
score = 0
start = time.time()
pygame.mixer.init()
pred = 0
last = 0
human_string = None
# Init video stream
vs = VideoStream(src=0).start()
# Start tensroflow session
with tf.Session() as sess:
    #softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    # Main capture/classify loop: grab frames, classify every 5th one,
    # announce confident predictions via text-to-speech, and overlay info.
    while True:
        frame = vs.read()
        frame_count += 1
        # Only run every 5 frames
        if frame_count % 5 == 0:
            # Save the image as the fist layer of inception is a DecodeJpeg
            cv2.imwrite("current_frame.jpg", frame)
            image_data = tf.gfile.FastGFile("./current_frame.jpg", 'rb').read()
            predictions = sess.run(
                softmax_tensor, {
                    'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)
            node_lookup = NodeLookup()
            # change n_pred for more predictions
            n_pred = 2
            top_k = predictions.argsort()[-n_pred:][::-1]
            print(top_k)
            print("###")
            labels = []
            for node_id in top_k:
                human_string_n = node_lookup.id_to_string(node_id)
                print(human_string_n)
                print("여기다")
                score = predictions[node_id]
                labels.append(human_string_n)
                # Keep only confident predictions (score > 0.3).
                if score > .3:
                    # Some manual corrections
                    # Kind of cheating
                    if human_string_n == "stethoscope":
                        human_string_n = "Headphones"
                    if human_string_n == "spatula":
                        human_string_n = "fork"
                    if human_string_n == "iPod":
                        human_string_n = "iPhone"
                    if human_string_n == "shirtsstripe":
                        human_string_n = "shirtsstripe"
                    if human_string_n == "hoodiecheck":
                        human_string_n = "hoodiecheck"
                    if human_string_n == "hoodiedefault":
                        human_string_n = "hoodiedefault"
                    if human_string_n == "tshirtsdefault":
                        human_string_n = "tshirtsdefault"
                    if human_string_n == "shirtsdefault":
                        human_string_n = "shirtsdefault"
                    if human_string_n == "jacketmilitary":
                        human_string_n = "jacketmilitary"
                    if human_string_n == "shirtscheck":
                        human_string_n = "shirtscheck"
                    if human_string_n == "tshirtsstripe":
                        human_string_n = "tshirtsstripe"
                    if human_string_n == "hoodiestripe":
                        human_string_n = "hoodiestripe"
                    if human_string_n == "suitdefault":
                        human_string_n = "suitdefault"
                    human_string = human_string_n
            print(human_string)
            print("??????????")
            lst = human_string.split()
            print(labels)
            print("^^^^^^^^^^")
            # Keep at most the first two words of the label for display/speech.
            human_string = " ".join(lst[0:2])
            #human_string_filename = str(lst[0])
        current = time.time()
        fps = frame_count / (current - start)
        # Speech module
        if last > 40 and pygame.mixer.music.get_busy(
        ) == False and human_string == human_string_n:
            pred += 1
            name = human_string + ".mp3"
            # Only get from google if we dont have it
            if not os.path.isfile(name):
                tts = gTTS(text="I see a " + human_string, lang='en')
                tts.save(name)
            last = 0
            pygame.mixer.music.load(name)
            pygame.mixer.music.play()
        # Show info during some time
        if last < 40 and frame_count > 10:
            cv2.putText(frame, human_string, (20, 400),
                        cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
            cv2.putText(frame, str(np.round(score, 2)) + "%",
                        (20, 440), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
        if frame_count > 20:
            cv2.putText(frame, "fps: " + str(np.round(fps, 2)),
                        (460, 460), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
        cv2.imshow("Frame", frame)
        last += 1
        # if the 'q' key is pressed, stop the loop
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
# cleanup everything
vs.stop()
cv2.destroyAllWindows()
sess.close()
print("Done")
| 2.359375 | 2 |