code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import skflow
from tensorflow.examples.tutorials.mnist import input_data
### Download and load MNIST data.
# read_data_sets downloads the dataset into MNIST_data/ on first run.
mnist = input_data.read_data_sets('MNIST_data')
# Alternative loader bundled with skflow (kept for reference):
#mnist = skflow.datasets.load_dataset('mnist')
### Linear classifier.
# Softmax-regression baseline: 10 classes (digits 0-9), SGD for 1000 steps.
classifier = skflow.TensorFlowLinearClassifier(
    n_classes=10, batch_size=100, steps=1000, learning_rate=0.01)
classifier.fit(mnist.train.images, mnist.train.labels)
# Evaluate accuracy on the held-out test split.
score = metrics.accuracy_score(mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
def max_pool_2x2(tensor_in):
    """Downsample a NHWC tensor by 2x with 2x2 max pooling (SAME padding)."""
    # Window and stride are identical: non-overlapping 2x2 patches.
    pool_shape = [1, 2, 2, 1]
    return tf.nn.max_pool(tensor_in, ksize=pool_shape, strides=pool_shape,
                          padding='SAME')
def conv_model(X, y):
    """Build a two-layer CNN for MNIST and return a logistic-regression head.

    X: flattened image batch (assumes 784 features per row -> 28x28x1;
       TODO confirm against caller).
    y: target labels, passed through to the logistic regression model.
    """
    # reshape X to 4d tensor with 2nd and 3rd dimensions being image width and height
    # final dimension being the number of color channels
    X = tf.reshape(X, [-1, 28, 28, 1])
    # first conv layer will compute 32 features for each 5x5 patch
    with tf.variable_scope('conv_layer1'):
        h_conv1 = skflow.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
                                    bias=True, activation=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)
    # second conv layer will compute 64 features for each 5x5 patch
    with tf.variable_scope('conv_layer2'):
        h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
                                    bias=True, activation=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
        # reshape tensor into a batch of vectors
        # (two rounds of 2x2 pooling: 28 -> 14 -> 7 spatial, 64 channels)
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    # densely connected layer with 1024 neurons
    h_fc1 = skflow.ops.dnn(h_pool2_flat, [1024], activation=tf.nn.relu, dropout=0.5)
    return skflow.models.logistic_regression(h_fc1, y)
# Training and predicting
# Wrap the custom graph-builder in a generic estimator; 20000 SGD steps.
classifier = skflow.TensorFlowEstimator(
    model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
    learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
# Evaluate accuracy on the held-out test split.
score = metrics.accuracy_score(mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
| hpssjellis/forth-tensorflow | skflow-examples/z16-mnist.py | Python | mit | 3,192 |
# -*- coding: utf-8 -*-
"""
Sound Pressure Level Plugin
Copyright (C) 2018 Olaf Lüke <olaf@tinkerforge.com>
__init__.py: package initialization
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from brickv.plugin_system.plugins.sound_pressure_level.sound_pressure_level import SoundPressureLevel
# Symbol looked up by brickv's plugin loader to instantiate this plugin.
device_class = SoundPressureLevel
| Tinkerforge/brickv | src/brickv/plugin_system/plugins/sound_pressure_level/__init__.py | Python | gpl-2.0 | 965 |
# -*- coding: utf-8 -*-
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# - Jordi Ballester Alomar
# Copyright 2017 MATMOZ d.o.o.
# - Matjaž Mozetič
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import purchase
| sysadminmatmoz/pmis | purchase_stock_analytic/models/__init__.py | Python | agpl-3.0 | 265 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import os
import tempfile
from collections import namedtuple
import pytest
import six
from flask import Flask
from flask_babelex import Babel
from flask_breadcrumbs import Breadcrumbs
from flask_mail import Mail
from flask_menu import Menu
from flask_security.utils import encrypt_password
from invenio_access import InvenioAccess
from invenio_access.models import ActionUsers
from invenio_access.permissions import superuser_access
from invenio_accounts import InvenioAccounts
from invenio_accounts.models import Role, User
from invenio_db import db as db_
from invenio_db import InvenioDB
from invenio_oauth2server import InvenioOAuth2Server, \
InvenioOAuth2ServerREST, current_oauth2server
from invenio_oauth2server.models import Token
from six import iteritems
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_accounts_rest import InvenioAccountsREST
@pytest.fixture()
def accounts_rest_permission_factory():
    """Permission factory of a module.

    Returns a dict mapping each REST action name to a permission-factory
    callable, plus the mutable ``allowed_users`` mapping that individual
    tests fill in to grant permissions to specific users.
    """
    # will be initialized later as user_id: [role ids]
    # (list-valued actions hold plain user ids instead)
    allowed_users = {
        'read_role': {},
        'update_role': {},
        'delete_role': {},
        'read_roles_list': [],
        'create_role': [],
        'assign_role': {},
        'unassign_role': {},
        'read_user_roles_list': {},
        'read_user_properties': {},
        'update_user_properties': {},
        'read_users_list': [],
    }
    def role_permission_factory_sub(action):
        # Role-scoped permission: the current user must be listed for
        # *action* AND the role's id must be in that user's allowed set.
        def permission_factory(role):
            from flask_login import current_user
            return (current_user.is_authenticated and
                    current_user.id in allowed_users[action] and
                    role.id in allowed_users[action][current_user.id])
        # Wrap in an ad-hoc object exposing .can(), as the permission
        # factory API expects.
        return lambda role: type('permission_factory', (), {
            'can': lambda self: permission_factory(role)
        })()
    def list_permission_factory_sub(action):
        # List-scoped permission: only requires the current user to be
        # listed for *action* (no per-object check).
        def l_permission_factory(*args, **kwargs):
            from flask_login import current_user
            return (current_user.is_authenticated and
                    current_user.id in allowed_users[action])
        return lambda: type('permission_factory', (), {
            'can': lambda self: l_permission_factory()
        })()
    def reassign_role_permission_factory_sub(action):
        # (role, user)-scoped permission: the exact (role.id, user.id) pair
        # must be granted to the current user for *action*.
        def rr_permission_factory(role, user):
            from flask_login import current_user
            return (current_user.is_authenticated and
                    current_user.id in allowed_users[action] and
                    (role.id, user.id) in allowed_users[action][
                        current_user.id])
        return lambda role_user: type('permission_factory', (), {
            'can': lambda self: rr_permission_factory(role_user[0],
                                                      role_user[1])
        })()
    def user_permission_factory_sub(action):
        # User-scoped permission: the target user's id must be in the
        # current user's allowed set for *action*.
        def u_permission_factory(user):
            from flask_login import current_user
            return (current_user.is_authenticated and
                    current_user.id in allowed_users[action] and
                    user.id in allowed_users[action][current_user.id])
        return lambda user: type('permission_factory', (), {
            'can': lambda self: u_permission_factory(user)
        })()
    return {
        'read_role': role_permission_factory_sub('read_role'),
        'update_role': role_permission_factory_sub('update_role'),
        'delete_role': role_permission_factory_sub('delete_role'),
        'read_roles_list': list_permission_factory_sub('read_roles_list'),
        'create_role': list_permission_factory_sub('create_role'),
        'assign_role': reassign_role_permission_factory_sub('assign_role'),
        'unassign_role': reassign_role_permission_factory_sub('unassign_role'),
        'read_user_roles_list': user_permission_factory_sub(
            'read_user_roles_list'),
        'read_user_properties': user_permission_factory_sub(
            'read_user_properties'),
        'update_user_properties': user_permission_factory_sub(
            'update_user_properties'),
        'read_users_list': list_permission_factory_sub('read_users_list'),
        # Expose the mutable mapping so tests can grant permissions.
        'allowed_users': allowed_users,
    }
@pytest.fixture()
def with_profiles(app):
    """Return True if invenio-userprofiles is installed, else False."""
    # NOTE: this fixture returns a value rather than yielding one, so it must
    # be a plain fixture. ``pytest.yield_fixture`` requires a generator
    # function (and is deprecated in favour of ``pytest.fixture``).
    return 'invenio-userprofiles' in app.extensions
@pytest.yield_fixture()
def app(request, accounts_rest_permission_factory):
    """Flask application fixture.

    Builds a Flask app wired with the Invenio accounts/OAuth2 extensions,
    installs the test permission factories in the config, creates all DB
    tables, and drops them again on teardown.
    """
    instance_path = tempfile.mkdtemp()
    app = Flask(__name__, instance_path=instance_path)
    InvenioAccess(app)
    InvenioAccounts(app)
    InvenioAccountsREST(app)
    InvenioOAuth2Server(app)
    InvenioOAuth2ServerREST(app)
    InvenioDB(app)
    Babel(app)
    Mail(app)
    Menu(app)
    Breadcrumbs(app)
    # this is done mainly for coverage so that tests are run with and without
    # userprofiles being loaded in the app
    if not hasattr(request, 'param') or \
            'with_profiles' not in request.param or \
            request.param['with_profiles']:
        # tests without invenio-userprofiles being installed at all
        try:
            from invenio_userprofiles import InvenioUserProfiles
            InvenioUserProfiles(app)
        except ImportError:
            pass
    # Pull the per-action permission factories out of the fixture dict and
    # expose each one under its ACCOUNTS_REST_* config key.
    read_role = accounts_rest_permission_factory['read_role']
    update_role = accounts_rest_permission_factory['update_role']
    delete_role = accounts_rest_permission_factory['delete_role']
    read_roles = accounts_rest_permission_factory['read_roles_list']
    create_role = accounts_rest_permission_factory['create_role']
    assign_role = accounts_rest_permission_factory['assign_role']
    unassign_role = accounts_rest_permission_factory['unassign_role']
    user_roles = accounts_rest_permission_factory['read_user_roles_list']
    read_user_prop = accounts_rest_permission_factory['read_user_properties']
    mod_user_prop = accounts_rest_permission_factory['update_user_properties']
    read_users = accounts_rest_permission_factory['read_users_list']
    app.config.update(
        ACCOUNTS_REST_READ_ROLE_PERMISSION_FACTORY=read_role,
        ACCOUNTS_REST_UPDATE_ROLE_PERMISSION_FACTORY=update_role,
        ACCOUNTS_REST_DELETE_ROLE_PERMISSION_FACTORY=delete_role,
        ACCOUNTS_REST_READ_ROLES_LIST_PERMISSION_FACTORY=read_roles,
        ACCOUNTS_REST_CREATE_ROLE_PERMISSION_FACTORY=create_role,
        ACCOUNTS_REST_ASSIGN_ROLE_PERMISSION_FACTORY=assign_role,
        ACCOUNTS_REST_UNASSIGN_ROLE_PERMISSION_FACTORY=unassign_role,
        ACCOUNTS_REST_READ_USER_ROLES_LIST_PERMISSION_FACTORY=user_roles,
        ACCOUNTS_REST_READ_USER_PROPERTIES_PERMISSION_FACTORY=read_user_prop,
        ACCOUNTS_REST_UPDATE_USER_PROPERTIES_PERMISSION_FACTORY=mod_user_prop,
        ACCOUNTS_REST_READ_USERS_LIST_PERMISSION_FACTORY=read_users,
        OAUTH2SERVER_CLIENT_ID_SALT_LEN=40,
        OAUTH2SERVER_CLIENT_SECRET_SALT_LEN=60,
        OAUTH2SERVER_TOKEN_PERSONAL_SALT_LEN=60,
        SECRET_KEY='changeme',
        TESTING=True,
        SERVER_NAME='localhost',
        SQLALCHEMY_DATABASE_URI=os.environ.get(
            'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
        SECURITY_SEND_PASSWORD_CHANGE_EMAIL=False
    )
    # NOTE(review): ``blueprint`` is imported but never registered here —
    # presumably imported for its side effects; confirm before removing.
    from invenio_oauth2server.views.server import blueprint
    with app.app_context():
        db_.create_all()
    yield app
    # Teardown: drop all tables created for this test app.
    with app.app_context():
        db_.drop_all()
@pytest.yield_fixture()
def db(app):
    """Setup database.

    Ensures the database exists and all tables are created; removes the
    session and drops all tables on teardown.
    """
    with app.app_context():
        db_.init_app(app)
        # Create the database itself if the server does not have it yet.
        if not database_exists(str(db_.engine.url)):
            create_database(str(db_.engine.url))
        db_.create_all()
    yield db_
    with app.app_context():
        db_.session.remove()
        db_.drop_all()
@pytest.fixture()
def users_data(with_profiles):
    """User data fixture.

    Provides raw account dicts for four regular users plus an inactive
    account and an admin; profile data is attached only when
    invenio-userprofiles is available.
    """
    def _build(idx):
        uid = 41 + idx
        entry = {
            'id': uid,
            'email': 'user{}@inveniosoftware.org'.format(idx),
            'password': 'pass1',
            'active': True,
        }
        if with_profiles:
            entry['profile'] = {
                'user_id': uid,
                'full_name': 'full_name',
                'username': 'username{}'.format(idx),
            }
        return entry

    users = {}
    for idx in range(1, 5):
        users['user{}'.format(idx)] = _build(idx)
    users['inactive'] = {
        'id': 40,
        'email': 'inactive@inveniosoftware.org',
        'password': 'pass1',
        'active': False,
    }
    users['admin'] = {
        'id': 41,
        'email': 'admin@inveniosoftware.org',
        'password': 'pass1',
        'active': True,
    }
    return users
@pytest.fixture()
def roles_data():
    """Role data fixture: ten role dicts named role0..role9."""
    return [
        {'name': 'role{}'.format(idx), 'description': 'desc{}'.format(idx)}
        for idx in range(10)
    ]
@pytest.fixture()
def users(app, db, roles_data, users_data, create_roles):
    """Create test users.

    Persists every account from ``users_data``, assigns the first five
    created roles to user1, grants superuser access to the admin, creates
    a personal OAuth2 token per user, and returns a dict of lightweight
    ``UserRef`` namedtuples keyed by user name.
    """
    ds = app.extensions['invenio-accounts'].datastore
    result = {}
    with app.app_context():
        with db.session.begin_nested():
            for user_key, user_data in iteritems(users_data):
                # Hash the plaintext password before storing the account.
                user_data['password'] = encrypt_password(user_data['password'])
                user = ds.create_user(**user_data)
                result[user_key] = user
            # Give user1 the first five roles created by ``create_roles``.
            roles = Role.query.filter(
                Role.id.in_(role['id'] for role in create_roles[:5])).all()
            result['user1'].roles.extend(roles)
            # Admin gets the superuser action.
            db.session.add(ActionUsers.allow(
                superuser_access,
                user=result['admin'],
            ))
            for user in result.values():
                # Personal token with every available OAuth2 scope, so tests
                # can authenticate API calls as this user.
                scopes = current_oauth2server.scope_choices()
                db.session.add(user)
                user.allowed_token = Token.create_personal(
                    name='allowed_token',
                    user_id=user.id,
                    scopes=[s[0] for s in scopes]
                ).access_token
        # Snapshot ids/tokens into plain namedtuples before the session is
        # committed, so tests do not depend on live ORM objects.
        user_ref = namedtuple('UserRef', 'id, allowed_token, data')
        result_user = {
            name: user_ref(
                id=user.id,
                data=users_data[name],
                allowed_token=user.allowed_token,
            ) for name, user in six.iteritems(result)
        }
        db.session.commit()
    return result_user
@pytest.fixture()
def create_roles(app, db, roles_data, users_data):
    """Create test roles.

    Persists every role from ``roles_data`` and returns the data dicts
    augmented with the database-assigned ``id`` of each role.
    """
    ds = app.extensions['invenio-accounts'].datastore
    roles = []
    with app.app_context():
        with db.session.begin_nested():
            for rd in roles_data:
                r = ds.create_role(**rd)
                db.session.add(r)
                # Keep (data, model) pairs so ids can be copied back after
                # the commit assigns them.
                roles.append((rd, r))
        db.session.commit()
    for role in roles:
        role[0]['id'] = role[1].id
    return [data for data, role in roles]
| PaulinaLach/invenio-accounts-rest | tests/conftest.py | Python | gpl-2.0 | 12,127 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.database import get_connection
from datetime import *
import time
from pprint import pprint
def js_time(s):
    """Convert a local-time 'YYYY-MM-DD HH:MM:SS' string to JS epoch millis.

    JavaScript timestamps are milliseconds since the Unix epoch, hence the
    factor of 1000. Uses the process's local timezone via ``time.mktime``.
    """
    parsed = datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
    epoch_seconds = time.mktime(parsed.timetuple())
    return epoch_seconds * 1000
def js_date(s):
    """Convert a 'YYYY-MM-DD' date string to JS epoch milliseconds.

    The date is interpreted at local midnight via ``time.mktime``;
    JavaScript expects milliseconds, hence the factor of 1000.
    """
    parsed = datetime.strptime(s, "%Y-%m-%d")
    epoch_seconds = time.mktime(parsed.timetuple())
    return epoch_seconds * 1000
class ReportIssue(Model):
    """Non-persistent report model producing issue chart data.

    ``_store = False``: this model backs report views only and has no
    database table.
    """
    _name = "report.issue"
    _store = False

    def get_issue_chart(self, context={}):
        """Return a time series of the number of open issues over time.

        Each issue contributes an "open" event at creation and, if closed,
        a "close" event; replaying the sorted events yields a running
        open-issue count keyed by JS-style millisecond timestamps.
        """
        actions = []
        for issue in get_model("issue").search_browse([]):
            if issue.date_created:
                actions.append((issue.date_created, "open"))
            if issue.state == "closed" and issue.date_closed:
                actions.append((issue.date_closed, "close"))
        actions.sort()
        values = []
        num_issues = 0
        for d, action in actions:
            if action == "open":
                num_issues += 1
            elif action == "close":
                num_issues -= 1
            values.append((js_time(d), num_issues))
        data = {
            "value": values,
        }
        return data

    def get_issue_close_chart(self, context={}):
        """Return per-day counts of closed issues as chart series data.

        Output shape (single "Closed" series of (js_date_ms, count) pairs)
        presumably matches the charting library used by the UI — confirm
        against the consuming view.
        """
        closed = {}
        for issue in get_model("issue").search_browse([["state", "=", "closed"], ["date_closed", "!=", None]]):
            # Group by the date portion (YYYY-MM-DD) of the close timestamp.
            d = issue.date_closed[:10]
            closed.setdefault(d, 0)
            closed[d] += 1
        values = []
        for d, n in sorted(closed.items()):
            values.append((js_date(d), n))
        data = {
            "value": [{
                "key": "Closed",
                "values": values,
            }]
        }
        pprint(data)
        return data

ReportIssue.register()
| sidzan/netforce | netforce_support/netforce_support/models/report_issue.py | Python | mit | 2,805 |
from __future__ import unicode_literals
def device_from_request(request):
    """
    Determine's the device name from the request by first looking for an
    overridding cookie, and if not found then matching the user agent.
    Used at both the template level for choosing the template to load and
    also at the cache level as a cache key prefix.

    Returns "" when no device matches. Note: if the cookie is present but
    names no configured device, the user-agent fallback is skipped entirely
    (the KeyError branch only runs when the cookie is absent).
    """
    from mezzanine.conf import settings
    try:
        # If a device was set via cookie, match available devices.
        for (device, _) in settings.DEVICE_USER_AGENTS:
            if device == request.COOKIES["mezzanine-device"]:
                return device
    except KeyError:
        # If a device wasn't set via cookie, match user agent.
        try:
            user_agent = request.META["HTTP_USER_AGENT"].lower()
        except KeyError:
            # No user agent header at all; fall through to return "".
            pass
        else:
            try:
                # On Python 2 the header may be bytes; str on Python 3 has
                # no .decode, which the AttributeError catch absorbs.
                user_agent = user_agent.decode("utf-8")
            except (AttributeError, UnicodeDecodeError):
                pass
            # First device whose UA substring occurs in the header wins.
            for (device, ua_strings) in settings.DEVICE_USER_AGENTS:
                for ua_string in ua_strings:
                    if ua_string.lower() in user_agent:
                        return device
    return ""
def templates_for_device(request, templates):
    """
    Expand one template name (or a list of them) into a device-aware list.

    For each name, the active device's variant comes first, then the
    configured default device's variant (when it differs), then the plain
    name itself, preserving the original ordering of ``templates``.
    """
    from mezzanine.conf import settings
    if not isinstance(templates, (list, tuple)):
        templates = [templates]
    active_device = device_from_request(request)
    expanded = []
    for name in templates:
        if active_device:
            expanded.append("%s/%s" % (active_device, name))
        default_device = settings.DEVICE_DEFAULT
        if default_device and default_device != active_device:
            expanded.append("%s/%s" % (default_device, name))
        expanded.append(name)
    return expanded
| webounty/mezzanine | mezzanine/utils/device.py | Python | bsd-2-clause | 2,035 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Example code to perform int8 GEMM"
import logging
import sys
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi.cuda.tensor_intrin import dp4a
# Run autotuning when True; otherwise replay the pretuned config below.
DO_TUNING = True
# Index into the config space of a known-good configuration, used only
# when DO_TUNING is False.
PRETUNED_INDEX = 75333

# dp4a intrinsic: 4-way int8 dot product accumulated into int32, with all
# three operands staged in "local" scope.
intrin_dp4a = dp4a("local", "local", "local")
@autotvm.template
def gemm_int8(n, m, l):
    """Tunable CUDA schedule for int8 GEMM: C[n, m] = A[n, l] . B[m, l]^T.

    Accumulation is in int32 via the dp4a intrinsic. Returns the schedule
    and the argument buffers [A, B, C] for autotvm.
    """
    A = te.placeholder((n, l), name="A", dtype="int8")
    B = te.placeholder((m, l), name="B", dtype="int8")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (n, m),
        lambda i, j: te.sum(A[i, k].astype("int32") * B[j, k].astype("int32"), axis=k),
        name="C",
    )

    cfg = autotvm.get_config()
    s = te.create_schedule(C.op)
    y, x = C.op.axis

    # Stage inputs through shared memory, then registers ("local").
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BL = s.cache_read(BB, "local", [C])
    CC = s.cache_write(C, "local")

    k = CC.op.reduce_axis[0]

    # Innermost k-tile must be exactly 4 to match the dp4a intrinsic width.
    cfg.define_split(
        "tile_k",
        cfg.axis(k),
        num_outputs=3,
        filter=lambda entity: entity.size[2] == 4 and entity.size[0] * 2 >= entity.size[1],
    )

    ko, kt, ki = cfg["tile_k"].apply(s, CC, k)

    s[CC].tensorize(ki, intrin_dp4a)

    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    thread_x = te.thread_axis("threadIdx.x")
    thread_y = te.thread_axis("threadIdx.y")

    def block_size_filter(entity):
        # Restrict the output-tile search space to reasonable block shapes.
        return (
            entity.size[0] * 2 >= entity.size[1] * 2
            and entity.size[1] <= 16
            and entity.size[3] <= 4
        )

    cfg.define_split("tile_y", cfg.axis(y), num_outputs=4, filter=block_size_filter)
    cfg.define_split("tile_x", cfg.axis(x), num_outputs=4, filter=block_size_filter)
    by, tyz, ty, yi = cfg["tile_y"].apply(s, C, y)
    bx, txz, tx, xi = cfg["tile_x"].apply(s, C, x)

    # Bind tiles: blocks -> vthreads -> threads.
    s[C].bind(by, block_y)
    s[C].bind(bx, block_x)
    s[C].bind(tyz, te.thread_axis("vthread"))
    s[C].bind(txz, te.thread_axis("vthread"))
    s[C].bind(ty, thread_y)
    s[C].bind(tx, thread_x)
    s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)

    s[CC].compute_at(s[C], tx)

    yo, xo = CC.op.axis
    s[CC].reorder(ko, kt, yo, xo, ki)
    s[CC].unroll(kt)

    # Register-level input tiles: vectorized 4-wide loads, double buffered.
    for stage in [AL, BL]:
        s[stage].compute_at(s[CC], kt)
        _, xi = s[stage].split(stage.op.axis[1], factor=4)
        s[stage].vectorize(xi)
        s[stage].double_buffer()

    # Shared-memory staging: tunable storage alignment, cooperative loads
    # spread over the thread block, vectorized 16-wide.
    cfg.define_knob("storage_align", [16, 48])
    for stage in [AA, BB]:
        s[stage].storage_align(s[stage].op.axis[0], cfg["storage_align"].val, 0)
        s[stage].compute_at(s[CC], ko)

        fused = s[stage].fuse(*s[stage].op.axis)
        ty, tx = s[stage].split(fused, nparts=cfg["tile_y"].size[2])
        tx, xi = s[stage].split(tx, nparts=cfg["tile_x"].size[2])
        _, xi = s[stage].split(xi, factor=16)

        s[stage].bind(ty, thread_y)
        s[stage].bind(tx, thread_x)
        s[stage].vectorize(xi)

    cfg.define_knob("auto_unroll_max_step", [512, 1500])
    s[C].pragma(by, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    s[C].pragma(by, "unroll_explicit", False)

    # 2*n*m*l multiply-adds, reported so autotvm can compute GFLOPS.
    cfg.add_flop(n * m * l * 2)
    return s, [A, B, C]
if __name__ == "__main__":
    # Square problem size; GEMM is 2048 x 2048 x 2048.
    N = 2048
    n = m = l = N
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    task = autotvm.task.create(gemm_int8, args=(n, m, l), target="cuda")
    print(task.config_space)

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4),
    )

    log_name = "gemm_int8.log"
    if DO_TUNING:
        # Search the config space with XGBoost and log every measurement.
        tuner = autotvm.tuner.XGBTuner(task)
        tuner.tune(
            n_trial=1000,
            measure_option=measure_option,
            callbacks=[autotvm.callback.log_to_file(log_name)],
        )

        dispatch_context = autotvm.apply_history_best(log_name)
        best_config = dispatch_context.query(task.target, task.workload)
        print("\nBest config:")
        print(best_config)
    else:
        # Replay a previously found configuration instead of tuning.
        config = task.config_space.get(PRETUNED_INDEX)
        dispatch_context = autotvm.task.ApplyConfig(config)
        print("Using pretuned config:")
        print(config)

    # Build the kernel under the chosen config.
    with dispatch_context:
        with tvm.target.Target("cuda"):
            s, arg_bufs = gemm_int8(n, m, l)
            f = tvm.build(s, arg_bufs, "cuda", name="gemm_int8")

    dev = tvm.device("cuda", 0)
    a_np = np.random.randint(size=(n, l), low=-128, high=127, dtype="int8")
    b_np = np.random.randint(size=(m, l), low=-128, high=127, dtype="int8")

    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype="int32"), dev)
    f(a, b, c)

    # Verify against a NumPy int32 reference (B is transposed: both inputs
    # are stored row-major along the reduction axis).
    tvm.testing.assert_allclose(
        c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
    )

    # Benchmark and report throughput in GOPS (2 ops per multiply-add).
    num_ops = 2 * l * m * n
    num_runs = 1000
    timer_f = f.time_evaluator(f.entry_name, dev, number=num_runs)
    t = timer_f(a, b, c).mean
    GOPS = num_ops / (t * 1e3) / 1e6
    print("average time cost of %d runs = %g ms, %g GOPS." % (num_runs, t * 1e3, GOPS))
| dmlc/tvm | apps/topi_recipe/gemm/gemm_int8.py | Python | apache-2.0 | 5,879 |
class Background(object):
    """Drawable background that is either a solid color or an image."""

    def __init__(self, resources):
        # Resource manager used to lazily load image backgrounds.
        self.resources = resources
        # 'solid' or 'image'.
        self.type = 'solid'
        # Current RGB color (used when type == 'solid'); defaults to white.
        self.color = (255, 255, 255)
        # Loaded image content and its name (used when type == 'image').
        self.image = None
        self.image_name = ''

    def set_solid(self, color):
        """Switch to a solid-color background.

        ``color`` may be an RGB tuple or a comma-separated string such as
        ``"255,0,0"``. Reassignment is skipped when the value is unchanged.
        """
        self.type = 'solid'
        if color != self.color:
            if isinstance(color, tuple):
                self.color = color
            # BUG FIX: the original compared ``type(color)`` against the
            # Python 2-only ``unicode`` name unconditionally, raising
            # NameError on Python 3 (even for tuple input, since the checks
            # were sequential ``if`` statements rather than ``elif``).
            elif isinstance(color, str):
                self.color = tuple(map(int, color.split(',')))

    def set_image(self, image_name):
        """Switch to an image background, loading it via ``resources``.

        The image is only (re)loaded when ``image_name`` differs from the
        currently loaded one.
        """
        self.type = 'image'
        if image_name != self.image_name:
            image = self.resources.load_resource('images', image_name)
            self.image = image.content
            self.image_name = image_name
| ProgrammaBol/wiggler | wiggler/engine/background.py | Python | gpl-3.0 | 808 |
# -*- coding: utf-8 -*-
from sdl2 import SDL_Delay,\
SDL_GetTicks,\
SDL_KEYDOWN,\
SDL_KEYUP,\
SDL_QUIT,\
SDL_Rect,\
SDL_RenderCopy,\
SDLK_ESCAPE,\
SDLK_UP,\
SDLK_DOWN,\
SDLK_RETURN,\
SDL_Quit
from sdl2.ext import Resources,\
get_events
from const import WindowSize, Colors
from input import Input
from ui import DialogBox
from game import Game
# Target frame rate for the menu loop.
FPS = 60  # units.FPS
# Cap on the elapsed time fed to update(), in ms, so a stalled frame does
# not produce a huge simulation step.
MAX_FRAME_TIME = int(5 * (1000 / FPS))
# Resource locator rooted at this module's 'resources' directory.
RESOURCES = Resources(__file__, 'resources')
class Menu:
    """Main-menu screen: draws the background, dialog text and cursor, and
    runs its own SDL event/render loop until an option is chosen or the
    user quits."""

    def __init__(self, window, world, renderer, factory):
        self.window = window
        self.renderer = renderer
        self.world = world
        self.factory = factory
        self.rsystem = factory.create_sprite_render_system(window)
        self.menu_bg = RESOURCES.get_path("menu_bg.png")
        self.menu_cursor = RESOURCES.get_path("menu_cursor.png")
        # Loop-control flag for run().
        self.running = True
        # Pixel position of the dialog text block.
        self.position = 460, 340
        # Pixel position of the cursor next to the first menu entry.
        self.cursor_start_position = 370, 330
        # Index of the currently highlighted entry (0..2).
        self.cursor_position = 0
        # Vertical step per menu entry, in pixels.
        self.cursor_sprite_size = 32
        self.background_sprite = self.factory.from_image(self.menu_bg)
        self.cursor_sprite = self.factory.from_image(self.menu_cursor)
        # Menu entries, keyed by cursor index.
        self.text = {0: "START",
                     1: "OPTIONS",
                     2: "EXIT"}
        self.dialog = DialogBox(self.factory,
                                font_size=32,
                                fg_color=Colors.WHITE,
                                bg_color=Colors.BLACK,
                                font_name="04B_20__.TTF",
                                text=self.text,
                                position=self.position,
                                renderer=self.renderer)
        # Draw order: background first, then dialog text, cursor on top.
        self.sprites = [self.background_sprite]
        sprites = self.dialog.get_sprites()
        for sprite in sprites:
            self.sprites.append(sprite)
        self.sprites.append(self.cursor_sprite)

    def __del__(self):
        # NOTE(review): tearing down all of SDL from a destructor is
        # fragile (runs at unpredictable times); confirm intent.
        SDL_Quit()

    def update(self, elapsed_time):
        # Snap the cursor sprite next to the currently selected entry.
        self.cursor_sprite.position = self.cursor_start_position[0], self.cursor_start_position[1] \
            + self.cursor_position * self.cursor_sprite_size

    def run(self):
        """Run the menu loop: poll input, move the cursor, render, and cap
        the frame rate at FPS."""
        menu_input = Input()
        last_update_time = SDL_GetTicks()  # units.MS
        while self.running:
            start_time = SDL_GetTicks()  # units.MS
            menu_input.begin_new_frame()
            menu_events = get_events()
            for event in menu_events:
                if event.type == SDL_KEYDOWN:
                    menu_input.key_down_event(event)
                elif event.type == SDL_KEYUP:
                    menu_input.key_up_event(event)
                elif event.type == SDL_QUIT:
                    self.running = False
                    break
            # Exit
            if menu_input.was_key_pressed(SDLK_ESCAPE):
                self.running = False
            # Move the cursor
            elif menu_input.was_key_pressed(SDLK_UP):
                if self.cursor_position != 0:
                    self.cursor_position -= 1
            elif menu_input.was_key_pressed(SDLK_DOWN):
                if self.cursor_position != 2:
                    self.cursor_position += 1
            # Select option
            elif menu_input.was_key_pressed(SDLK_RETURN):
                self.running = False
                if self.cursor_position == 0:
                    self.launch_game()
            current_time = SDL_GetTicks()  # units.MS
            elapsed_time = current_time - last_update_time  # units.MS
            self.update(min(elapsed_time, MAX_FRAME_TIME))
            last_update_time = current_time
            self.renderer.render(self.sprites)
            # This loop lasts 1/60th of a second, or 1000/60th ms
            ms_per_frame = 1000 // FPS  # units.MS
            elapsed_time = SDL_GetTicks() - start_time  # units.MS
            if elapsed_time < ms_per_frame:
                SDL_Delay(ms_per_frame - elapsed_time)

    def launch_game(self):
        """Start the game; when it returns, re-enter the menu loop."""
        game = Game(self.world, self.window, self.renderer, self.factory)
        game.run()
        self.running = True
        self.run()
| ep0s/soulmaster | menu.py | Python | gpl-3.0 | 4,205 |
import glob
import os
import openmc
import pytest
from tests.testing_harness import PyAPITestHarness
from tests.regression_tests import config
@pytest.fixture
def model():
    """Build a minimal eigenvalue model with a keff std-dev trigger.

    A bare U235 sphere with a flux tally; the trigger threshold is chosen
    low enough that convergence takes more than the nominal 10 batches, so
    statepoint/trigger behaviour can be exercised.
    """
    # Materials
    mat = openmc.Material()
    mat.set_density('g/cm3', 4.5)
    mat.add_nuclide('U235', 1.0)
    materials = openmc.Materials([mat])

    # Geometry
    sph = openmc.Sphere(r=10.0, boundary_type='vacuum')
    cell = openmc.Cell(fill=mat, region=-sph)
    geometry = openmc.Geometry([cell])

    # Settings
    settings = openmc.Settings()
    settings.run_mode = 'eigenvalue'
    settings.batches = 10
    settings.inactive = 5
    settings.particles = 200
    # Choose a sufficiently low threshold to trigger after more than 10 batches.
    # 0.004 seems to take 13 batches.
    settings.keff_trigger = {'type': 'std_dev', 'threshold': 0.004}
    settings.trigger_max_batches = 1000
    settings.trigger_batch_interval = 1
    settings.trigger_active = True
    settings.verbosity = 1  # to test that this works even with no output

    # Tallies
    t = openmc.Tally()
    t.scores = ['flux']
    tallies = openmc.Tallies([t])

    # Put it all together
    model = openmc.model.Model(materials=materials,
                               geometry=geometry,
                               settings=settings,
                               tallies=tallies)
    return model
class TriggerStatepointRestartTestHarness(PyAPITestHarness):
    """Harness that runs the model twice — once from scratch and once
    restarted from the intermediate statepoint — and checks both runs end
    at the same batch with the same k_combined."""

    def __init__(self, statepoint, model=None):
        super().__init__(statepoint, model)
        # Resolved statepoint paths: the restart point and the final one.
        self._restart_sp = None
        self._final_sp = None
        # store the statepoint filename pattern separately to sp_name so we can reuse it
        self._sp_pattern = self._sp_name

    def _test_output_created(self):
        """Make sure statepoint files have been created."""
        spfiles = sorted(glob.glob(self._sp_pattern))
        assert len(spfiles) == 2, \
            'Two statepoint files should have been created'
        if not self._final_sp:
            # First non-restart run
            self._restart_sp = spfiles[0]
            self._final_sp = spfiles[1]
        else:
            # Second restart run
            assert spfiles[1] == self._final_sp, \
                'Final statepoint names were different'
        # Use the final_sp as the sp_name for the 'standard' results tests
        self._sp_name = self._final_sp

    def execute_test(self):
        """
        Perform initial and restart runs using the model.run method,
        Check all inputs and outputs which should be the same as those
        generated using the normal PyAPITestHarness update methods.
        """
        try:
            args = {'openmc_exec': config['exe'], 'event_based': config['event']}
            if config['mpi']:
                args['mpi_args'] = [config['mpiexec'], '-n', config['mpi_np']]
            # First non-restart run
            spfile = self._model.run(**args)
            sp_batchno_1 = 0
            print('Last sp file: %s' % spfile)
            assert spfile
            with openmc.StatePoint(spfile) as sp:
                sp_batchno_1 = sp.current_batch
                k_combined_1 = sp.k_combined
            # Trigger must have pushed past the nominal 10 batches.
            assert sp_batchno_1 > 10
            print('Last batch no = %d' % sp_batchno_1)
            self._write_inputs(self._get_inputs())
            self._compare_inputs()
            self._test_output_created()
            self._write_results(self._get_results())
            self._compare_results()
            # Second restart run
            restart_spfile = glob.glob(os.path.join(os.getcwd(), self._restart_sp))
            assert len(restart_spfile) == 1
            args['restart_file'] = restart_spfile[0]
            spfile = self._model.run(**args)
            sp_batchno_2 = 0
            assert spfile
            with openmc.StatePoint(spfile) as sp:
                sp_batchno_2 = sp.current_batch
                k_combined_2 = sp.k_combined
            assert sp_batchno_2 > 10
            # Restarted run must converge at the same batch as the original.
            assert sp_batchno_1 == sp_batchno_2, \
                'Different final batch number after restart'
            # need str() here as uncertainties.ufloat instances are always different
            assert str(k_combined_1) == str(k_combined_2), \
                'Different final k_combined after restart'
            self._write_inputs(self._get_inputs())
            self._compare_inputs()
            self._test_output_created()
            self._write_results(self._get_results())
            self._compare_results()
        finally:
            self._cleanup()
def test_trigger_statepoint_restart(model):
    """Run the trigger/restart harness against the fixture model."""
    # Assuming we converge within 1000 batches, the statepoint filename
    # should include the batch number padded by at least one '0'.
    harness = TriggerStatepointRestartTestHarness('statepoint.0*.h5', model)
    harness.main()
| liangjg/openmc | tests/regression_tests/trigger_statepoint_restart/test.py | Python | mit | 4,835 |
#!/usr/bin/env python2
import argparse
import difflib
import os
import subprocess
import sys
import urllib2
def main():
parser = argparse.ArgumentParser(
description='Reformats C++ source files that have changed from a given '
'git ref.')
parser.add_argument('--url',
default='https://clang.clementine-player.org/format',
help='a URL of a Clang-in-the-cloud service')
parser.add_argument('--ref', default='origin/master',
help='the git-ref to compare against')
parser.add_argument('--extension', action='append', metavar='EXT',
default=['cc', 'cpp', 'h', 'c', 'cxx', 'm', 'mm'],
help='file extensions to reformat')
parser.add_argument('-i', dest='inplace', action='store_true',
help='edit files inplace instead of showing a diff')
parser.add_argument('--files', nargs='*', metavar='FIL',
default=[],
help='get files as arguments instead of git')
args = parser.parse_args()
try:
root_dir = subprocess.check_output([
'git', 'rev-parse', '--show-toplevel']).strip()
except subprocess.CalledProcessError:
# Probably we were not called from a git working directory, just ignore this
# error.
return
changed_files = subprocess.check_output([
'git', 'diff-index', args.ref, '--name-only']).splitlines()
if not changed_files:
print >> sys.stderr, 'No changes from %s' % args.ref
if not args.files and not changed_files:
print >> sys.stderr, "Use --files to select files for reformat"
return
if args.files:
changed_files = args.files
for filename in changed_files:
if not os.path.splitext(filename)[1][1:] in args.extension:
continue
path = os.path.join(root_dir, filename)
if not os.path.exists(path):
# Probably a deletion
continue
original = open(path).read()
response = urllib2.urlopen(args.url, original)
formatted = response.read()
if original == formatted:
print >> sys.stderr, '%s: formatting is correct!' % filename
continue
diff = difflib.unified_diff(
original.split('\n'), formatted.split('\n'),
os.path.join('a', filename), os.path.join('b', filename),
lineterm='')
if args.inplace:
with open(path, 'w') as fh:
fh.write(formatted)
print >> sys.stderr, '%s: %d insertion(s), %d deletion(s)' % (
filename,
sum(1 for x in diff if x.startswith('+')),
sum(1 for x in diff if x.startswith('-')))
else:
print '\n'.join(diff)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
  main()
| clementine-player/Clementine | dist/format.py | Python | gpl-3.0 | 2,569 |
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Time series manager."""
from concurrent.futures import ThreadPoolExecutor
from tornado import gen
from influxdb import InfluxDBClient
from empower.core.service import EService
from empower.core.serialize import serialize
DEFAULT_DATABASE = "empower"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8086
DEFAULT_USERNAME = "root"
DEFAULT_PASSWORD = "password"
class InfluxTimeSeriesManager(EService):
    """Time series manager.

    Forwards measurement points to an InfluxDB server. Writes are executed
    on a single worker thread; batches arriving while a write is in flight
    are buffered in ``self.stats`` and flushed one at a time afterwards.
    """
    def __init__(self, context, service_id, database, host, port, username,
                 password):
        super().__init__(context=context, service_id=service_id,
                         database=database, host=host, port=port,
                         username=username, password=password)
        # Both are created in start(): a single-worker executor and the
        # InfluxDB client connection.
        self.thread_pool = None
        self.influxdb_client = None
        # stats buffer, cannot rely on ThreadPoolExecutor because
        # it cannot clear pending jobs when shutdown is called
        self.stats = []
        # True while a write is being executed on the worker thread.
        self.busy = False
    def start(self):
        """Start the time series manager and connect to InfluxDB."""
        super().start()
        # One worker thread serializes all InfluxDB writes.
        self.thread_pool = ThreadPoolExecutor(1)
        self.influxdb_client = InfluxDBClient(host=self.host,
                                              port=self.port,
                                              username=self.username,
                                              password=self.password,
                                              timeout=3,
                                              database=self.database)
        try:
            # create database, it has no effect if it is already present
            self.influxdb_client.create_database(self.database)
            self.log.info("Connected to InfluxDB database %s", self.database)
        except Exception as ex:
            # Connection problems are logged but do not stop the service.
            self.log.exception(ex)
    @property
    def database(self):
        """Return database."""
        return self.params["database"]
    @database.setter
    def database(self, value):
        """Set database."""
        self.params["database"] = value
    @property
    def host(self):
        """Return host."""
        return self.params["host"]
    @host.setter
    def host(self, value):
        """Set host."""
        self.params["host"] = value
    @property
    def port(self):
        """Return port."""
        return self.params["port"]
    @port.setter
    def port(self, value):
        """Set port (coerced to int)."""
        self.params["port"] = int(value)
    @property
    def username(self):
        """Return username."""
        return self.params["username"]
    @username.setter
    def username(self, value):
        """Set username."""
        self.params["username"] = value
    @property
    def password(self):
        """Return password."""
        return self.params["password"]
    @password.setter
    def password(self, value):
        """Set password."""
        self.params["password"] = value
    @gen.coroutine
    def write_points(self, points):
        """Add new points to the DB.

        Non-blocking: the actual write runs on the worker thread. If a
        write is already in flight the points are buffered and flushed
        later; on a write error the whole buffer is dropped.
        """
        # the sender thread is already working, buffer data
        if self.busy:
            self.stats.append(points)
            return
        self.busy = True
        error = yield self.thread_pool.submit(self.__write_points_worker,
                                              points)
        self.busy = False
        # clear buffer in case of error
        if error:
            self.stats.clear()
        # pop buffered data and send it
        # NOTE(review): the coroutine is invoked fire-and-forget here (its
        # future is not yielded); presumably intentional -- confirm.
        if self.stats:
            self.write_points(self.stats.pop(0))
    def __write_points_worker(self, points):
        # Runs on the worker thread; returns True on failure so the caller
        # can discard the buffered data.
        try:
            self.influxdb_client.write_points(points=serialize(points))
        except Exception as ex:
            self.log.exception(ex)
            return True
        return False
def launch(context, service_id, database=DEFAULT_DATABASE,
           host=DEFAULT_HOST, port=DEFAULT_PORT, username=DEFAULT_USERNAME,
           password=DEFAULT_PASSWORD):
    """Initialize the module and return the manager instance."""
    params = dict(context=context, service_id=service_id, database=database,
                  host=host, port=port, username=username, password=password)
    return InfluxTimeSeriesManager(**params)
| rriggio/empower-runtime | empower/managers/timeseriesmanager/timeseriesmanager.py | Python | apache-2.0 | 4,835 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import itertools
import random
import warnings
import numpy as np
from .gd import GradientDescent
from .bfgs import Lbfgs
from .cg import NonlinearConjugateGradient
from .rprop import Rprop
from .rmsprop import RmsProp
from .adadelta import Adadelta
from .adam import Adam
try:
from sklearn.grid_search import ParameterSampler
except ImportError:
pass
def is_garray(cand):
    """Return True if *cand* looks like a gnumpy array (duck-typed)."""
    return hasattr(cand, 'as_numpy_array')


def is_array(cand):
    """Return True if *cand* is a gnumpy or numpy array."""
    return is_garray(cand) or isinstance(cand, np.ndarray)


def clear_info(info):
    """Clean up contents of info dictionary for better use.

    Keys to be removed are ``args``, ``kwargs`` and any non-scalar numpy or
    gnumpy arrays. Numpy scalars are converted to floats.

    Examples
    --------

    >>> import numpy as np
    >>> info = {'args': None, 'foo': np.zeros(3), 'bar': np.array(1),
    ...         'loss': 1.}
    >>> cleared = clear_info(info)
    >>> cleared == {'bar': 1.0, 'loss': 1.0}
    True
    """
    # .items() (unlike the former .iteritems()) exists on both Python 2 and
    # 3; the dictionaries involved are small, so materializing is harmless.
    items = info.items()
    # Convert single-element arrays to plain floats.
    items = ((k, float(v.reshape((1,))[0]) if is_array(v) and v.size == 1 else v)
             for k, v in items)
    # Drop remaining (non-scalar) arrays and the blacklisted keys.
    items = ((k, v) for k, v in items if not is_array(v))
    items = ((k, v) for k, v in items if k not in ('args', 'kwargs'))
    return dict(items)
def coroutine(f):
    """Turn a generator function into a coroutine by calling .next() once.

    The returned wrapper creates the generator, advances it to its first
    ``yield`` so it is immediately ready to receive values via ``send``,
    and returns it.
    """
    import functools

    # functools.wraps preserves f's name/docstring on the wrapper, which
    # the plain closure used to lose.
    @functools.wraps(f)
    def started(*args, **kwargs):
        cr = f(*args, **kwargs)
        next(cr)
        return cr
    return started
def aslist(item):
    """Wrap *item* in a one-element list unless it already is a list/tuple."""
    if isinstance(item, (list, tuple)):
        return item
    return [item]
def mini_slices(n_samples, batch_size):
    """Return slices of size `batch_size` that work with a container of
    length `n_samples`; the last slice may run past the end."""
    n_batches = n_samples // batch_size
    if n_samples % batch_size:
        n_batches += 1
    return [slice(start, start + batch_size)
            for start in range(0, n_batches * batch_size, batch_size)]
def draw_mini_slices(n_samples, batch_size, with_replacement=False):
    """Yield batch slices over ``n_samples`` items forever.

    Without replacement, every pass visits each slice exactly once in a
    fresh random order; with replacement, slices are drawn uniformly and
    independently.
    """
    slices = mini_slices(n_samples, batch_size)
    # list(): random.shuffle needs a mutable sequence; range() is not one
    # on Python 3.
    idxs = list(range(len(slices)))
    if with_replacement:
        # Loop forever, like the without-replacement branch; the original
        # yielded a single slice and then exhausted the generator.
        while True:
            yield random.choice(slices)
    else:
        while True:
            random.shuffle(idxs)
            for i in idxs:
                yield slices[i]
def draw_mini_indices(n_samples, batch_size):
    """Yield index batches of size ``batch_size`` forever.

    Indices are drawn without replacement within each pass over the
    ``n_samples`` indices; a batch straddling two passes is filled up from
    the freshly reshuffled index list.
    """
    assert n_samples > batch_size
    # list(): random.shuffle needs a mutable sequence; range() is not one
    # on Python 3.
    idxs = list(range(n_samples))
    random.shuffle(idxs)
    pos = 0
    while True:
        while pos + batch_size <= n_samples:
            yield idxs[pos:pos + batch_size]
            pos += batch_size
        # Partial leftover: top it up from the start of a reshuffled pass.
        batch = idxs[pos:]
        needed = batch_size - len(batch)
        random.shuffle(idxs)
        batch += idxs[0:needed]
        yield batch
        pos = needed
def optimizer(identifier, wrt, *args, **kwargs):
    """Return an optimizer with the desired configuration.

    Convenience factory for trying different optimizers while changing as
    little code as possible. Extra positional and keyword arguments are
    handed to the constructor of the chosen class; keyword arguments the
    class does not accept are dropped with a warning instead of raising.

    :param identifier: String identifying the optimizer to use. One of
        ``gd``, ``lbfgs``, ``ncg``, ``rprop``, ``rmsprop``, ``adadelta``
        or ``adam``.
    :param wrt: Numpy array pointing to the data to optimize.
    """
    klass_map = {
        'gd': GradientDescent,
        'lbfgs': Lbfgs,
        'ncg': NonlinearConjugateGradient,
        'rprop': Rprop,
        'rmsprop': RmsProp,
        'adadelta': Adadelta,
        'adam': Adam,
    }
    klass = klass_map[identifier]
    argspec = inspect.getargspec(klass.__init__)
    if argspec.keywords is None:
        # No **kwargs catch-all on the constructor: warn about, then drop,
        # any keyword argument it does not accept.
        expected = set(argspec.args)
        supplied = set(kwargs)
        for name in supplied - expected:
            warnings.warn('Argument named %s is not expected by %s'
                          % (name, klass))
        kwargs = dict((k, kwargs[k]) for k in expected & supplied)
    try:
        return klass(wrt, *args, **kwargs)
    except TypeError:
        raise TypeError('required arguments for %s: %s' % (klass, argspec.args))
def shaped_from_flat(flat, shapes):
    """Carve the one dimensional array ``flat`` into views of ``shapes``.

    The views are allocated consecutively in ``flat``, so each one points
    to a distinct memory region of it.

    Parameters
    ----------
    flat : array_like
        Array of one dimension.
    shapes : list of ints or tuples of ints
        Desired shape of each view; a plain int means a 1-d view.

    Returns
    -------
    views : list of arrays
        One view per entry of ``shapes``, each pointing into ``flat``.
    """
    normalized = [(s,) if isinstance(s, int) else s for s in shapes]
    views = []
    offset = 0
    for shape in normalized:
        size = np.prod(shape)
        view = flat[offset:offset + size]
        view.shape = shape
        views.append(view)
        offset += size
    return views
def empty_with_views(shapes, empty_func=np.empty):
    """Allocate one flat array plus views into it shaped per ``shapes``.

    ``shapes`` is a list whose entries are ints (treated as 1-d lengths) or
    shape tuples. A single memory region large enough for all of them is
    obtained from ``empty_func`` and carved into consecutive views; e.g.
    ``[2, (3, 2)]`` yields a flat array of size 8 whose first view covers
    ``flat[:2]`` and whose second covers ``flat[2:8]`` with shape (3, 2).

    Parameters
    ----------
    shapes : list of ints or tuples of ints
        Specification of the desired shapes.
    empty_func : callable, optional
        Allocator mapping an integer size to a memory region (for example
        ``numpy.empty``, the default, or ``gnumpy.empty``).

    Returns
    -------
    flat : array_like (depending on ``empty_func``)
        Memory region containing all the views.
    views : list of array_like
        One view into ``flat`` per entry of ``shapes``.
    """
    normalized = [(s,) if isinstance(s, int) else s for s in shapes]
    total = sum(np.prod(s) for s in normalized)
    flat = empty_func(total)
    return flat, shaped_from_flat(flat, normalized)
def minibatches(arr, batch_size, d=0):
"""Return a list of views of the given arr.
Each view represents a mini bach of the data.
Parameters
----------
arr : array_like
Array to obtain batches from. Needs to be slicable. If ``d > 0``, needs
to have a ``.shape`` attribute from which the number of samples can
be obtained.
batch_size : int
Size of a batch. Last batch might be smaller if ``batch_size`` is not a
divisor of ``arr``.
d : int, optional, default: 0
Dimension along which the data samples are separated and thus slicing
should be done.
Returns
-------
mini_batches : list
Each item of the list is a view of ``arr``. Views are ordered.
"""
# This alternative is to make this work with lists in the case of d == 0.
if d == 0:
n_batches, rest = divmod(len(arr), batch_size)
else:
n_batches, rest = divmod(arr.shape[d], batch_size)
if rest:
n_batches += 1
slices = (slice(i * batch_size, (i + 1) * batch_size)
for i in range(n_batches))
if d == 0:
res = [arr[i] for i in slices]
elif d == 1:
res = [arr[:, i] for i in slices]
elif d == 2:
res = [arr[:, :, i] for i in slices]
return res
def iter_minibatches(lst, batch_size, dims, n_cycles=False, random_state=None):
    """Return an iterator that successively yields tuples containing aligned
    minibatches of size `batch_size` from slicable objects given in `lst`, in
    random order without replacement.

    Because different containers might require slicing over different
    dimensions, the dimension of each container has to be given as a list
    `dims`.

    Parameters
    ----------
    lst : list of array_like
        Each item of the list will be sliced into mini batches in alignment
        with the others.
    batch_size : int
        Size of each batch. Last batch might be smaller.
    dims : list
        Aligned with ``lst``, gives the dimension along which the data
        samples are separated.
    n_cycles : int or False, optional [default: False]
        Number of cycles after which to stop the iterator. If ``False``,
        will yield forever.
    random_state : a numpy.random.RandomState object, optional [default: None]
        Random number generator that will act as a seed for the minibatch
        order.

    Returns
    -------
    batches : iterator
        Iterator of aligned mini batch tuples in random order (without
        replacement).
    """
    batches = [minibatches(i, batch_size, d) for i, d in zip(lst, dims)]
    if len(batches) > 1:
        if any(len(i) != len(batches[0]) for i in batches[1:]):
            raise ValueError("containers to be batched have different lengths")
    counter = itertools.count()
    if random_state is not None:
        random.seed(random_state.normal())
    while True:
        indices = [i for i, _ in enumerate(batches[0])]
        while True:
            random.shuffle(indices)
            for i in indices:
                yield tuple(b[i] for b in batches)
            count = next(counter)
            if n_cycles and count >= n_cycles:
                # A bare return ends the generator cleanly. Raising
                # StopIteration inside a generator is an error on
                # Python 3.7+ (PEP 479) and would surface as RuntimeError.
                return
class OptimizerDistribution(object):
    """Distribution over optimizers and their hyper parameter grids.

    Can be used for specifying optimizers in scikit-learn's randomized
    parameter search.

    Attributes
    ----------
    options : dict
        Maps an optimizer key to a grid to sample from.
    """

    def __init__(self, **options):
        """Create an OptimizerDistribution object.

        Parameters
        ----------
        options : dict
            Maps an optimizer key to a grid to sample from.
        """
        self.options = options

    def rvs(self):
        """Draw a random ``(optimizer, parameters)`` pair."""
        key = random.choice(list(self.options))
        grid = self.options[key]
        params = list(ParameterSampler(grid, n_iter=1))[0]
        return key, params
| superbobry/climin | climin/util.py | Python | bsd-3-clause | 11,296 |
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for create_volume TaskFlow """
import time
from cinder import context
from cinder import test
from cinder.volume.flows.api import create_volume
class fake_scheduler_rpc_api(object):
    """Stand-in for the scheduler RPC API that verifies the request spec."""

    def __init__(self, expected_spec, test_inst):
        self._expected = expected_spec
        self._checker = test_inst

    def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                      image_id=None, request_spec=None,
                      filter_properties=None):
        # Only contract checked: the request spec arrives unchanged.
        self._checker.assertEqual(self._expected, request_spec)
class fake_volume_api(object):
    """Stand-in for the volume RPC API that verifies scheduling arguments."""

    def __init__(self, expected_spec, test_inst):
        self._expected = expected_spec
        self._checker = test_inst

    def create_volume(self, ctxt, volume, host,
                      request_spec, filter_properties,
                      allow_reschedule=True,
                      snapshot_id=None, image_id=None,
                      source_volid=None):
        # The spec must arrive unchanged and the individual ids must match
        # the corresponding entries of the spec.
        self._checker.assertEqual(self._expected, request_spec)
        self._checker.assertEqual(request_spec['source_volid'], source_volid)
        self._checker.assertEqual(request_spec['snapshot_id'], snapshot_id)
        self._checker.assertEqual(request_spec['image_id'], image_id)
class fake_db(object):
    """Minimal database API stub that returns canned rows."""

    def volume_get(self, *args, **kwargs):
        """Return a fixed volume row regardless of arguments."""
        return {'host': 'barf'}

    def volume_update(self, *args, **kwargs):
        """Return a fixed updated volume row regardless of arguments."""
        return {'host': 'farb'}

    def snapshot_get(self, *args, **kwargs):
        """Return a fixed snapshot row regardless of arguments."""
        return {'volume_id': 1}
class CreateVolumeFlowTestCase(test.TestCase):
    """Tests for the create_volume TaskFlow cast step."""

    def time_inc(self):
        """Stub for time.time() that is strictly increasing."""
        self.counter += 1
        return self.counter

    def setUp(self):
        super(CreateVolumeFlowTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        # Ensure that time.time() always returns more than the last time it
        # was called to avoid div by zero errors.
        self.counter = float(0)
        self.stubs.Set(time, 'time', self.time_inc)

    def test_cast_create_volume(self):
        props = {}
        all_none = {'volume_id': None,
                    'source_volid': None,
                    'snapshot_id': None,
                    'image_id': None}
        all_set = {'volume_id': 1,
                   'source_volid': 2,
                   'snapshot_id': 3,
                   'image_id': 4}
        # Both specs must be forwarded unchanged by the cast task.
        for spec in (all_none, all_set):
            task = create_volume.VolumeCastTask(
                fake_scheduler_rpc_api(spec, self),
                fake_volume_api(spec, self),
                fake_db())
            task._cast_create_volume(self.ctxt, spec, props)

    def tearDown(self):
        self.stubs.UnsetAll()
        super(CreateVolumeFlowTestCase, self).tearDown()
| Thingee/cinder | cinder/tests/test_create_volume_flow.py | Python | apache-2.0 | 3,535 |
# encoding: utf-8
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
from __future__ import print_function
import clawpack.geoclaw.topotools as topo
import os
import datetime
import shutil
import gzip
import numpy as np
from clawpack.geoclaw.surge.storm import Storm
import clawpack.clawutil as clawutil
# Time Conversions
def days2seconds(days):
    """Convert a time span given in days to seconds."""
    seconds_per_hour = 60.0**2
    # Same left-to-right evaluation order as (days * 60**2 * 24) to keep
    # floating point results bit-identical.
    return days * seconds_per_hour * 24.0
# Setrun directory for storing topo and dtopo files:
#setrundir = os.path.join(os.environ["CLAW"], 'geoclaw', 'setrun')
#datadir = os.path.join(os.environ["CLAW"],'geoclaw','ireneimage')
# Root directory for input data (topo/storm files); taken from the
# 'datadir' environment variable, falling back to the current directory.
DATA= os.path.join(os.environ.get('datadir', os.getcwd()))
# ------------------------------
def setrun(claw_pkg='geoclaw'):
    """
    Define the parameters used for running Clawpack.

    Configures an AMR storm-surge run over the North Atlantic (gauges
    correspond to Hurricane Irene; the storm itself is set up in setgeo).

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData
    """
    from clawpack.clawutil import data
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)
    # ------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amr2ez.data for AMR)
    # ------------------------------------------------------------------
    clawdata = rundata.clawdata # initialized when rundata instantiated
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.num_dim = num_dim
    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -89.83 # west longitude
    clawdata.upper[0] = -28.62 # east longitude
    clawdata.lower[1] = 12.96 # south latitude
    clawdata.upper[1] = 63.80 # north latitude
    # Number of grid cells:
    degree_factor = 4 # (0.25º,0.25º) ~ (25237.5 m, 27693.2 m) resolution
    clawdata.num_cells[0] = int(clawdata.upper[0] - clawdata.lower[0]) \
        * degree_factor
    clawdata.num_cells[1] = int(clawdata.upper[1] - clawdata.lower[1]) \
        * degree_factor
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.num_eqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    # First three are from shallow GeoClaw, fourth is friction and last 3 are
    # storm fields
    clawdata.num_aux = 3 + 1 + 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2
    # -------------
    # Initial time:
    # -------------
    # Start three days before t=0; presumably t=0 corresponds to the
    # storm's time_offset set in setgeo -- TODO confirm.
    clawdata.t0 = -days2seconds(3)
    # Restart from checkpoint file of a previous run?
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006' # File to use for restart data
    # -------------
    # Output times:
    # --------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.
    clawdata.output_style = 1
    if clawdata.output_style == 1:
        # Output nout frames at equally spaced times up to tfinal:
        clawdata.tfinal = days2seconds(3)
        # 'recurrence' is the number of output frames per simulated day.
        recurrence = 4
        clawdata.num_output_times = int((clawdata.tfinal - clawdata.t0) *
                                        recurrence / (60**2 * 24))
        clawdata.output_t0 = True # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list of output times.
        clawdata.output_times = [0.5, 1.0]
    elif clawdata.output_style == 3:
        # Output every iout timesteps with a total of ntot time steps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 1
        clawdata.output_t0 = True
    clawdata.output_format = 'ascii' # 'ascii' or 'binary'
    clawdata.output_q_components = 'all' # could be list such as [True,True]
    clawdata.output_aux_components = 'all'
    clawdata.output_aux_onlyonce = False # output aux arrays only at t0
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = True
    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.016
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 10000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 1
    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'
    # For unsplit method, transverse_waves can be
    # 0 or 'none' ==> donor cell (only normal solver used)
    # 1 or 'increment' ==> corner transport of waves
    # 2 or 'all' ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 1
    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3
    # List of limiters to use for each wave family:
    # Required: len(limiter) == num_waves
    # Some options:
    # 0 or 'none' ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod' ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'mc' ==> MC limiter
    # 4 or 'vanleer' ==> van Leer
    clawdata.limiter = ['mc', 'mc', 'mc']
    clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
    # Source terms splitting:
    # src_split == 0 or 'none'
    # ==> no source term (src routine never called)
    # src_split == 1 or 'godunov'
    # ==> Godunov (1st order) splitting used,
    # src_split == 2 or 'strang'
    # ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 'godunov'
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2
    # Choice of BCs at xlower and xupper:
    # 0 => user specified (must modify bcN.f to use this option)
    # 1 => extrapolation (non-reflecting outflow)
    # 2 => periodic (must specify this at both boundaries)
    # 3 => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap'
    clawdata.bc_upper[0] = 'extrap'
    clawdata.bc_lower[1] = 'extrap'
    clawdata.bc_upper[1] = 'extrap'
    # Specify when checkpoint files should be created that can be
    # used to restart a computation.
    clawdata.checkpt_style = 0
    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif np.abs(clawdata.checkpt_style) == 1:
        # Checkpoint only at tfinal.
        pass
    elif np.abs(clawdata.checkpt_style) == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1, 0.15]
    elif np.abs(clawdata.checkpt_style) == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5
    # ---------------
    # AMR parameters:
    # ---------------
    amrdata = rundata.amrdata
    # max number of refinement levels:
    amrdata.amr_levels_max = 5
    # List of refinement ratios at each level (length at least mxnest-1)
    amrdata.refinement_ratios_x = [2, 2, 2, 6, 16]
    amrdata.refinement_ratios_y = [2, 2, 2, 6, 16]
    amrdata.refinement_ratios_t = [2, 2, 2, 6, 16]
    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft', 'center', 'center',
                        'center', 'center']
    # Flag using refinement routine flag2refine rather than richardson error
    amrdata.flag_richardson = False # use Richardson?
    amrdata.flag2refine = True
    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3
    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2
    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.700000
    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0
    # ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False # print domain flags
    amrdata.eprint = False # print err est flags
    amrdata.edebug = False # even more err est flags
    amrdata.gprint = False # grid bisection/clustering
    amrdata.nprint = False # proper nesting output
    amrdata.pprint = False # proj. of tagged points
    amrdata.rprint = False # print regridding summary
    amrdata.sprint = False # space/memory output
    amrdata.tprint = False # time step reporting each level
    amrdata.uprint = False # update/upbnd reporting
    # More AMR parameters can be set -- see the defaults in pyclaw/data.py
    # == setregions.data values ==
    regions = rundata.regiondata.regions
    # to specify regions of refinement append lines of the form
    # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    # Gauges from NOAA Inundation Dashboard for Hurricane Irene
    # Each entry: [gauge id, longitude, latitude, start time, end time].
    rundata.gaugedata.gauges.append([1, -76.518855, 34.589072,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([2, -76.01, 37.16,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([3, -74.30, 39.26,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([4, -74.0, 40.5,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([5, -72.0, 41.23,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([6, -70.95, 42.36,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    # Force the gauges to also record the wind and pressure fields
    rundata.gaugedata.aux_out_fields = [4, 5, 6]
    # ------------------------------------------------------------------
    # GeoClaw specific parameters:
    # ------------------------------------------------------------------
    rundata = setgeo(rundata)
    return rundata
# end of function setrun
# ----------------------
# -------------------
def setgeo(rundata):
    """
    Set GeoClaw specific runtime parameters.

    Configures physics, refinement criteria, topography, the Hurricane
    Irene storm forcing (downloaded ATCF track), and variable friction.
    For documentation see ....
    """
    geo_data = rundata.geo_data
    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2
    geo_data.earth_radius = 6367.5e3
    geo_data.rho = 1025.0
    geo_data.rho_air = 1.15
    geo_data.ambient_pressure = 101.3e3
    # == Forcing Options
    geo_data.coriolis_forcing = True
    geo_data.friction_forcing = True
    geo_data.friction_depth = 1e10
    # == Algorithm and Initial Conditions ==
    # Due to seasonal swelling of gulf we set sea level higher
    # NOTE(review): the value below is 0 despite the comment above --
    # confirm the intended sea level offset.
    geo_data.sea_level = 0
    geo_data.dry_tolerance = 1.e-2
    # Refinement Criteria
    refine_data = rundata.refinement_data
    refine_data.wave_tolerance = 1.0
    refine_data.speed_tolerance = [1.0, 2.0, 3.0, 4.0]
    refine_data.deep_depth = 300.0
    refine_data.max_level_deep = 4
    refine_data.variable_dt_refinement_ratios = True
    # == settopo.data values ==
    topo_data = rundata.topo_data
    topo_data.topofiles = []
    # for topography, append lines of the form
    # See regions for control over these regions, need better bathy data for
    # the smaller domains
    irene_path=os.path.join(DATA, 'irenetopo.tt3')
    topo_data.topofiles.append([3, 1, 5, rundata.clawdata.t0,
                                rundata.clawdata.tfinal,
                                irene_path])
    # == setfixedgrids.data values ==
    rundata.fixed_grid_data.fixedgrids = []
    # for fixed grids append lines of the form
    # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
    # ioutarrivaltimes,ioutsurfacemax]
    # ================
    # Set Surge Data
    # ================
    data = rundata.surge_data
    # Source term controls
    data.wind_forcing = True
    data.drag_law = 1
    data.pressure_forcing = True
    data.display_landfall_time = True
    # AMR parameters, m/s and m respectively
    data.wind_refine = [20.0, 40.0, 60.0]
    data.R_refine = [60.0e3, 40e3, 20e3]
    # Storm parameters - Parameterized storm (Holland 1980)
    data.storm_specification_type = 'holland80' # (type 1)
    data.storm_file = os.path.expandvars(os.path.join(os.getcwd(),
                                         'irene.storm'))
    # Convert ATCF data to GeoClaw format
    clawutil.data.get_remote_file(
        "http://ftp.nhc.noaa.gov/atcf/archive/2011/bal092011.dat.gz")
    atcf_path = os.path.join(DATA, "bal092011.dat")
    # Note that the get_remote_file function does not support gzip files which
    # are not also tar files. The following code handles this
    with gzip.open(".".join((atcf_path, 'gz')), 'rb') as atcf_file, \
            open(atcf_path, 'w') as atcf_unzipped_file:
        atcf_unzipped_file.write(atcf_file.read().decode('ascii'))
    irene = Storm(path=atcf_path, file_format="ATCF")
    # Calculate landfall time - Need to specify as the file above does not
    # include this info (9/13/2008 ~ 7 UTC)
    irene.time_offset = datetime.datetime(2011, 8, 27, 12)
    irene.write(data.storm_file, file_format='geoclaw')
    # =======================
    # Set Variable Friction
    # =======================
    data = rundata.friction_data
    # Variable friction
    data.variable_friction = True
    # Region based friction
    # Entire domain
    data.friction_regions.append([rundata.clawdata.lower,
                                  rundata.clawdata.upper,
                                  [np.infty, 0.0, -np.infty],
                                  [0.030, 0.022]])
    # La-Tex Shelf
    # NOTE(review): this region (-98..-90 W) lies west of the computational
    # domain set in setrun (-89.83..-28.62) -- presumably copied from a Gulf
    # of Mexico example; confirm whether it is intended.
    data.friction_regions.append([(-98, 25.25), (-90, 30),
                                  [np.infty, -10.0, -200.0, -np.infty],
                                  [0.030, 0.012, 0.022]])
    return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    import sys
    # An optional command line argument selects the claw package.
    if len(sys.argv) == 2:
        rundata = setrun(sys.argv[1])
    else:
        rundata = setrun()
    rundata.write()
| mandli/surge-examples | irene/setrun.py | Python | mit | 16,326 |
from muntjac.api import Application, Button, GridLayout, Label, Window
from muntjac.ui.button import IClickListener
class Calc(Application, IClickListener):
    """A simple calculator using Muntjac."""

    def __init__(self):
        super(Calc, self).__init__()
        # All calculator state lives on the instance, which Muntjac keeps
        # in the user's session automatically.
        self._current = 0.0                 # number currently being typed
        self._stored = 0.0                  # accumulated result
        self._lastOperationRequested = 'C'  # operator pending application
        # Read-only result display shown at the top of the grid.
        self._display = Label('0.0')

    def init(self):
        """Build the UI and connect it to the calculator logic.

        Called once per application instance by the framework.
        """
        # Main layout: 4 columns by 5 rows.
        grid = GridLayout(4, 5)
        # The main window is shown when the application starts.
        self.setMainWindow(Window('Calculator Application', grid))
        # The result display spans all 4 columns of the first row.
        grid.addComponent(self._display, 0, 0, 3, 0)
        # Button captions in on-screen order (left to right, top to bottom).
        for caption in ('7', '8', '9', '/', '4', '5', '6', '*',
                        '1', '2', '3', '-', '0', '=', 'C', '+'):
            btn = Button(caption)
            # This Application instance is the click listener for every button.
            btn.addListener(self)
            grid.addComponent(btn)

    def buttonClick(self, event):
        """Handle a click on any calculator button and refresh the display."""
        caption = event.getButton().getCaption()
        # The first character of the caption is the digit/operator requested.
        self._display.setValue(self.calculate(caption[0]))

    def calculate(self, requestedOperation):
        """Apply *requestedOperation* and return the value to display.

        Digits extend the number being entered; operators apply the
        previously requested operation to the stored accumulator.
        """
        if '0' <= requestedOperation <= '9':
            # Append one decimal digit to the number being entered.
            self._current = self._current * 10 + float(requestedOperation)
            return self._current
        previous = self._lastOperationRequested
        if previous == '+':
            self._stored += self._current
        elif previous == '-':
            self._stored -= self._current
        elif previous == '/':
            try:
                self._stored /= self._current
            except ZeroDivisionError:
                # Division by zero is deliberately ignored.
                pass
        elif previous == '*':
            self._stored *= self._current
        elif previous == 'C':
            self._stored = self._current
        self._lastOperationRequested = requestedOperation
        self._current = 0.0
        if requestedOperation == 'C':
            # Clear resets the accumulator as well.
            self._stored = 0.0
        return self._stored
if __name__ == '__main__':
    from muntjac.main import muntjac
    # Serve the Calc application headlessly (nogui) until interrupted.
    muntjac(Calc, nogui=True, forever=True, debug=True)
| rwl/muntjac | muntjac/demo/Calc.py | Python | apache-2.0 | 3,259 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import json
import platform
import subprocess
import logging
from time import sleep
import invoke
from invoke import Collection
from website import settings
from utils import pip_install, bin_prefix
logging.getLogger('invoke').setLevel(logging.CRITICAL)
# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
CONSTRAINTS_PATH = os.path.join(HERE, 'requirements', 'constraints.txt')
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
ns = Collection()
try:
from admin import tasks as admin_tasks
ns.add_collection(Collection.from_module(admin_tasks), name='admin')
except ImportError:
pass
def task(*args, **kwargs):
    """Behaves the same way as invoke.task. Adds the task
    to the root namespace.
    """
    # Bare-decorator form: ``@task`` applied directly to a function.
    if len(args) == 1 and callable(args[0]):
        new_task = invoke.task(args[0])
        ns.add_task(new_task)
        return new_task
    # Parameterized form: ``@task(aliases=[...], ...)`` returns a decorator.
    def decorator(f):
        new_task = invoke.task(f, *args, **kwargs)
        ns.add_task(new_task)
        return new_task
    return decorator
@task
def server(ctx, host=None, port=5000, debug=True, live=False, gitlogs=False):
    """Run the app server."""
    if gitlogs:
        git_logs(ctx)
    from website.app import init_app
    os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
    app = init_app(set_backends=True, routes=True)
    settings.API_SERVER_PORT = port
    if live:
        # Live-reload mode: restart/refresh on changes to built static assets.
        from livereload import Server
        server = Server(app.wsgi_app)
        server.watch(os.path.join(HERE, 'website', 'static', 'public'))
        server.serve(port=port)
    else:
        # Plain Flask dev server; serve over SSL when SECURE_MODE is set.
        if settings.SECURE_MODE:
            context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
        else:
            context = None
        app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context)
@task
def git_logs(ctx, branch=None):
    """Gather git log metadata via the scripts.meta.gatherer helper."""
    from scripts.meta import gatherer
    gatherer.main(branch=branch)
@task
def apiserver(ctx, port=8000, wait=True, host='127.0.0.1'):
    """Run the API server."""
    env = os.environ.copy()
    cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'\
        .format(sys.executable, host, port)
    if settings.SECURE_MODE:
        # Swap in django-sslserver and point it at the OSF cert/key pair.
        cmd = cmd.replace('runserver', 'runsslserver')
        cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    if wait:
        return ctx.run(cmd, echo=True, pty=True)
    # Non-blocking mode: hand back the Popen so callers (e.g. test_varnish)
    # can kill the server themselves.
    from subprocess import Popen
    return Popen(cmd, shell=True, env=env)
@task
def adminserver(ctx, port=8001, host='127.0.0.1'):
    """Run the Admin server."""
    env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
    cmd = '{} python manage.py runserver {}:{} --nothreading'.format(env, host, port)
    if settings.SECURE_MODE:
        # Same SSL swap as apiserver above.
        cmd = cmd.replace('runserver', 'runsslserver')
        cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    ctx.run(cmd, echo=True, pty=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
{transaction}
Available variables:
{context}
"""
TRANSACTION_WARNING = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag.
"""
def make_shell_context(auto_transact=True):
    """Build the namespace injected into the interactive OSF shell.

    When ``auto_transact`` is True a TokuMX transaction is opened before
    returning, and the exposed commit()/rollback() helpers reopen a fresh
    transaction after each use.
    """
    from modularodm import Q
    from framework.auth import User, Auth
    from framework.mongo import database
    from website.app import init_app
    from website.project.model import Node
    from website import models  # all models
    from website import settings
    import requests
    from framework.transactions import commands
    from framework.transactions import context as tcontext
    app = init_app()
    def commit():
        # Persist pending changes, then immediately start a new transaction
        # so the shell session keeps its "always inside a txn" invariant.
        commands.commit()
        print('Transaction committed.')
        if auto_transact:
            commands.begin()
            print('New transaction opened.')
    def rollback():
        # Discard pending changes; same reopen behavior as commit().
        commands.rollback()
        print('Transaction rolled back.')
        if auto_transact:
            commands.begin()
            print('New transaction opened.')
    context = {
        'transaction': tcontext.TokuTransaction,
        'start_transaction': commands.begin,
        'commit': commit,
        'rollback': rollback,
        'app': app,
        'db': database,
        'User': User,
        'Auth': Auth,
        'Node': Node,
        'Q': Q,
        'models': models,
        'run_tests': test,
        'rget': requests.get,
        'rpost': requests.post,
        'rdelete': requests.delete,
        'rput': requests.put,
        'settings': settings,
    }
    try:  # Add a fake factory for generating fake names, emails, etc.
        from faker import Factory
        fake = Factory.create()
        context['fake'] = fake
    except ImportError:
        pass
    if auto_transact:
        commands.begin()
    return context
def format_context(context):
    """Render a shell-context dict as one ``name: repr(value)`` line per entry."""
    return '\n'.join(
        '{0}: {1!r}'.format(name, obj) for name, obj in context.items()
    )
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell(ctx, transaction=True):
    """Launch an interactive OSF shell, preferring IPython when installed."""
    context = make_shell_context(auto_transact=transaction)
    banner = SHELL_BANNER.format(version=sys.version,
                                 context=format_context(context),
                                 transaction=TRANSACTION_WARNING if transaction else ''
                                 )
    try:
        try:
            # 0.10.x
            from IPython.Shell import IPShellEmbed
            ipshell = IPShellEmbed(banner=banner)
            ipshell(global_ns={}, local_ns=context)
        except ImportError:
            # 0.12+
            from IPython import embed
            embed(banner1=banner, user_ns=context)
        return
    except ImportError:
        pass
    # fallback to basic python shell
    code.interact(banner, local=context)
    return
@task(aliases=['mongo'])
def mongoserver(ctx, daemon=False, config=None):
    """Run the mongod process.

    If no config file is given, fall back to the platform's default
    TokuMX config location (homebrew path on OS X, /etc on Linux).
    """
    if not config:
        platform_configs = {
            'darwin': '/usr/local/etc/tokumx.conf',  # default for homebrew install
            'linux': '/etc/tokumx.conf',
        }
        # Use a distinct local name so we do not shadow the module-level
        # ``platform`` import.
        platform_key = str(sys.platform).lower()
        config = platform_configs.get(platform_key)
    port = settings.DB_PORT
    cmd = 'mongod --port {0}'.format(port)
    if config:
        cmd += ' --config {0}'.format(config)
    if daemon:
        # Fork mongod into the background.
        cmd += ' --fork'
    ctx.run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient(ctx):
    """Run the mongo shell for the OSF database."""
    db = settings.DB_NAME
    port = settings.DB_PORT
    # pty so the interactive shell gets a proper terminal.
    ctx.run('mongo {db} --port {port}'.format(db=db, port=port), pty=True)
@task
def mongodump(ctx, path):
    """Back up the contents of the running OSF database.

    Dumps into ``<path>/<DB_NAME>``; credentials are appended when the
    settings define them.
    """
    db = settings.DB_NAME
    port = settings.DB_PORT
    # NOTE: a stray ``pty=True`` used to be passed to str.format() here;
    # it was silently ignored by format() and has been removed.
    cmd = 'mongodump --db {db} --port {port} --out {path}'.format(
        db=db,
        port=port,
        path=path,
    )
    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)
    ctx.run(cmd, echo=True)
    print()
    print('To restore from the dumped database, run `invoke mongorestore {0}`'.format(
        os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(ctx, path, drop=False):
    """Restores the running OSF database with the contents of the database at
    the location given its argument.

    By default, the contents of the specified database are added to
    the existing database. The `--drop` option will cause the existing database
    to be dropped.

    A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}, as that's where the
    database dump will be stored.
    """
    db = settings.DB_NAME
    port = settings.DB_PORT
    # NOTE: a stray ``pty=True`` used to be passed to str.format() here;
    # it was silently ignored by format() and has been removed.
    cmd = 'mongorestore --db {db} --port {port}'.format(
        db=db,
        port=port,
    )
    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)
    if drop:
        cmd += ' --drop'
    cmd += ' ' + path
    ctx.run(cmd, echo=True)
@task
def sharejs(ctx, host=None, port=None, db_url=None, cors_allow_origin=None):
    """Start a local ShareJS server."""
    # The node server reads its configuration from environment variables.
    if host:
        os.environ['SHAREJS_SERVER_HOST'] = host
    if port:
        os.environ['SHAREJS_SERVER_PORT'] = port
    if db_url:
        os.environ['SHAREJS_DB_URL'] = db_url
    if cors_allow_origin:
        os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
    if settings.SENTRY_DSN:
        os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
    share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
    ctx.run('node {0}'.format(share_server))
@task(aliases=['celery'])
def celery_worker(ctx, level='debug', hostname=None, beat=False):
    """Run the Celery process."""
    cmd = 'celery worker -A framework.celery_tasks -l {0}'.format(level)
    if hostname:
        cmd = cmd + ' --hostname={}'.format(hostname)
    # beat sets up a cron like scheduler, refer to website/settings
    if beat:
        cmd = cmd + ' --beat'
    ctx.run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(ctx, level='debug', schedule=None):
    """Run the Celery beat scheduler process."""
    # beat sets up a cron like scheduler, refer to website/settings
    cmd = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level)
    if schedule:
        cmd = cmd + ' --schedule={}'.format(schedule)
    ctx.run(bin_prefix(cmd), pty=True)
@task
def rabbitmq(ctx):
    """Start a local rabbitmq server.

    NOTE: this is for development only. The production environment should start
    the server as a daemon.
    """
    ctx.run('rabbitmq-server', pty=True)
@task(aliases=['elastic'])
def elasticsearch(ctx):
    """Start a local elasticsearch server.

    NOTE: Requires that elasticsearch is installed. See README for instructions.
    """
    # ``platform`` is already imported at module level; the redundant
    # function-local ``import platform`` has been removed.
    if platform.linux_distribution()[0] == 'Ubuntu':
        ctx.run('sudo service elasticsearch start')
    elif platform.system() == 'Darwin':  # Mac OSX
        ctx.run('elasticsearch')
    else:
        print('Your system is not recognized, you will have to start elasticsearch manually')
@task
def migrate_search(ctx, delete=False, index=settings.ELASTIC_INDEX):
    """Migrate the search-enabled models."""
    from website.search_migration.migrate import migrate
    migrate(delete, index=index)
@task
def rebuild_search(ctx):
    """Delete and recreate the index for elasticsearch"""
    # Wildcard delete clears the index plus any suffixed variants.
    ctx.run('curl -s -XDELETE {uri}/{index}*'.format(uri=settings.ELASTIC_URI,
                                                     index=settings.ELASTIC_INDEX))
    ctx.run('curl -s -XPUT {uri}/{index}'.format(uri=settings.ELASTIC_URI,
                                                 index=settings.ELASTIC_INDEX))
    migrate_search(ctx)
@task
def mailserver(ctx, port=1025):
    """Run a SMTP test server."""
    # DebuggingServer just prints messages to stdout instead of delivering.
    cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
    ctx.run(bin_prefix(cmd), pty=True)
@task
def jshint(ctx):
    """Run JSHint syntax check"""
    js_folder = os.path.join(HERE, 'website', 'static', 'js')
    cmd = 'jshint {}'.format(js_folder)
    ctx.run(cmd, echo=True)
@task(aliases=['flake8'])
def flake(ctx):
    """Run flake8 over the whole repository."""
    ctx.run('flake8 .', echo=True)
@task(aliases=['req'])
def requirements(ctx, base=False, addons=False, release=False, dev=False, metrics=False, quick=False):
    """Install python dependencies.

    Examples:
        inv requirements
        inv requirements --quick

    Quick requirements are, in order, addons, dev and the base requirements. You should be able to use --quick for
    day to day development.

    By default, base requirements will run. However, if any set of addons, release, dev, or metrics are chosen, base
    will have to be mentioned explicitly in order to run. This is to remain compatible with previous usages. Release
    requirements will prevent dev, metrics, and base from running.
    """
    if quick:
        base = True
        addons = True
        dev = True
    if not(addons or dev or metrics):
        base = True
    if release or addons:
        addon_requirements(ctx)
    # "release" takes precedence
    if release:
        req_file = os.path.join(HERE, 'requirements', 'release.txt')
        ctx.run(
            pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
            echo=True
        )
    else:
        if dev:  # then dev requirements
            req_file = os.path.join(HERE, 'requirements', 'dev.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
        if metrics:  # then dev requirements
            req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
        if base:  # then base requirements
            req_file = os.path.join(HERE, 'requirements.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
@task
def test_module(ctx, module=None, verbosity=2):
    """Helper for running tests.
    """
    # Allow selecting specific submodule
    module_fmt = ' '.join(module) if isinstance(module, list) else module
    args = ' --verbosity={0} -s {1}'.format(verbosity, module_fmt)
    # Use pty so the process buffers "correctly"
    ctx.run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf(ctx):
    """Run the OSF test suite."""
    test_module(ctx, module='tests/')
@task
def test_api(ctx):
    """Run the API test suite."""
    test_module(ctx, module='api_tests/')
@task
def test_admin(ctx):
    """Run the Admin test suite."""
    # test_module(ctx, module="admin_tests/")
    module = 'admin_tests/'
    module_fmt = ' '.join(module) if isinstance(module, list) else module
    # Django tests must run through manage.py rather than nose directly.
    admin_tasks.manage(ctx, 'test {}'.format(module_fmt))
@task
def test_varnish(ctx):
    """Run the Varnish test suite."""
    # Start the API server in the background, give it time to boot,
    # run the caching tests, then tear it down.
    proc = apiserver(ctx, wait=False)
    sleep(5)
    test_module(ctx, module='api/caching/tests/test_caching.py')
    proc.kill()
@task
def test_addons(ctx):
    """Run all the tests in the addons directory.
    """
    modules = []
    for addon in settings.ADDONS_REQUESTED:
        module = os.path.join(settings.BASE_PATH, 'addons', addon)
        modules.append(module)
    test_module(ctx, module=modules)
@task
def test(ctx, all=False, syntax=False):
    """
    Run unit tests: OSF (always), plus addons and syntax checks (optional)
    """
    if syntax:
        flake(ctx)
        jshint(ctx)
    test_osf(ctx)
    test_api(ctx)
    test_admin(ctx)
    if all:
        test_addons(ctx)
        karma(ctx, single=True, browsers='PhantomJS')
@task
def test_travis_osf(ctx):
    """
    Run half of the tests to help travis go faster
    """
    flake(ctx)
    jshint(ctx)
    test_osf(ctx)
@task
def test_travis_else(ctx):
    """
    Run other half of the tests to help travis go faster
    """
    test_addons(ctx)
    test_api(ctx)
    test_admin(ctx)
    karma(ctx, single=True, browsers='PhantomJS')
@task
def test_travis_varnish(ctx):
    """Run the Varnish tests on travis."""
    test_varnish(ctx)
@task
def karma(ctx, single=False, sauce=False, browsers=None):
    """Run JS tests with Karma. Requires PhantomJS to be installed."""
    karma_bin = os.path.join(
        HERE, 'node_modules', 'karma', 'bin', 'karma'
    )
    cmd = '{} start'.format(karma_bin)
    if sauce:
        cmd += ' karma.saucelabs.conf.js'
    if single:
        cmd += ' --single-run'
    # Use browsers if specified on the command-line, otherwise default
    # what's specified in karma.conf.js
    if browsers:
        cmd += ' --browsers {}'.format(browsers)
    ctx.run(cmd, echo=True)
@task
def wheelhouse(ctx, addons=False, release=False, dev=False, metrics=False):
    """Build wheels for python dependencies.

    Examples:
        inv wheelhouse --dev
        inv wheelhouse --addons
        inv wheelhouse --release
        inv wheelhouse --metrics
    """
    if release or addons:
        # Build wheels for every addon that ships a requirements.txt.
        for directory in os.listdir(settings.ADDON_PATH):
            path = os.path.join(settings.ADDON_PATH, directory)
            if os.path.isdir(path):
                req_file = os.path.join(path, 'requirements.txt')
                if os.path.exists(req_file):
                    cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
                    ctx.run(cmd, pty=True)
    if release:
        req_file = os.path.join(HERE, 'requirements', 'release.txt')
    elif dev:
        req_file = os.path.join(HERE, 'requirements', 'dev.txt')
    elif metrics:
        req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
    else:
        req_file = os.path.join(HERE, 'requirements.txt')
    cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
    ctx.run(cmd, pty=True)
@task
def addon_requirements(ctx):
    """Install all addon requirements."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory)
        requirements_file = os.path.join(path, 'requirements.txt')
        # Only addons that ship a requirements.txt need installation.
        if os.path.isdir(path) and os.path.isfile(requirements_file):
            print('Installing requirements for {0}'.format(directory))
            ctx.run(
                pip_install(requirements_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
    print('Finished installing addon requirements')
@task
def encryption(ctx, owner=None):
    """Generate GnuPG key.

    For local development:
    > invoke encryption
    On Linode:
    > sudo env/bin/invoke encryption --owner www-data
    """
    if not settings.USE_GNUPG:
        print('GnuPG is not enabled. No GnuPG key will be generated.')
        return
    import gnupg
    gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
    # Idempotent: never generate a second key.
    keys = gpg.list_keys()
    if keys:
        print('Existing GnuPG key found')
        return
    print('Generating GnuPG key')
    input_data = gpg.gen_key_input(name_real='OSF Generated Key')
    gpg.gen_key(input_data)
    if owner:
        # Hand the keyring over to the web server user.
        ctx.run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings(ctx):
    """Copy each addon's local-travis.py settings file to local.py."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path):
            try:
                # Probe for the file; close the handle immediately so we do
                # not leak one open file descriptor per addon.
                open(os.path.join(path, 'local-travis.py')).close()
                ctx.run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
            except IOError:
                # No travis settings for this addon -- skip it.
                pass
@task
def copy_addon_settings(ctx):
    """Copy each addon's local-dist.py to local.py unless local.py exists."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            try:
                # Probe for the file; close the handle immediately so we do
                # not leak one open file descriptor per addon.
                open(os.path.join(path, 'local-dist.py')).close()
                ctx.run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
            except IOError:
                # No dist settings for this addon -- skip it.
                pass
@task
def copy_settings(ctx, addons=False):
    """Create local settings files from their -dist templates if missing."""
    # Website settings
    if not os.path.isfile('website/settings/local.py'):
        print('Creating local.py file')
        ctx.run('cp website/settings/local-dist.py website/settings/local.py')
    # Addon settings
    if addons:
        copy_addon_settings(ctx)
@task
def packages(ctx):
    """Install system-level packages (OS X via homebrew only for now)."""
    brew_commands = [
        'update',
        'upgrade',
        'install libxml2',
        'install libxslt',
        'install elasticsearch',
        'install gpg',
        'install node',
        'tap tokutek/tokumx',
        'install tokumx-bin',
    ]
    if platform.system() == 'Darwin':
        print('Running brew commands')
        for item in brew_commands:
            command = 'brew {cmd}'.format(cmd=item)
            ctx.run(command)
    elif platform.system() == 'Linux':
        # TODO: Write a script similar to brew bundle for Ubuntu
        # e.g., run('sudo apt-get install [list of packages]')
        pass
@task(aliases=['bower'])
def bower_install(ctx):
    """Prune and install bower-managed front-end packages."""
    print('Installing bower-managed packages')
    bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
    ctx.run('{} prune'.format(bower_bin), echo=True)
    ctx.run('{} install'.format(bower_bin), echo=True)
@task
def setup(ctx):
    """Creates local settings, installs requirements, and generates encryption key"""
    copy_settings(ctx, addons=True)
    packages(ctx)
    requirements(ctx, addons=True, dev=True)
    encryption(ctx)
    # Build nodeCategories.json before building assets
    build_js_config_files(ctx)
    assets(ctx, dev=True, watch=False)
@task
def clear_sessions(ctx, months=1, dry_run=False):
    """Delete OSF sessions older than the given number of months."""
    from website.app import init_app
    init_app(routes=False, set_backends=True)
    from scripts import clear_sessions
    clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(ctx, name, finish=False, push=False):
    """Rename hotfix branch to hotfix/<next-patch-version> and optionally
    finish hotfix.
    """
    # Typo fix: "curent" -> "current" in the status message.
    print('Checking out master to calculate current version')
    ctx.run('git checkout master')
    latest_version = latest_tag_info()['current_version']
    print('Current version is: {}'.format(latest_version))
    # Bump the patch component of the semver string.
    major, minor, patch = latest_version.split('.')
    next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
    print('Bumping to next patch version: {}'.format(next_patch_version))
    print('Renaming branch...')
    new_branch_name = 'hotfix/{}'.format(next_patch_version)
    ctx.run('git checkout {}'.format(name), echo=True)
    ctx.run('git branch -m {}'.format(new_branch_name), echo=True)
    if finish:
        ctx.run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
    if push:
        ctx.run('git push origin master', echo=True)
        ctx.run('git push --tags', echo=True)
        ctx.run('git push origin develop', echo=True)
@task
def feature(ctx, name, finish=False, push=False):
    """Rename the current branch to a feature branch and optionally finish it."""
    print('Renaming branch...')
    ctx.run('git branch -m feature/{}'.format(name), echo=True)
    if finish:
        # git-flow merges the feature back into develop.
        ctx.run('git flow feature finish {}'.format(name), echo=True)
    if push:
        ctx.run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
    """Parse ``git describe`` output for the most recent tag.

    Returns a dict with ``current_version``, ``commit_sha``,
    ``distance_to_latest_tag`` and, when the tree is modified, ``dirty``.
    Raises subprocess.CalledProcessError when ``git describe`` fails.
    """
    try:
        # git-describe doesn't update the git-index, so we do that
        # subprocess.check_output(["git", "update-index", "--refresh"])
        # get info about the latest tag in git
        describe_out = subprocess.check_output([
            'git',
            'describe',
            '--dirty',
            '--tags',
            '--long',
            '--abbrev=40'
        ], stderr=subprocess.STDOUT
        ).decode().split('-')
    except subprocess.CalledProcessError:
        # Re-raise with the original traceback. The lines that previously
        # followed ``raise err`` here were unreachable and have been removed.
        raise
    info = {}
    if describe_out[-1].strip() == 'dirty':
        info['dirty'] = True
        describe_out.pop()
    info['commit_sha'] = describe_out.pop().lstrip('g')
    info['distance_to_latest_tag'] = int(describe_out.pop())
    info['current_version'] = describe_out.pop().lstrip('v')
    # Everything from the describe output should now be consumed.
    assert 0 == len(describe_out)
    return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(ctx, domain, bits=2048):
    """Generate a passphrase-protected RSA key for *domain*."""
    cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
    ctx.run(cmd)
@task
def generate_key_nopass(ctx, domain):
    """Strip the passphrase from the key produced by generate_key."""
    cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
        domain=domain
    )
    ctx.run(cmd)
@task
def generate_csr(ctx, domain):
    """Create a certificate signing request from the passphrase-free key."""
    cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
        domain=domain
    )
    ctx.run(cmd)
@task
def request_ssl_cert(ctx, domain):
    """Generate a key, a key with password removed, and a signing request for
    the specified domain.

    Usage:
    > invoke request_ssl_cert pizza.osf.io
    """
    generate_key(ctx, domain)
    generate_key_nopass(ctx, domain)
    generate_csr(ctx, domain)
@task
def bundle_certs(ctx, domain, cert_path):
    """Concatenate certificates from NameCheap in the correct order. Certificate
    files must be in the same directory.
    """
    # Order matters: leaf cert first, then intermediates, then the root.
    cert_files = [
        '{0}.crt'.format(domain),
        'COMODORSADomainValidationSecureServerCA.crt',
        'COMODORSAAddTrustCA.crt',
        'AddTrustExternalCARoot.crt',
    ]
    certs = ' '.join(
        os.path.join(cert_path, cert_file)
        for cert_file in cert_files
    )
    cmd = 'cat {certs} > {domain}.bundle.crt'.format(
        certs=certs,
        domain=domain,
    )
    ctx.run(cmd)
@task
def clean_assets(ctx):
    """Remove built JS files."""
    public_path = os.path.join(HERE, 'website', 'static', 'public')
    js_path = os.path.join(public_path, 'js')
    ctx.run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(ctx, clean=False, watch=False, dev=False, colors=False):
    """Build static assets with webpack."""
    if clean:
        clean_assets(ctx)
    webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
    args = [webpack_bin]
    args += ['--progress']
    if watch:
        args += ['--watch']
    if colors:
        args += ['--colors']
    # dev builds are unminified with source maps; prod builds are optimized.
    config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
    args += ['--config {0}'.format(config_file)]
    command = ' '.join(args)
    ctx.run(command, echo=True)
@task()
def build_js_config_files(ctx):
    """Write nodeCategories.json, which the JS asset build consumes."""
    from website import settings
    print('Building JS config files...')
    # Open in text mode: json.dump() writes ``str``, which a binary-mode
    # ('wb') file object rejects on Python 3. Text mode works on 2 and 3.
    with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'w') as fp:
        json.dump(settings.NODE_CATEGORY_MAP, fp)
    print('...Done.')
@task()
def assets(ctx, dev=False, watch=False, colors=False):
    """Install and build static assets."""
    npm = 'npm install'
    if not dev:
        # Skip devDependencies on production installs.
        npm += ' --production'
    ctx.run(npm, echo=True)
    bower_install(ctx)
    build_js_config_files(ctx)
    # Always set clean=False to prevent possible mistakes
    # on prod
    webpack(ctx, clean=False, watch=watch, dev=dev, colors=colors)
@task
def generate_self_signed(ctx, domain):
    """Generate self-signed SSL key and certificate.
    """
    cmd = (
        'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
        ' -keyout {0}.key -out {0}.crt'
    ).format(domain)
    ctx.run(cmd)
@task
def update_citation_styles(ctx):
    """Re-parse the CSL citation style repository into the database."""
    from scripts import parse_citation_styles
    total = parse_citation_styles.main()
    print('Parsed {} styles'.format(total))
@task
def clean(ctx, verbose=False):
    """Delete compiled bytecode files from the working tree."""
    # NOTE(review): the ``verbose`` flag is currently unused.
    ctx.run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage(ctx):
    """Default task: list all available invoke tasks."""
    ctx.run('invoke --list')
### Maintenance Tasks ###
@task
def set_maintenance(ctx, start=None, end=None):
    """Set the time period for the maintenance notice to be displayed.

    If no start or end values are displayed, default to starting now
    and ending 24 hours from now. If no timezone info is passed along,
    everything will be converted to UTC.

    If a given end time results in a start that is after the end, start
    will be changed to be 24 hours before the end time.

    Examples:
        invoke set_maintenance
        invoke set_maintenance --start 2016-03-16T15:41:00-04:00
        invoke set_maintenance --end 2016-03-16T15:41:00-04:00
    """
    # The docstring above was previously placed *after* this import, which
    # made it a no-op string expression rather than the task's docstring;
    # the examples also referenced a nonexistent ``set_maintenance_state``.
    # The imported name deliberately shadows this task inside the body.
    from website.maintenance import set_maintenance, get_maintenance
    set_maintenance(start, end)
    state = get_maintenance()
    print('Maintenance notice up for {} to {}.'.format(state['start'], state['end']))
@task
def unset_maintenance(ctx):
    """Remove the maintenance notice from the site."""
    from website.maintenance import unset_maintenance
    print('Taking down maintenance notice...')
    unset_maintenance()
    print('...Done.')
| DanielSBrown/osf.io | tasks/__init__.py | Python | apache-2.0 | 30,673 |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc

# This code doesn't do much but makes sure the native extension is loaded
# which is what we are testing here.
channel = grpc.insecure_channel('localhost:1000')
del channel

# Parenthesized print works under both Python 2 (prints the single string
# unchanged) and Python 3, unlike the old ``print 'Success!'`` statement.
print('Success!')
| endlessm/chromium-browser | third_party/grpc/src/test/distrib/python/distribtest.py | Python | bsd-3-clause | 781 |
"""Simple sample showing basic usage pattern"""
import time
from dispatch import Signal
# Module-level signals: listeners subscribe with .connect() and are invoked
# on .send(); ``providing_args`` documents the kwargs each send() supplies.
request_started = Signal(providing_args=["remote_addr"])
request_ended = Signal(providing_args=["time_start", "time_end"])
class Request(object):
    """Toy request object that emits start/end signals over its lifetime."""
    def __init__(self, remote_addr):
        # Origin of the request, e.g. "localhost".
        self.remote_addr = remote_addr
        # Wall-clock start time, reported later via request_ended.
        self.time_start = time.time()
    def enter(self):
        """Announce that request processing has begun."""
        request_started.send(sender=self, remote_addr=self.remote_addr)
    def leave(self):
        """Announce that request processing has finished."""
        time_end = time.time()
        request_ended.send(sender=self, time_start=self.time_start,
                           time_end=time_end)
def mycallback(signal, sender, **kwargs):
    """Demo listener: print every signal delivery it receives."""
    print("Received signal: {0}".format(repr(kwargs)))
if __name__ == "__main__":
request_started.connect(mycallback)
request_ended.connect(mycallback)
request = Request("localhost")
request.enter()
request.leave()
| ask/dispatch | examples/simple_sample.py | Python | bsd-3-clause | 904 |
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=broad-except
"""Check by which hostblock list a host was blocked."""
import sys
import io
import os
import os.path
import configparser
import urllib.request
from PyQt5.QtCore import QStandardPaths
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
from qutebrowser.browser import adblock
def main():
    """Check by which hostblock list a host was blocked."""
    if len(sys.argv) != 2:
        print("Usage: {} <host>".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    host = sys.argv[1]

    # Locate qutebrowser's config file under the platform config directory,
    # appending the app folder if the platform path does not include it.
    confdir = QStandardPaths.writableLocation(QStandardPaths.ConfigLocation)
    confdir = confdir.replace('/', os.sep)
    if confdir.split(os.sep)[-1] != 'qutebrowser':
        confdir = os.path.join(confdir, 'qutebrowser')
    confpath = os.path.join(confdir, 'qutebrowser.conf')

    parser = configparser.ConfigParser()
    print("config path: {}".format(confpath))
    if not parser.read(confpath, encoding='utf-8'):
        raise OSError("configparser did not read files successfully!")

    # Download every configured blocklist and scan it for the host.
    lists = parser['content']['host-block-lists']
    for url in lists.split(','):
        print("checking {}...".format(url))
        raw_file = urllib.request.urlopen(url)
        byte_io = io.BytesIO(raw_file.read())
        blocklist = adblock.get_fileobj(byte_io)
        for line in blocklist:
            if host in line:
                print("FOUND {} in {}:".format(host, url))
                print("    " + line.rstrip())
if __name__ == '__main__':
    # Script entry point.
    main()
| mnick/qutebrowser | scripts/hostblock_blame.py | Python | gpl-3.0 | 2,337 |
"""
Copyright 2007 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from __future__ import absolute_import
from . import Actions
from .Constants import STATE_CACHE_SIZE
class StateCache(object):
    """Circular buffer of flow-graph states backing undo/redo.

    Up to STATE_CACHE_SIZE states are kept in ``self.states``.
    ``current_state_index`` points at the active slot, while
    ``num_prev_states``/``num_next_states`` count how far undo and redo
    may travel from it.
    """

    def __init__(self, initial_state):
        """
        StateCache constructor.

        Args:
            initial_state: the initial state (nested data)
        """
        self.states = [None] * STATE_CACHE_SIZE
        self.current_state_index = 0
        self.num_prev_states = 0
        self.num_next_states = 0
        self.states[0] = initial_state
        self.update_actions()

    def save_new_state(self, state):
        """Record ``state`` in the next slot and drop any redo states.

        Args:
            state: the new state
        """
        self.current_state_index = (
            self.current_state_index + 1) % STATE_CACHE_SIZE
        self.states[self.current_state_index] = state
        # Saturate the undo counter at capacity - 1 (the oldest entry is
        # overwritten once the ring wraps around).
        self.num_prev_states = min(self.num_prev_states + 1,
                                   STATE_CACHE_SIZE - 1)
        self.num_next_states = 0
        self.update_actions()

    def get_current_state(self):
        """Return the state at the current index.

        Returns:
            the current state (nested data)
        """
        self.update_actions()
        return self.states[self.current_state_index]

    def get_prev_state(self):
        """Step back one state if possible.

        Returns:
            the previous state, or None when nothing can be undone
        """
        if not self.num_prev_states:
            return None
        self.current_state_index = (
            self.current_state_index - 1) % STATE_CACHE_SIZE
        self.num_next_states += 1
        self.num_prev_states -= 1
        return self.get_current_state()

    def get_next_state(self):
        """Step forward one state if possible.

        Returns:
            the next state, or None when nothing can be redone
        """
        if not self.num_next_states:
            return None
        self.current_state_index = (
            self.current_state_index + 1) % STATE_CACHE_SIZE
        self.num_next_states -= 1
        self.num_prev_states += 1
        return self.get_current_state()

    def update_actions(self):
        """Enable/disable the undo and redo actions from the counters."""
        Actions.FLOW_GRAPH_REDO.set_enabled(self.num_next_states != 0)
        Actions.FLOW_GRAPH_UNDO.set_enabled(self.num_prev_states != 0)
| skoslowski/gnuradio | grc/gui/StateCache.py | Python | gpl-3.0 | 3,077 |
"""
From http://flask.pocoo.org/snippets/35/
"""
class ReverseProxied:
    """WSGI middleware honouring ``X-Script-Name`` / ``X-Scheme`` headers.

    Lets an app mounted behind a reverse proxy believe it is served from a
    sub-path and/or a different URL scheme than it sees locally.  Configure
    the front-end to set the headers, e.g. in nginx:

        location /myprefix {
            proxy_pass http://192.168.0.1:5001;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Scheme $scheme;
            proxy_set_header X-Script-Name /myprefix;
        }

    :param app: the WSGI application
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        prefix = environ.get("HTTP_X_SCRIPT_NAME", "")
        if prefix:
            # Mount the app under the proxy-announced prefix and strip it
            # from the request path so routing still matches.
            environ["SCRIPT_NAME"] = prefix
            path = environ["PATH_INFO"]
            if path.startswith(prefix):
                environ["PATH_INFO"] = path[len(prefix):]
        forwarded_scheme = environ.get("HTTP_X_SCHEME", "")
        if forwarded_scheme:
            environ["wsgi.url_scheme"] = forwarded_scheme
        return self.app(environ, start_response)
| tiggerntatie/brython-server | brythonserver/reverseproxied.py | Python | mit | 1,213 |
from __future__ import print_function
import sys
import argparse
from argparse import RawTextHelpFormatter
from bashlex import parser, ast
class nodevisitor(ast.nodevisitor):
    """AST visitor that records the spans of command substitutions."""

    def __init__(self, positions):
        self.positions = positions

    def visitcommandsubstitution(self, n, command):
        # Remember where this $(...)/backtick substitution starts and ends,
        # then return False so child nodes are not visited.
        self.positions.append(n.pos)
        return False
desc = '''replace all occurrences of $() and `` with the string given in -s
$ commandsubstitution-remover.py -s nope -c 'foo $(bar)'
foo nope
within words:
$ commandsubstitution-remover.py -c '"foo $(bar) baz"'
"foo XXX baz"
but not within single quotes, since they cancel special meaning:
$ commandsubstitution-remover.py -c "foo '"'$(bar)'"'"
foo '$(bar)'
(this a simple script to demonstrate how to traverse the ast produced
by bashlex)
'''
if __name__ == '__main__':
    # Parse the command line: a replacement string plus exactly one input
    # source (file argument, -c expression, or stdin).
    argparser = argparse.ArgumentParser(description=desc,
                                        formatter_class=RawTextHelpFormatter)
    argparser.add_argument('-s', dest='replacement', metavar='S', default='XXX',
                           help='replace occurrences with S (default: XXX)')
    group = argparser.add_mutually_exclusive_group()
    # BUG FIX: `type=file` relied on the Python 2 `file` builtin, which does
    # not exist on Python 3 (and this script already imports print_function
    # for 2/3 compatibility); argparse.FileType works on both.
    group.add_argument('file', metavar='file', type=argparse.FileType('r'),
                       nargs='?', help='file to parse')
    group.add_argument('-c', dest='expression',
                       help='string to parse')
    args = argparser.parse_args()

    if args.expression:
        s = args.expression
    elif args.file:
        s = args.file.read()
    else:
        s = sys.stdin.read()

    trees = parser.parse(s)
    positions = []
    for tree in trees:
        visitor = nodevisitor(positions)
        visitor.visit(tree)

    # do replacements from the end so the indicies will be correct
    positions.reverse()
    postprocessed = list(s)
    for start, end in positions:
        # replace the portion of the input where the substitution occurred
        # with the replacement string
        postprocessed[start:end] = args.replacement
    print(''.join(postprocessed))
| idank/bashlex | examples/commandsubstitution-remover.py | Python | gpl-3.0 | 2,193 |
#!/usr/bin/env python
############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at sales@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
from PySide import QtCore, QtGui, QtSql
import connection
class CustomSqlModel(QtSql.QSqlQueryModel):
    """Read-only query model that decorates the raw SQL values for display."""

    def data(self, index, role):
        value = super(CustomSqlModel, self).data(index, role)
        if role == QtCore.Qt.DisplayRole and value is not None:
            column = index.column()
            if column == 0:
                # Show the numeric id with a '#' prefix.
                return '#%d' % value
            if column == 2:
                # Show last names in upper case.
                return value.upper()
        if index.column() == 1 and role == QtCore.Qt.TextColorRole:
            # Render first names in blue.
            return QtGui.QColor(QtCore.Qt.blue)
        return value
class EditableSqlModel(QtSql.QSqlQueryModel):
    """Query model that makes the first/last name columns editable."""

    def flags(self, index):
        base = super(EditableSqlModel, self).flags(index)
        if index.column() not in (1, 2):
            return base
        return base | QtCore.Qt.ItemIsEditable

    def setData(self, index, value, role):
        column = index.column()
        if column not in (1, 2):
            return False
        # Grab the person id from column 0 before clear() discards the query.
        personId = self.data(self.index(index.row(), 0))
        self.clear()
        if column == 1:
            ok = self.setFirstName(personId, value)
        else:
            ok = self.setLastName(personId, value)
        self.refresh()
        return ok

    def refresh(self):
        """Re-run the SELECT and restore the header labels."""
        self.setQuery('select * from person')
        for section, label in enumerate(("ID", "First name", "Last name")):
            self.setHeaderData(section, QtCore.Qt.Horizontal, label)

    def setFirstName(self, personId, firstName):
        return self._runUpdate(
            'update person set firstname = ? where id = ?', firstName, personId)

    def setLastName(self, personId, lastName):
        return self._runUpdate(
            'update person set lastname = ? where id = ?', lastName, personId)

    def _runUpdate(self, sql, newValue, personId):
        # Execute a two-parameter UPDATE statement with bound values.
        query = QtSql.QSqlQuery()
        query.prepare(sql)
        query.addBindValue(newValue)
        query.addBindValue(personId)
        return query.exec_()
def initializeModel(model):
    """Attach the person query to *model* and label its columns."""
    model.setQuery('select * from person')
    for section, label in enumerate(("ID", "First name", "Last name")):
        model.setHeaderData(section, QtCore.Qt.Horizontal, label)
# Cascade offset for new windows, and strong Python-side references to the
# created views (otherwise they would be garbage collected and disappear).
offset = 0
views = []
def createView(title, model):
    """Show *model* in a new QTableView window, cascading each window."""
    global offset, views
    view = QtGui.QTableView()
    views.append(view)
    view.setModel(model)
    view.setWindowTitle(title)
    view.move(100 + offset, 100 + offset)
    offset += 20
    view.show()
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    # Bail out if the demo database cannot be created.
    if not connection.createConnection():
        sys.exit(1)
    # Show the same table through three model flavours side by side.
    plainModel = QtSql.QSqlQueryModel()
    editableModel = EditableSqlModel()
    customModel = CustomSqlModel()
    initializeModel(plainModel)
    initializeModel(editableModel)
    initializeModel(customModel)
    createView("Plain Query Model", plainModel)
    createView("Editable Query Model", editableModel)
    createView("Custom Query Model", customModel)
    sys.exit(app.exec_())
| Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/sql/querymodel.py | Python | epl-1.0 | 4,176 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file TP.py
Temporal pooler implementation.
This is the Python implementation and is used as the base class for the C++
implementation.
"""
import copy
import cPickle as pickle
import itertools
import numpy
from nupic.bindings.math import Random
from nupic.bindings.algorithms import getSegmentActivityLevel, isSegmentActive
from nupic.math import GetNTAReal
from nupic.research.TrivialPredictor import TrivialPredictor
from nupic.support.consoleprinter import ConsolePrinterMixin
# Default verbosity while running unit tests
VERBOSITY = 0
# The current TP version used to track the checkpoint state.
TP_VERSION = 1
# The numpy equivalent to the floating point type used by NTA
# NOTE(review): the lowercase module-level name shadows the common
# numpy "dtype" idiom; it holds the real type returned by GetNTAReal().
dtype = GetNTAReal()
class TP(ConsolePrinterMixin):
"""
Class implementing the temporal pooler algorithm as described in the
published Cortical Learning Algorithm documentation. The implementation here
attempts to closely match the pseudocode in the documentation. This
implementation does contain several additional bells and whistles such as
a column confidence measure.
@todo Document other constructor parameters.
@todo Have some higher level flags for fast learning, HiLo, Pooling, etc.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
globalDecay=0.10,
activationThreshold=12,
doPooling=False,
segUpdateValidDuration=5,
burnIn=2,
collectStats=False,
seed=42,
verbosity=VERBOSITY,
checkSynapseConsistency=False, # for cpp only -- ignored
trivialPredictionMethods= '',
pamLength=1,
maxInfBacktrack=10,
maxLrnBacktrack=5,
maxAge=100000,
maxSeqLength=32,
maxSegmentsPerCell=-1,
maxSynapsesPerSegment=-1,
outputType='normal',
):
"""
Construct the TP
@param pamLength Number of time steps to remain in "Pay Attention Mode" after
we detect we've reached the end of a learned sequence. Setting
this to 0 disables PAM mode. When we are in PAM mode, we do
not burst unpredicted columns during learning, which in turn
prevents us from falling into a previously learned sequence
for a while (until we run through another 'pamLength' steps).
The advantge of PAM mode is that it requires fewer
presentations to learn a set of sequences which share
elements. The disadvantage of PAM mode is that if a learned
sequence is immediately followed by set set of elements that
should be learned as a 2nd sequence, the first pamLength
elements of that sequence will not be learned as part of that
2nd sequence.
@param maxAge Controls global decay. Global decay will only decay segments
that have not been activated for maxAge iterations, and will
only do the global decay loop every maxAge iterations. The
default (maxAge=1) reverts to the behavior where global decay
is applied every iteration to every segment. Using maxAge > 1
can significantly speed up the TP when global decay is used.
@param maxSeqLength If not 0, we will never learn more than maxSeqLength inputs
in a row without starting over at start cells. This sets an
upper bound on the length of learned sequences and thus is
another means (besides maxAge and globalDecay) by which to
limit how much the TP tries to learn.
@param maxSegmentsPerCell The maximum number of segments allowed on a cell. This
is used to turn on "fixed size CLA" mode. When in effect,
globalDecay is not applicable and must be set to 0 and
maxAge must be set to 0. When this is used (> 0),
maxSynapsesPerSegment must also be > 0.
@param maxSynapsesPerSegment The maximum number of synapses allowed in a segment.
This is used to turn on "fixed size CLA" mode. When in effect,
globalDecay is not applicable and must be set to 0 and maxAge
must be set to 0. When this is used (> 0), maxSegmentsPerCell
must also be > 0.
@param outputType Can be one of the following: 'normal', 'activeState',
'activeState1CellPerCol'.
'normal': output the OR of the active and predicted state.
'activeState': output only the active state.
'activeState1CellPerCol': output only the active state, and at
most 1 cell/column. If more than 1 cell is active in a column,
the one with the highest confidence is sent up.
Default is 'normal'.
@param trivialPredictionMethods List (as string) of trivial predictions to compute alongside
the full TP. See TrivialPredictor.py for a list of allowed
methods.
@param doPooling If True, pooling is enabled. False is the default.
@param burnIn Used for evaluating the prediction score. Default is 2.
@param collectStats If True, collect training / inference stats. Default is
False.
"""
## @todo document
self.version = TP_VERSION
ConsolePrinterMixin.__init__(self, verbosity)
# Check arguments
assert pamLength > 0, "This implementation must have pamLength > 0"
# Fixed size CLA mode?
if maxSegmentsPerCell != -1 or maxSynapsesPerSegment != -1:
assert (maxSegmentsPerCell > 0 and maxSynapsesPerSegment > 0)
assert (globalDecay == 0.0)
assert (maxAge == 0)
assert maxSynapsesPerSegment >= newSynapseCount, ("TP requires that "
"maxSynapsesPerSegment >= newSynapseCount. (Currently %s >= %s)" % (
maxSynapsesPerSegment, newSynapseCount))
# Seed random number generator
if seed >= 0:
self._random = Random(seed)
else:
self._random = Random(numpy.random.randint(256))
# Store creation parameters
## @todo document
self.numberOfCols = numberOfCols
## @todo document
self.cellsPerColumn = cellsPerColumn
self._numberOfCells = numberOfCols * cellsPerColumn
## @todo document
self.initialPerm = numpy.float32(initialPerm)
## @todo document
self.connectedPerm = numpy.float32(connectedPerm)
## @todo document
self.minThreshold = minThreshold
## @todo document
self.newSynapseCount = newSynapseCount
## @todo document
self.permanenceInc = numpy.float32(permanenceInc)
## @todo document
self.permanenceDec = numpy.float32(permanenceDec)
## @todo document
self.permanenceMax = numpy.float32(permanenceMax)
## @todo document
self.globalDecay = numpy.float32(globalDecay)
## @todo document
self.activationThreshold = activationThreshold
## Allows to turn off pooling
self.doPooling = doPooling
## @todo document
self.segUpdateValidDuration = segUpdateValidDuration
## Used for evaluating the prediction score
self.burnIn = burnIn
## If true, collect training/inference stats
self.collectStats = collectStats
## @todo document
self.seed = seed
## @todo document
self.verbosity = verbosity
## @todo document
self.pamLength = pamLength
## @todo document
self.maxAge = maxAge
## @todo document
self.maxInfBacktrack = maxInfBacktrack
## @todo document
self.maxLrnBacktrack = maxLrnBacktrack
## @todo document
self.maxSeqLength = maxSeqLength
## @todo document
self.maxSegmentsPerCell = maxSegmentsPerCell
## @todo document
self.maxSynapsesPerSegment = maxSynapsesPerSegment
assert outputType in ('normal', 'activeState', 'activeState1CellPerCol')
## @todo document
self.outputType = outputType
# No point having larger expiration if we are not doing pooling
if not doPooling:
self.segUpdateValidDuration = 1
# Create data structures
## @todo document
self.activeColumns = [] # list of indices of active columns
## Cells are indexed by column and index in the column
# Every self.cells[column][index] contains a list of segments
# Each segment is a structure of class Segment
self.cells = []
for c in xrange(self.numberOfCols):
self.cells.append([])
for _ in xrange(self.cellsPerColumn):
self.cells[c].append([])
## @todo document
self.lrnIterationIdx = 0
## @todo document
self.iterationIdx = 0
## unique segment id, so we can put segments in hashes
self.segID = 0
## @todo document
self.currentOutput = None # for checkPrediction
## pamCounter gets reset to pamLength whenever we detect that the learning
# state is making good predictions (at least half the columns predicted).
# Whenever we do not make a good prediction, we decrement pamCounter.
# When pamCounter reaches 0, we start the learn state over again at start
# cells.
self.pamCounter = self.pamLength
# Trivial prediction algorithms
if len(trivialPredictionMethods.strip()) > 0:
## @todo document
self.trivialPredictor = TrivialPredictor(numberOfCols, verbosity,
trivialPredictionMethods)
else:
## @todo document
self.trivialPredictor = None
## If True, the TP will compute a signature for each sequence
self.collectSequenceStats = False
## This gets set when we receive a reset and cleared on the first compute
# following a reset.
self.resetCalled = False
## We keep track of the average input density here
self.avgInputDensity = None
## Keeps track of the length of the sequence currently being learned.
self.learnedSeqLength = 0
## Keeps track of the moving average of all learned sequence length.
self.avgLearnedSeqLength = 0.0
# Set attributes intialized later on.
self._prevLrnPatterns = None
self._prevInfPatterns = None
self.segmentUpdates = None
# Set attributes that are initialized in _initEphemerals.
self._stats = None
## @todo document
self.cellConfidence = None
## @todo document
self.colConfidence = None
## @todo document
self.lrnActiveState = None
## @todo document
self.infActiveState = None
## @todo document
self.lrnPredictedState = None
## @todo document
self.infPredictedState = None
self._internalStats = None
# All other members are ephemeral - don't need to be saved when we save
# state. So they get separated out into _initEphemerals, which also
# gets called when we are being restored from a saved state (via
# __setstate__)
self._initEphemerals()
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved.
"""
return []
def _initEphemerals(self):
"""
Initialize all ephemeral members after being restored to a pickled state.
"""
## We store the lists of segments updates, per cell, so that they can be
# applied later during learning, when the cell gets bottom-up activation.
# We store one list per cell. The lists are identified with a hash key which
# is a tuple (column index, cell index).
self.segmentUpdates = {}
# Allocate and reset all stats
self.resetStats()
# NOTE: We don't use the same backtrack buffer for inference and learning
# because learning has a different metric for determining if an input from
# the past is potentially useful again for backtracking.
#
# Our inference backtrack buffer. This keeps track of up to
# maxInfBacktrack of previous input. Each entry is a list of active column
# inputs.
self._prevInfPatterns = []
# Our learning backtrack buffer. This keeps track of up to maxLrnBacktrack
# of previous input. Each entry is a list of active column inputs
self._prevLrnPatterns = []
# Keep integers rather than bools. Float?
stateShape = (self.numberOfCols, self.cellsPerColumn)
self.lrnActiveState = {}
self.lrnActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState = {}
self.lrnPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState = {}
self.infActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState = {}
self.infPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.cellConfidence = {}
self.cellConfidence["t"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["t-1"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["candidate"] = numpy.zeros(stateShape, dtype="float32")
self.colConfidence = {}
self.colConfidence["t"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["t-1"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["candidate"] = numpy.zeros(self.numberOfCols,
dtype="float32")
def __getstate__(self):
""" @internal
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
for ephemeralMemberName in self._getEphemeralMembers():
state.pop(ephemeralMemberName, None)
state['_random'] = self.getRandomState()
return state
def __setstate__(self, state):
""" @internal
Set the state of ourself from a serialized state.
"""
self.setRandomState(state['_random'])
del state['_random']
self.__dict__.update(state)
# Check the version of the checkpointed TP and update it to the current
# version if necessary.
if not hasattr(self, 'version'):
self._initEphemerals()
self.version = TP_VERSION
  def __getattr__(self, name):
    """ @internal
    Patch __getattr__ so that we can catch the first access to 'cells' and load.
    This function is only called when we try to access an attribute that doesn't
    exist. We purposely make sure that "self.cells" doesn't exist after
    unpickling so that we'll hit this, then we can load it on the first access.
    If this is called at any other time, it will raise an AttributeError.
    That's because:
    - If 'name' is "cells", after the first call, self._realCells won't exist
      so we'll get an implicit AttributeError.
    - If 'name' isn't "cells", I'd expect our super wouldn't have __getattr__,
      so we'll raise our own Attribute error.  If the super did get __getattr__,
      we'll just return what it gives us.
    """
    try:
      return super(TP, self).__getattr__(name)
    except AttributeError:
      # NOTE(review): presumably no ancestor defines __getattr__, so the
      # super() lookup itself raises AttributeError and this just normalizes
      # the error message — confirm against ConsolePrinterMixin.
      raise AttributeError("'TP' object has no attribute '%s'" % name)
  def __del__(self):
    # Nothing to clean up in the pure-Python implementation.
    pass
  def __ne__(self, tp):
    # Defined in terms of __eq__ (Python 2 does not derive != from ==).
    return not self == tp
  def __eq__(self, tp):
    # Two TPs are equal when diff() finds no differences.
    return not self.diff(tp)
def diff(self, tp):
diff = []
toCheck = [((), self.__getstate__(), tp.__getstate__())]
while toCheck:
keys, a, b = toCheck.pop()
if type(a) != type(b):
diff.append((keys, a, b))
elif isinstance(a, dict):
keys1 = set(a.keys())
keys2 = set(b.keys())
# If there are missing keys, add them to the diff.
if keys1 != keys2:
for k in keys1 - keys2:
diff.append((keys + (k,), d[k], None))
for k in keys2 - keys1:
diff.append((keys + (k,), None, b[k]))
# For matching keys, add the values to the list of things to check.
for k in keys1.union(keys2):
toCheck.append((keys + (k,), a[k], b[k]))
elif (isinstance(a, numpy.ndarray) or isinstance(a, list) or
isinstance(a, tuple)):
if len(a) != len(b):
diff.append((keys + (k, 'len'), len(a), len(b)))
elif not numpy.array_equal(a, b):
diff.append((keys + (k,), a, b))
#for i in xrange(len(a)):
# toCheck.append((keys + (k, i), a[i], b[i]))
elif isinstance(a, Random):
if a.getState() != b.getState():
diff.append((keys + (k,), a.getState(), b.getState()))
elif (a.__class__.__name__ == 'Cells4' and
b.__class__.__name__ == 'Cells4'):
continue
else:
try:
_ = a != b
except ValueError:
raise ValueError(type(a))
if a != b:
diff.append((keys + (k,), a, b))
return diff
  def getLearnActiveStateT(self):
    # Accessor for the learn-phase active-state array at time "t"
    # (numpy int8 array of shape (numberOfCols, cellsPerColumn)).
    return self.lrnActiveState['t']
  def saveToFile(self, filePath):
    """
    Implemented in TP10X2.TP10X2.saveToFile
    """
    # Intentionally a no-op in the Python implementation; the C++-backed
    # subclass overrides this to serialize its state.
    pass
  def loadFromFile(self, filePath):
    """
    Implemented in TP10X2.TP10X2.loadFromFile
    """
    # Intentionally a no-op here for the same reason as saveToFile.
    pass
  def setRandomSeed(self, seed):
    """ @internal
    Seed the random number generator.
    This is used during unit testing to generate repeatable results.
    """
    self._random = Random(seed)
  def getRandomState(self):
    """ @internal
    Return the random number state.
    This is used during unit testing to generate repeatable results.
    """
    # The state is the pickled Random instance (see setRandomState).
    return pickle.dumps(self._random)
  def setRandomState(self, state):
    """ @internal Set the random number state.
    This is used during unit testing to generate repeatable results.
    """
    # Counterpart of getRandomState: `state` is a pickled Random instance.
    self._random = pickle.loads(state)
def reset(self,):
"""
Reset the state of all cells.
This is normally used between sequences while training. All internal states
are reset to 0.
"""
if self.verbosity >= 3:
print "\n==== RESET ====="
self.lrnActiveState['t-1'].fill(0)
self.lrnActiveState['t'].fill(0)
self.lrnPredictedState['t-1'].fill(0)
self.lrnPredictedState['t'].fill(0)
self.infActiveState['t-1'].fill(0)
self.infActiveState['t'].fill(0)
self.infPredictedState['t-1'].fill(0)
self.infPredictedState['t'].fill(0)
self.cellConfidence['t-1'].fill(0)
self.cellConfidence['t'].fill(0)
# Flush the segment update queue
self.segmentUpdates = {}
self._internalStats['nInfersSinceReset'] = 0
#To be removed
self._internalStats['curPredictionScore'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
if self.trivialPredictor is not None:
self.trivialPredictor.reset()
# When a reset occurs, set prevSequenceSignature to the signature of the
# just-completed sequence and start accumulating histogram for the next
# sequence.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
if self._internalStats['confHistogram'].sum() > 0:
sig = self._internalStats['confHistogram'].copy()
sig.reshape(self.numberOfCols * self.cellsPerColumn)
self._internalStats['prevSequenceSignature'] = sig
self._internalStats['confHistogram'].fill(0)
self.resetCalled = True
# Clear out input history
self._prevInfPatterns = []
self._prevLrnPatterns = []
def resetStats(self):
"""
Reset the learning and inference stats. This will usually be called by
user code at the start of each inference run (for a particular data set).
"""
self._stats = dict()
self._internalStats = dict()
self._internalStats['nInfersSinceReset'] = 0
self._internalStats['nPredictions'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['predictionScoreTotal2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['falseNegativeScoreTotal'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['falsePositiveScoreTotal'] = 0
self._internalStats['pctExtraTotal'] = 0
self._internalStats['pctMissingTotal'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
self._internalStats['totalMissing'] = 0
self._internalStats['totalExtra'] = 0
# Sequence signature statistics. Note that we don't reset the sequence
# signature list itself.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
self._internalStats['confHistogram'] = (
numpy.zeros((self.numberOfCols, self.cellsPerColumn),
dtype="float32"))
if self.trivialPredictor is not None:
self.trivialPredictor.resetStats()
def getStats(self):
  """
  Return the current learning and inference stats. This returns a dict
  containing all the learning and inference stats we have collected since the
  last resetStats(). If @ref collectStats is False, then None is returned.
  @returns dict
  The following keys are returned in the dict when @ref collectStats is True:
  @retval nPredictions the number of predictions. This is the total
                       number of inferences excluding burn-in and the
                       last inference.
  @retval curPredictionScore the score for predicting the current input
                       (predicted during the previous inference)
  @retval curMissing the number of bits in the current input that were
                       not predicted to be on.
  @retval curExtra the number of bits in the predicted output that
                       are not in the next input
  @retval predictionScoreTotal the sum of every prediction score to date
  @retval predictionScoreAvg predictionScoreTotal / nPredictions
  @retval pctMissingTotal the total number of bits that were missed over all
                       predictions
  @retval pctMissingAvg pctMissingTotal / nPredictions
  @retval prevSequenceSignature signature for the sequence immediately preceding
                       the last reset. 'None' if collectSequenceStats is
                       False
  """
  if not self.collectStats:
    return None

  # Copy the raw counters over verbatim.
  for statName in ('nPredictions', 'curMissing', 'curExtra',
                   'totalMissing', 'totalExtra',
                   'curPredictionScore2', 'curFalseNegativeScore',
                   'curFalsePositiveScore'):
    self._stats[statName] = self._internalStats[statName]

  # Guard against division by zero before any prediction has been scored.
  numPredictions = max(1, self._stats['nPredictions'])

  # Averages derived from the accumulated totals.
  self._stats['predictionScoreAvg2'] = (
      self._internalStats['predictionScoreTotal2'] / numPredictions)
  self._stats['falseNegativeAvg'] = (
      self._internalStats['falseNegativeScoreTotal'] / numPredictions)
  self._stats['falsePositiveAvg'] = (
      self._internalStats['falsePositiveScoreTotal'] / numPredictions)
  self._stats['pctExtraAvg'] = (
      self._internalStats['pctExtraTotal'] / numPredictions)
  self._stats['pctMissingAvg'] = (
      self._internalStats['pctMissingTotal'] / numPredictions)

  # This will be None if collectSequenceStats is False
  self._stats['prevSequenceSignature'] = (
      self._internalStats['prevSequenceSignature'])

  # Compare our average score against each trivial-predictor baseline,
  # remembering which baseline method scored best.
  bestScore, bestMethod = -1.0, "none"
  if self.trivialPredictor is not None:
    for method in self.trivialPredictor.methods:
      score = (
          self.trivialPredictor._internalStats[method]['predictionScoreTotal2']
          / numPredictions)
      if score > bestScore:
        bestScore, bestMethod = score, method
      self._stats["tr_%s" % method] = score
      self._stats["vs_%s" % method] = (
          self._stats['predictionScoreAvg2'] - score)
  self._stats["vs_all"] = self._stats['predictionScoreAvg2'] - bestScore
  self._stats["tr_best"] = bestMethod

  return self._stats
def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,
                         colConfidence):
  """
  Called at the end of learning and inference, this routine will update
  a number of stats in our _internalStats dictionary, including our computed
  prediction score.
  @param stats internal stats dictionary
  @param bottomUpNZ list of the active bottom-up inputs
  @param predictedState The columns we predicted on the last time step (should
                        match the current bottomUpNZ in the best case)
  @param colConfidence Column confidences we determined on the last time step
  """
  # Return if not collecting stats
  if not self.collectStats:
    return
  stats['nInfersSinceReset'] += 1

  # Compute the prediction score, how well the prediction from the last
  # time step predicted the current bottom-up input
  (numExtra2, numMissing2, confidences2) = self.checkPrediction2(
      patternNZs=[bottomUpNZ], output=predictedState,
      colConfidence=colConfidence)
  predictionScore, positivePredictionScore, negativePredictionScore = (
      confidences2[0])

  # Store the stats that don't depend on burn-in
  stats['curPredictionScore2'] = float(predictionScore)
  # False negative score: the fraction of the actual input we failed to predict
  stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore)
  # False positive score: the amount predicted that did not actually occur
  stats['curFalsePositiveScore'] = float(negativePredictionScore)

  stats['curMissing'] = numMissing2
  stats['curExtra'] = numExtra2

  # If we are passed the burn-in period, update the accumulated stats
  # Here's what various burn-in values mean:
  #   0: try to predict the first element of each sequence and all subsequent
  #   1: try to predict the second element of each sequence and all subsequent
  #   etc.
  if stats['nInfersSinceReset'] <= self.burnIn:
    return

  # Burn-in related stats
  stats['nPredictions'] += 1
  # numExpected is clamped to at least 1.0 to avoid dividing by zero when the
  # bottom-up input is empty.
  numExpected = max(1.0, float(len(bottomUpNZ)))

  stats['totalMissing'] += numMissing2
  stats['totalExtra'] += numExtra2
  stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected
  stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected
  stats['predictionScoreTotal2'] += float(predictionScore)
  stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore)
  stats['falsePositiveScoreTotal'] += float(negativePredictionScore)

  if self.collectSequenceStats:
    # Collect cell confidences for every cell that correctly predicted current
    # bottom up input. Normalize confidence across each column
    cc = self.cellConfidence['t-1'] * self.infActiveState['t']
    sconf = cc.sum(axis=1)
    for c in range(self.numberOfCols):
      if sconf[c] > 0:
        cc[c, :] /= sconf[c]

    # Update cell confidence histogram: add column-normalized confidence
    # scores to the histogram
    self._internalStats['confHistogram'] += cc
def printState(self, aState):
  """
  Print an integer array that is the same shape as activeState.

  One text row is printed per cell index, spanning all columns, with a
  space inserted after every 10 columns for readability.

  @param aState numpy array of shape (numberOfCols, cellsPerColumn)
  """
  def formatRow(var, i):
    # Build the text row for cell index i across all columns.
    s = ''
    for c in range(self.numberOfCols):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += str(var[c, i])
    s += ' '
    return s

  for i in xrange(self.cellsPerColumn):
    print formatRow(aState, i)
def printConfidence(self, aState, maxCols = 20):
  """
  Print a floating point array that is the same shape as activeState.

  Values are printed with 3 decimal places, one text row per cell index,
  limited to the first maxCols columns.

  @param aState numpy array of shape (numberOfCols, cellsPerColumn)
  @param maxCols maximum number of columns to print per row
  """
  def formatFPRow(var, i):
    # Build the text row for cell index i across (at most maxCols) columns.
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c, i]
    s += ' '
    return s

  for i in xrange(self.cellsPerColumn):
    print formatFPRow(aState, i)
def printColConfidence(self, aState, maxCols = 20):
  """
  Print up to maxCols number from a flat floating point array.

  @param aState 1-D numpy array of per-column values (e.g. colConfidence)
  @param maxCols maximum number of values to print
  """
  def formatFPRow(var):
    # Build a single text row of up to maxCols values, 3 decimal places each.
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c]
    s += ' '
    return s

  print formatFPRow(aState)
def printStates(self, printPrevious = True, printLearnState = True):
  """
  Print the inference (and optionally learning) active and predicted state
  matrices for the current time step.

  @param printPrevious if True, print the t-1 state to the left of the
         t state on each row
  @param printLearnState if True, also print the learn-phase active and
         predicted states
  """
  def formatRow(var, i):
    # Build the text row for cell index i across all columns.
    s = ''
    for c in range(self.numberOfCols):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += str(var[c, i])
    s += ' '
    return s

  print "\nInference Active state"
  for i in xrange(self.cellsPerColumn):
    if printPrevious:
      print formatRow(self.infActiveState['t-1'], i),
    print formatRow(self.infActiveState['t'], i)

  print "Inference Predicted state"
  for i in xrange(self.cellsPerColumn):
    if printPrevious:
      print formatRow(self.infPredictedState['t-1'], i),
    print formatRow(self.infPredictedState['t'], i)

  if printLearnState:
    print "\nLearn Active state"
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.lrnActiveState['t-1'], i),
      print formatRow(self.lrnActiveState['t'], i)

    print "Learn Predicted state"
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.lrnPredictedState['t-1'], i),
      print formatRow(self.lrnPredictedState['t'], i)
def printOutput(self, y):
  """
  Print an output array as integers, one row per cell index.

  @param y numpy array of shape (numberOfCols, cellsPerColumn)
  """
  print "Output"
  for i in xrange(self.cellsPerColumn):
    for c in xrange(self.numberOfCols):
      print int(y[c, i]),
    print
def printInput(self, x):
  """
  Print a bottom-up input array as integers on a single line.

  @param x 1-D numpy array with one value per column
  """
  print "Input"
  for c in xrange(self.numberOfCols):
    print int(x[c]),
  print
def printParameters(self):
  """
  Print the parameter settings for the TP.
  """
  # Topology and per-segment thresholds
  print "numberOfCols=", self.numberOfCols
  print "cellsPerColumn=", self.cellsPerColumn
  print "minThreshold=", self.minThreshold
  print "newSynapseCount=", self.newSynapseCount
  print "activationThreshold=", self.activationThreshold
  print
  # Synapse permanence dynamics
  print "initialPerm=", self.initialPerm
  print "connectedPerm=", self.connectedPerm
  print "permanenceInc=", self.permanenceInc
  print "permanenceDec=", self.permanenceDec
  print "permanenceMax=", self.permanenceMax
  print "globalDecay=", self.globalDecay
  print
  # Higher-level behavior switches
  print "doPooling=", self.doPooling
  print "segUpdateValidDuration=", self.segUpdateValidDuration
  print "pamLength=", self.pamLength
def printActiveIndices(self, state, andValues=False):
  """
  Print the list of [column, cellIdx] indices for each of the active
  cells in state.
  @param state numpy array; either 2-D (numberOfCols, cellsPerColumn) or
         1-D with one entry per column
  @param andValues if True, also print the value stored at each non-zero
         position (useful for confidence arrays)
  """
  if len(state.shape) == 2:
    (cols, cellIdxs) = state.nonzero()
  else:
    # 1-D input: treat every entry as cell index 0 of its column.
    cols = state.nonzero()[0]
    cellIdxs = numpy.zeros(len(cols))

  if len(cols) == 0:
    print "NONE"
    return

  prevCol = -1
  for (col, cellIdx) in zip(cols, cellIdxs):
    # Starting a new column: close the previous bracket and open a new one.
    if col != prevCol:
      if prevCol != -1:
        print "] ",
      print "Col %d: [" % (col),
      prevCol = col

    if andValues:
      if len(state.shape) == 2:
        value = state[col, cellIdx]
      else:
        value = state[col]
      print "%d: %s," % (cellIdx, value),
    else:
      print "%d," % (cellIdx),
  print "]"
def printComputeEnd(self, output, learn=False):
  """
  Called at the end of inference to print out various diagnostic
  information based on the current verbosity level.

  At verbosity >= 3 a full summary of all state matrices is printed
  (with raw state dumps at verbosity >= 6 and per-cell segment dumps at
  verbosity >= 4); at verbosity >= 1 only a one-line summary of the
  active outputs is printed.

  @param output flat numpy output array (numberOfCols * cellsPerColumn)
  @param learn True if this compute cycle included learning
  """
  if self.verbosity >= 3:
    print "----- computeEnd summary: "
    print "learn:", learn
    # A column "bursts" when every one of its cells is active, so the
    # per-column min over cells is 1 only for bursting columns.
    print "numBurstingCols: %s, " % (
        self.infActiveState['t'].min(axis=1).sum()),
    print "curPredScore2: %s, " % (
        self._internalStats['curPredictionScore2']),
    print "curFalsePosScore: %s, " % (
        self._internalStats['curFalsePositiveScore']),
    print "1-curFalseNegScore: %s, " % (
        1 - self._internalStats['curFalseNegativeScore'])
    print "numSegments: ", self.getNumSegments(),
    print "avgLearnedSeqLength: ", self.avgLearnedSeqLength

    print "----- infActiveState (%d on) ------" % (
        self.infActiveState['t'].sum())
    self.printActiveIndices(self.infActiveState['t'])
    if self.verbosity >= 6:
      self.printState(self.infActiveState['t'])

    print "----- infPredictedState (%d on)-----" % (
        self.infPredictedState['t'].sum())
    self.printActiveIndices(self.infPredictedState['t'])
    if self.verbosity >= 6:
      self.printState(self.infPredictedState['t'])

    print "----- lrnActiveState (%d on) ------" % (
        self.lrnActiveState['t'].sum())
    self.printActiveIndices(self.lrnActiveState['t'])
    if self.verbosity >= 6:
      self.printState(self.lrnActiveState['t'])

    print "----- lrnPredictedState (%d on)-----" % (
        self.lrnPredictedState['t'].sum())
    self.printActiveIndices(self.lrnPredictedState['t'])
    if self.verbosity >= 6:
      self.printState(self.lrnPredictedState['t'])

    print "----- cellConfidence -----"
    self.printActiveIndices(self.cellConfidence['t'], andValues=True)
    if self.verbosity >= 6:
      self.printConfidence(self.cellConfidence['t'])

    print "----- colConfidence -----"
    self.printActiveIndices(self.colConfidence['t'], andValues=True)

    print "----- cellConfidence[t-1] for currently active cells -----"
    # Confidences (from t-1) restricted to the cells active now.
    cc = self.cellConfidence['t-1'] * self.infActiveState['t']
    self.printActiveIndices(cc, andValues=True)

    if self.verbosity == 4:
      print "Cells, predicted segments only:"
      self.printCells(predictedOnly=True)
    elif self.verbosity >= 5:
      print "Cells, all segments:"
      self.printCells(predictedOnly=False)
    print

  elif self.verbosity >= 1:
    print "TP: learn:", learn
    print "TP: active outputs(%d):" % len(output.nonzero()[0]),
    self.printActiveIndices(output.reshape(self.numberOfCols,
                                           self.cellsPerColumn))
def printSegmentUpdates(self):
  """
  Print every pending segment update, keyed by (column, cell).
  """
  print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
  for key, updateList in self.segmentUpdates.iteritems():
    # key is a (column index, cell index) tuple
    c, i = key[0], key[1]
    print c, i, updateList
def printCell(self, c, i, onlyActiveSegments=False):
  """
  Print a summary of all segments on cell (c, i). Cells with no segments
  print nothing.
  @param c column index
  @param i cell index within column
  @param onlyActiveSegments if True, print only segments that are currently
         active with respect to infActiveState['t']
  """
  if len(self.cells[c][i]) > 0:
    print "Column", c, "Cell", i, ":",
    print len(self.cells[c][i]), "segment(s)"
    for j, s in enumerate(self.cells[c][i]):
      isActive = self.isSegmentActive(s, self.infActiveState['t'])
      if not onlyActiveSegments or isActive:
        # Active segments are flagged with a leading '*'
        isActiveStr = "*" if isActive else " "
        print "  %sSeg #%-3d" % (isActiveStr, j),
        s.debugPrint()
def printCells(self, predictedOnly=False):
  """
  Print a per-cell segment summary for every cell in the network.
  @param predictedOnly if True, restrict the printout to cells that are
         currently in the predicted state (infPredictedState['t'])
  """
  if predictedOnly:
    print "--- PREDICTED CELLS ---"
  else:
    print "--- ALL CELLS ---"
  print "Activation threshold=", self.activationThreshold,
  print "min threshold=", self.minThreshold,
  print "connected perm=", self.connectedPerm

  for c in xrange(self.numberOfCols):
    for i in xrange(self.cellsPerColumn):
      if not predictedOnly or self.infPredictedState['t'][c, i]:
        self.printCell(c, i, predictedOnly)
def getNumSegmentsInCell(self, c, i):
  """
  Count the dendrite segments attached to a single cell.
  @param c column index
  @param i cell index within column
  @returns the number of segments on cell (c, i)
  """
  segments = self.cells[c][i]
  return len(segments)
def getNumSynapses(self):
  """
  @returns the total number of synapses across all segments
  """
  # getSegmentInfo() returns (numSegments, numSynapses, ...); we want the
  # synapse count.
  return self.getSegmentInfo()[1]
def getNumStrongSynapses(self):
  """
  @todo implement this, it is used by the node's getParameter() call
  @returns 0 (placeholder until implemented)
  """
  return 0
def getNumStrongSynapsesPerTimeSlot(self):
  """
  @todo implement this, it is used by the node's getParameter() call
  @returns 0 (placeholder until implemented)
  """
  return 0
def getNumSynapsesPerSegmentMax(self):
  """
  @todo implement this, it is used by the node's getParameter() call.
  It should return the max # of synapses seen in any one segment.
  @returns 0 (placeholder until implemented)
  """
  return 0
def getNumSynapsesPerSegmentAvg(self):
  """
  @returns the average number of synapses per segment (0-safe: if there
           are no segments the divisor is clamped to 1)
  """
  totalSynapses = float(self.getNumSynapses())
  totalSegments = max(1, self.getNumSegments())
  return totalSynapses / totalSegments
def getNumSegments(self):
  """
  @returns the total number of segments
  """
  # getSegmentInfo() returns (numSegments, numSynapses, ...); we want the
  # segment count.
  return self.getSegmentInfo()[0]
def getNumCells(self):
  """
  @returns the total number of cells (columns x cells per column)
  """
  totalCells = self.numberOfCols * self.cellsPerColumn
  return totalCells
def getSegmentOnCell(self, c, i, segIdx):
  """
  @param c column index
  @param i cell index in column
  @param segIdx index of the segment on cell (c, i)
  @returns list representing the segment on cell (c, i) with index segIdx.
  Returns the segment as following list:
    [  [segmentID, sequenceSegmentFlag, positiveActivations,
        totalActivations, lastActiveIteration,
        lastPosDutyCycle, lastPosDutyCycleIteration],
       [col1, idx1, perm1],
       [col2, idx2, perm2], ...
    ]
  The first element is the segment's bookkeeping header; each subsequent
  element is one synapse as [source column, source cell index, permanence].
  """
  segment = self.cells[c][i][segIdx]
  header = [segment.segID, segment.isSequenceSeg,
            segment.positiveActivations, segment.totalActivations,
            segment.lastActiveIteration, segment._lastPosDutyCycle,
            segment._lastPosDutyCycleIteration]
  return [header] + segment.syns
class SegmentUpdate(object):
  """
  Class used to carry instructions for updating a segment.
  """

  def __init__(self, c, i, seg=None, activeSynapses=None):
    # BUG FIX: the original signature used a mutable default argument
    # (activeSynapses=[]), which is shared across every instance constructed
    # without that argument -- appending to one instance's list would have
    # been visible in all of them. Using None as the sentinel and creating a
    # fresh list per instance preserves the call interface while fixing the
    # aliasing hazard.
    self.columnIdx = c     # column index of the cell this update targets
    self.cellIdx = i       # cell index within the column
    self.segment = seg     # The segment object itself, not an index (can be None)
    self.activeSynapses = [] if activeSynapses is None else activeSynapses
    self.sequenceSegment = False
    self.phase1Flag = False
    # Set true if segment only reaches activationThreshold when including
    # not fully connected synapses.
    self.weaklyPredicting = False

  def __eq__(self, other):
    # Two updates are equal iff they carry exactly the same attributes with
    # equal values.
    if set(self.__dict__.keys()) != set(other.__dict__.keys()):
      return False
    for k in self.__dict__:
      if self.__dict__[k] != other.__dict__[k]:
        return False
    return True

  def __ne__(self, other):
    return not self == other

  # Just for debugging
  def __str__(self):
    return ("Seg update: cell=[%d,%d]" % (self.columnIdx, self.cellIdx) +
            ", seq seg=" + str(self.sequenceSegment) +
            ", seg=" + str(self.segment) +
            ", synapses=" + str(self.activeSynapses))
def addToSegmentUpdates(self, c, i, segUpdate):
  """
  Store a dated potential segment update. The "date" (iteration index) is used
  later to determine whether the update is too old and should be forgotten.
  This is controlled by parameter segUpdateValidDuration.
  @param c column index of the cell the update applies to
  @param i cell index within the column
  @param segUpdate SegmentUpdate instance; ignored if None or if it carries
         no active synapses
  """
  # Sometimes we might be passed an empty update
  if segUpdate is None or len(segUpdate.activeSynapses) == 0:
    return

  key = (c, i) # key = (column index, cell index in column)

  # TODO: scan list of updates for that cell and consolidate?
  # But watch out for dates!
  # dict.setdefault replaces the original has_key() check (dict.has_key was
  # removed in Python 3) and avoids the double dictionary lookup.
  self.segmentUpdates.setdefault(key, []).append(
      (self.lrnIterationIdx, segUpdate))
def removeSegmentUpdate(self, updateInfo):
  """
  Remove a segment update (called when seg update expires or is processed)
  @param updateInfo tuple (creationDate, SegmentUpdate)
  """
  # The SegmentUpdate itself records which cell it belongs to; that gives
  # us the (column, cell) key into the segmentUpdates dict.
  segUpdate = updateInfo[1]
  cellKey = (segUpdate.columnIdx, segUpdate.cellIdx)
  self.segmentUpdates[cellKey].remove(updateInfo)
def computeOutput(self):
  """Computes output for both learning and inference. In both cases, the
  output is the boolean OR of activeState and predictedState at t.
  Stores currentOutput for checkPrediction.

  @returns flat float32 numpy array of length numberOfCols * cellsPerColumn
  """
  # TODO: This operation can be sped up by:
  #  1.)  Pre-allocating space for the currentOutput
  #  2.)  Making predictedState and activeState of type 'float32' up front
  #  3.)  Using logical_or(self.predictedState['t'], self.activeState['t'],
  #          self.currentOutput)

  if self.outputType == 'activeState1CellPerCol':
    # Fire only the most confident cell in columns that have 2 or more
    # active cells
    mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1)
    self.currentOutput = numpy.zeros(self.infActiveState['t'].shape,
                                     dtype='float32')

    # Turn on the most confident cell in each column. Note here that
    #  Columns refers to TP columns, even though each TP column is a row
    #  in the numpy array.
    numCols = self.currentOutput.shape[0]
    # Fancy indexing: one (row, col) pair per TP column.
    self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1

    # Don't turn on anything in columns which are not active at all
    activeCols = self.infActiveState['t'].max(axis=1)
    inactiveCols = numpy.where(activeCols==0)[0]
    self.currentOutput[inactiveCols, :] = 0

  elif self.outputType == 'activeState':
    self.currentOutput = self.infActiveState['t']

  elif self.outputType == 'normal':
    self.currentOutput = numpy.logical_or(self.infPredictedState['t'],
                                          self.infActiveState['t'])

  else:
    raise RuntimeError("Unimplemented outputType")

  return self.currentOutput.reshape(-1).astype('float32')
def getActiveState(self):
  """ Return the current active state. This is called by the node to
  obtain the sequence output of the TP.

  @returns flat float32 numpy array, one element per cell.
  """
  # TODO: This operation can be sped up by making activeState of
  #  type 'float32' up front.
  activeState = self.infActiveState['t']
  return activeState.reshape(-1).astype('float32')
def getPredictedState(self):
  """
  Return a numpy array, predictedCells, representing the current predicted
  state, where predictedCells[c][i] is the state of the i'th cell in the
  c'th column.

  @returns numpy array of predicted cells for the current time step.
  """
  predictedCells = self.infPredictedState['t']
  return predictedCells
def predict(self, nSteps):
  """
  This function gives the future predictions for <nSteps> timesteps starting
  from the current TP state. The TP is returned to its original state at the
  end before returning.
  -# We save the TP state.
  -# Loop for nSteps
     -# Turn-on with lateral support from the current active cells
     -# Set the predicted cells as the next step's active cells. This step
        in learn and infer methods use input here to correct the predictions.
        We don't use any input here.
  -# Revert back the TP state to the time before prediction
  @param nSteps The number of future time steps to be predicted
  @returns all the future predictions - a numpy array of type "float32" and
           shape (nSteps, numberOfCols).
           The ith row gives the tp prediction for each column at
           a future timestep (t+i+1).
  """
  # Save the TP dynamic state, we will use to revert back in the end
  pristineTPDynamicState = self._getTPDynamicState()

  assert (nSteps>0)

  # multiStepColumnPredictions holds all the future prediction.
  multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
                                           dtype="float32")

  # This is a (nSteps-1)+half loop. Phase 2 in both learn and infer methods
  # already predicts for timestep (t+1). We use that prediction for free and
  # save the half-a-loop of work.

  step = 0
  while True:
    # We get the prediction for the columns in the next time step from
    # the topDownCompute method. It internally uses confidences.
    multiStepColumnPredictions[step, :] = self.topDownCompute()

    # Cleanest way in python to handle one and half loops
    if step == nSteps-1:
      break
    step += 1

    # Copy t-1 into t
    self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
    self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]

    # Predicted state at "t-1" becomes the active state at "t"
    self.infActiveState['t'][:, :] = self.infPredictedState['t-1'][:, :]

    # Predicted state and confidence are set in phase2; clear them first so
    # inferPhase2 starts from a clean slate.
    self.infPredictedState['t'].fill(0)
    self.cellConfidence['t'].fill(0.0)
    self.inferPhase2()

  # Revert the dynamic state to the saved state
  self._setTPDynamicState(pristineTPDynamicState)

  return multiStepColumnPredictions
def _getTPDynamicStateVariableNames(self):
"""
Any newly added dynamic states in the TP should be added to this list.
Parameters:
--------------------------------------------
retval: The list of names of TP dynamic state variables.
"""
return ["infActiveState",
"infPredictedState",
"lrnActiveState",
"lrnPredictedState",
"cellConfidence",
"colConfidence",
]
def _getTPDynamicState(self,):
"""
Parameters:
--------------------------------------------
retval: A dict with all the dynamic state variable names as keys and
their values at this instant as values.
"""
tpDynamicState = dict()
for variableName in self._getTPDynamicStateVariableNames():
tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])
return tpDynamicState
def _setTPDynamicState(self, tpDynamicState):
"""
Set all the dynamic state variables from the <tpDynamicState> dict.
<tpDynamicState> dict has all the dynamic state variable names as keys and
their values at this instant as values.
We set the dynamic state variables in the tp object with these items.
"""
for variableName in self._getTPDynamicStateVariableNames():
self.__dict__[variableName] = tpDynamicState.pop(variableName)
def _updateAvgLearnedSeqLength(self, prevSeqLength):
"""Update our moving average of learned sequence length."""
if self.lrnIterationIdx < 100:
alpha = 0.5
else:
alpha = 0.1
self.avgLearnedSeqLength = ((1.0 - alpha) * self.avgLearnedSeqLength +
(alpha * prevSeqLength))
def getAvgLearnedSeqLength(self):
  """
  @returns Moving average of learned sequence length
  """
  return self.avgLearnedSeqLength
def inferBacktrack(self, activeColumns):
  """
  This "backtracks" our inference state, trying to see if we can lock onto
  the current set of inputs by assuming the sequence started up to N steps
  ago on start cells.
  @param activeColumns The list of active column indices

  This will adjust @ref infActiveState['t'] if it does manage to lock on to a
  sequence that started earlier. It will also compute infPredictedState['t']
  based on the possibly updated @ref infActiveState['t'], so there is no need to
  call inferPhase2() after calling inferBacktrack().

  This looks at:
      - @ref infActiveState['t']

  This updates/modifies:
      - @ref infActiveState['t']
      - @ref infPredictedState['t']
      - @ref colConfidence['t']
      - @ref cellConfidence['t']

  How it works:
  -------------------------------------------------------------------
  This method gets called from updateInferenceState when we detect either of
  the following two conditions:
  -# The current bottom-up input had too many un-expected columns
  -# We fail to generate a sufficient number of predicted columns for the
     next time step.

  Either of these two conditions indicate that we have fallen out of a
  learned sequence.

  Rather than simply "giving up" and bursting on the unexpected input
  columns, a better approach is to see if perhaps we are in a sequence that
  started a few steps ago. The real world analogy is that you are driving
  along and suddenly hit a dead-end, you will typically go back a few turns
  ago and pick up again from a familiar intersection.

  This back-tracking goes hand in hand with our learning methodology, which
  always tries to learn again from start cells after it loses context. This
  results in a network that has learned multiple, overlapping paths through
  the input data, each starting at different points. The lower the global
  decay and the more repeatability in the data, the longer each of these
  paths will end up being.

  The goal of this function is to find out which starting point in the past
  leads to the current input with the most context as possible. This gives us
  the best chance of predicting accurately going forward. Consider the
  following example, where you have learned the following sub-sequences which
  have the given frequencies:

                    ? - Q - C - D - E      10X      seq 0
                    ? - B - C - D - F      1X       seq 1
                    ? - B - C - H - I      2X       seq 2
                    ? - B - C - D - F      3X       seq 3
            ? - Z - A - B - C - D - J      2X       seq 4
            ? - Z - A - B - C - H - I      1X       seq 5
            ? - Y - A - B - C - D - F      3X       seq 6

          ----------------------------------------
        W - X - Z - A - B - C - D          <= input history
                                ^
                                current time step

  Suppose, in the current time step, the input pattern is D and you have not
  predicted D, so you need to backtrack. Suppose we can backtrack up to 6
  steps in the past, which path should we choose? From the table above, we can
  see that the correct answer is to assume we are in seq 1. How do we
  implement the backtrack to give us this right answer? The current
  implementation takes the following approach:

  -# Start from the farthest point in the past.
  -# For each starting point S, calculate the confidence of the current
     input, conf(startingPoint=S), assuming we followed that sequence.
     Note that we must have learned at least one sequence that starts at
     point S.
  -# If conf(startingPoint=S) is significantly different from
     conf(startingPoint=S-1), then choose S-1 as the starting point.
     The assumption here is that starting point S-1 is the starting point of
     a learned sub-sequence that includes the current input in it's path and
     that started the longest ago. It thus has the most context and will be
     the best predictor going forward.

  From the statistics in the above table, we can compute what the confidences
  will be for each possible starting point:

      startingPoint           confidence of D
      -----------------------------------------
      B (t-2)               4/6  = 0.667   (seq 1,3)/(seq 1,2,3)
      Z (t-4)               2/3  = 0.667   (seq 4)/(seq 4,5)

  First of all, we do not compute any confidences at starting points t-1, t-3,
  t-5, t-6 because there are no learned sequences that start at those points.

  Notice here that Z is the starting point of the longest sub-sequence leading
  up to the current input. Event though starting at t-2 and starting at t-4
  give the same confidence value, we choose the sequence starting at t-4
  because it gives the most context, and it mirrors the way that learning
  extends sequences.
  """
  # How much input history have we accumulated?
  # The current input is always at the end of self._prevInfPatterns (at
  # index -1), but it is also evaluated as a potential starting point by
  # turning on it's start cells and seeing if it generates sufficient
  # predictions going forward.
  numPrevPatterns = len(self._prevInfPatterns)
  if numPrevPatterns <= 0:
    return

  # This is an easy to use label for the current time step
  currentTimeStepsOffset = numPrevPatterns - 1

  # Save our current active state in case we fail to find a place to restart
  # todo: save infActiveState['t-1'], infPredictedState['t-1']?
  self.infActiveState['backup'][:, :] = self.infActiveState['t'][:, :]

  # Save our t-1 predicted state because we will write over it as we evaluate
  # each potential starting point.
  self.infPredictedState['backup'][:, :] = self.infPredictedState['t-1'][:, :]

  # We will record which previous input patterns did not generate predictions
  # up to the current time step and remove all the ones at the head of the
  # input history queue so that we don't waste time evaluating them again at
  # a later time step.
  badPatterns = []

  # Let's go back in time and replay the recent inputs from start cells and
  # see if we can lock onto this current set of inputs that way.
  #
  # Start the farthest back and work our way forward. For each starting point,
  # See if firing on start cells at that point would predict the current
  # input as well as generate sufficient predictions for the next time step.
  #
  # We want to pick the point closest to the current time step that gives us
  # the relevant confidence. Think of this example, where we are at D and need
  # to
  #   A - B - C - D
  # decide if we should backtrack to C, B, or A. Suppose B-C-D is a high order
  # sequence and A is unrelated to it. If we backtrack to B we would get a
  # certain confidence of D, but if we went farther back, to A, the
  # confidence wouldn't change, since A has no impact on the B-C-D series.
  #
  # So, our strategy will be to pick the "B" point, since choosing the A point
  #  does not impact our confidences going forward at all.
  inSequence = False
  candConfidence = None
  candStartOffset = None
  for startOffset in range(0, numPrevPatterns):

    # If we have a candidate already in the past, don't bother falling back
    #  to start cells on the current input.
    if startOffset == currentTimeStepsOffset and candConfidence is not None:
      break

    if self.verbosity >= 3:
      print (
          "Trying to lock-on using startCell state from %d steps ago:" % (
              numPrevPatterns - 1 - startOffset),
          self._prevInfPatterns[startOffset])

    # Play through starting from starting point 'startOffset'
    inSequence = False
    for offset in range(startOffset, numPrevPatterns):
      # If we are about to set the active columns for the current time step
      # based on what we predicted, capture and save the total confidence of
      # predicting the current input
      if offset == currentTimeStepsOffset:
        totalConfidence = self.colConfidence['t'][activeColumns].sum()

      # Compute activeState[t] given bottom-up and predictedState[t-1]
      self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
      inSequence = self.inferPhase1(self._prevInfPatterns[offset],
                                    useStartCells = (offset == startOffset))
      if not inSequence:
        break

      # Compute predictedState['t'] given activeState['t']
      if self.verbosity >= 3:
        print ("  backtrack: computing predictions from ",
               self._prevInfPatterns[offset])
      inSequence = self.inferPhase2()
      if not inSequence:
        break

    # If starting from startOffset got lost along the way, mark it as an
    # invalid start point.
    if not inSequence:
      badPatterns.append(startOffset)
      continue

    # If we got to here, startOffset is a candidate starting point.
    # Save this state as a candidate state. It will become the chosen state if
    # we detect a change in confidences starting at a later startOffset
    candConfidence = totalConfidence
    candStartOffset = startOffset

    if self.verbosity >= 3 and startOffset != currentTimeStepsOffset:
      print ("  # Prediction confidence of current input after starting %d "
             "steps ago:" % (numPrevPatterns - 1 - startOffset),
             totalConfidence)

    if candStartOffset == currentTimeStepsOffset:  # no more to try
      break
    # Remember the candidate's full state so we can restore it if a later,
    # closer starting point does not beat it.
    self.infActiveState['candidate'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['candidate'][:, :] = (
        self.infPredictedState['t'][:, :])
    self.cellConfidence['candidate'][:, :] = self.cellConfidence['t'][:, :]
    self.colConfidence['candidate'][:] = self.colConfidence['t'][:]
    break

  # If we failed to lock on at any starting point, fall back to the original
  # active state that we had on entry
  if candStartOffset is None:
    if self.verbosity >= 3:
      print "Failed to lock on. Falling back to bursting all unpredicted."
    self.infActiveState['t'][:, :] = self.infActiveState['backup'][:, :]
    self.inferPhase2()

  else:
    if self.verbosity >= 3:
      print ("Locked on to current input by using start cells from %d "
             " steps ago:" % (numPrevPatterns - 1 - candStartOffset),
             self._prevInfPatterns[candStartOffset])
    # Install the candidate state, if it wasn't the last one we evaluated.
    if candStartOffset != currentTimeStepsOffset:
      self.infActiveState['t'][:, :] = self.infActiveState['candidate'][:, :]
      self.infPredictedState['t'][:, :] = (
          self.infPredictedState['candidate'][:, :])
      self.cellConfidence['t'][:, :] = self.cellConfidence['candidate'][:, :]
      self.colConfidence['t'][:] = self.colConfidence['candidate'][:]

  # Remove any useless patterns at the head of the previous input pattern
  # queue.
  for i in range(numPrevPatterns):
    if (i in badPatterns or
        (candStartOffset is not None and i <= candStartOffset)):
      if self.verbosity >= 3:
        print ("Removing useless pattern from history:",
               self._prevInfPatterns[0])
      self._prevInfPatterns.pop(0)
    else:
      break

  # Restore the original predicted state.
  self.infPredictedState['t-1'][:, :] = self.infPredictedState['backup'][:, :]
def inferPhase1(self, activeColumns, useStartCells):
  """
  Update the inference active state from the last set of predictions
  and the current bottom-up.

  This looks at:
      - @ref infPredictedState['t-1']
  This modifies:
      - @ref infActiveState['t']

  @param activeColumns list of active bottom-ups
  @param useStartCells If true, ignore previous predictions and simply turn on
                       the start cells in the active columns
  @returns True if the current input was sufficiently predicted, OR
           if we started over on startCells.
           False indicates that the current input was NOT predicted,
           and we are now bursting on most columns.
  """
  # Start from an all-zero active state; we rebuild it from scratch below.
  activeState = self.infActiveState['t']
  activeState.fill(0)

  # Following a reset: ignore the previous predictions entirely and light
  # up only the start cell (cell 0) of each active column.
  if useStartCells:
    for col in activeColumns:
      activeState[col, 0] = 1
    return True

  # Otherwise, activate the predicted cells in each bottom-up column; a
  # column with no predicted cells bursts (all of its cells turn on).
  numPredictedColumns = 0
  for col in activeColumns:
    predictedCells = numpy.where(self.infPredictedState['t-1'][col] == 1)[0]
    if len(predictedCells) > 0:
      activeState[col, predictedCells] = 1
      numPredictedColumns += 1
    else:
      activeState[col, :] = 1  # whole column bursts

  # The input counts as "in sequence" when at least half of the bottom-up
  # columns had a predicted cell.
  return numPredictedColumns >= 0.50 * len(activeColumns)
def inferPhase2(self):
  """
  Phase 2 for the inference state. This computes the predicted state, then
  checks to insure that the predicted state is not over-saturated, i.e.
  looks too much like a burst. This indicates that there were so many
  separate paths learned from the current input columns to the predicted
  input columns that bursting on the current input columns most likely
  generated mix and match errors on cells in the predicted columns. If
  we detect this situation, we instead turn on only the start cells in the
  current active columns and re-generate the predicted state from those.

  @returns True if we have a decent guess as to the next input.
           Returning False from here indicates to the caller that we have
           reached the end of a learned sequence.

  This looks at:
      - @ref infActiveState['t']
  This modifies:
      - @ref infPredictedState['t']
      - @ref colConfidence['t']
      - @ref cellConfidence['t']
  """
  # Init to zeros to start
  self.infPredictedState['t'].fill(0)
  self.cellConfidence['t'].fill(0)
  self.colConfidence['t'].fill(0)
  # Phase 2 - Compute new predicted state and update cell and column
  # confidences
  for c in xrange(self.numberOfCols):
    # For each cell in the column
    for i in xrange(self.cellsPerColumn):
      # For each segment in the cell
      for s in self.cells[c][i]:
        # See if it has the min number of active synapses (counts all
        # synapses here, not just connected ones)
        numActiveSyns = self.getSegmentActivityLevel(
            s, self.infActiveState['t'], connectedSynapsesOnly=False)
        if numActiveSyns < self.activationThreshold:
          continue
        # Incorporate the confidence into the owner cell and column
        if self.verbosity >= 6:
          print "incorporating DC from cell[%d,%d]: " % (c, i),
          s.debugPrint()
        # dutyCycle() is the segment's moving-average prediction record
        dc = s.dutyCycle()
        self.cellConfidence['t'][c, i] += dc
        self.colConfidence['t'][c] += dc
        # If we reach threshold on the connected synapses, predict it
        # If not active, skip over it
        if self.isSegmentActive(s, self.infActiveState['t']):
          self.infPredictedState['t'][c, i] = 1
  # Normalize column and cell confidences (so the column confidences sum
  # to 1.0)
  sumConfidences = self.colConfidence['t'].sum()
  if sumConfidences > 0:
    self.colConfidence['t'] /= sumConfidences
    self.cellConfidence['t'] /= sumConfidences
  # Are we predicting the required minimum number of columns? The bar is
  # half of the running average input density.
  numPredictedCols = self.infPredictedState['t'].max(axis=1).sum()
  if numPredictedCols >= 0.5 * self.avgInputDensity:
    return True
  else:
    return False
def updateInferenceState(self, activeColumns):
  """
  Update the inference state. Called from compute() on every iteration.

  This rolls the inference arrays from 't' into 't-1', records the input in
  the inference history (used by inferBacktrack), then runs inferPhase1
  and, if we are still in sequence, inferPhase2. If either phase reports
  that the input was insufficiently predicted, inferBacktrack() is invoked
  to try to lock on again from an earlier point in the input history.

  @param activeColumns The list of active column indices.
  """
  # Copy t to t-1
  self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
  self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
  self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
  self.colConfidence['t-1'][:] = self.colConfidence['t'][:]
  # Each phase will zero/initialize the 't' states that it affects
  # Update our inference input history (bounded by maxInfBacktrack)
  if self.maxInfBacktrack > 0:
    if len(self._prevInfPatterns) > self.maxInfBacktrack:
      self._prevInfPatterns.pop(0)
    self._prevInfPatterns.append(activeColumns)
  # Compute the active state given the predictions from last time step and
  # the current bottom-up
  inSequence = self.inferPhase1(activeColumns, self.resetCalled)
  # If this input was considered unpredicted, let's go back in time and
  # replay the recent inputs from start cells and see if we can lock onto
  # this current set of inputs that way.
  if not inSequence:
    if self.verbosity >= 3:
      print ("Too much unpredicted input, re-tracing back to try and lock on "
             "at an earlier timestep.")
    # inferBacktrack() will call inferPhase2() for us.
    self.inferBacktrack(activeColumns)
    return
  # Compute the predicted cells and the cell and column confidences
  inSequence = self.inferPhase2()
  if not inSequence:
    if self.verbosity >= 3:
      print ("Not enough predictions going forward, "
             "re-tracing back to try and lock on at an earlier timestep.")
    # inferBacktrack() will call inferPhase2() for us.
    self.inferBacktrack(activeColumns)
def learnBacktrackFrom(self, startOffset, readOnly=True):
  """ @internal
  A utility method called from learnBacktrack. This will backtrack
  starting from the given startOffset in our prevLrnPatterns queue.

  It returns True if the backtrack was successful and we managed to get
  predictions all the way up to the current time step.

  If readOnly, then no segments are updated or modified, otherwise, all
  segment updates that belong to the given path are applied.

  This updates/modifies:
      - lrnActiveState['t']

  This trashes:
      - lrnPredictedState['t']
      - lrnPredictedState['t-1']
      - lrnActiveState['t-1']

  @param startOffset Start offset within the prevLrnPatterns input history
  @returns True if we managed to lock on to a sequence that started
           earlier.
           If False, we lost predictions somewhere along the way
           leading up to the current time.
  """
  # How much input history have we accumulated?
  # The current input is always at the end of self._prevLrnPatterns (at
  # index -1), but it is also evaluated as a potential starting point by
  # turning on its start cells and seeing if it generates sufficient
  # predictions going forward.
  numPrevPatterns = len(self._prevLrnPatterns)
  # This is an easy to use label for the current time step
  currentTimeStepsOffset = numPrevPatterns - 1
  # Clear out any old segment updates. learnPhase2() adds to the segment
  # updates if we're not readOnly
  if not readOnly:
    self.segmentUpdates = {}
  # Status message
  if self.verbosity >= 3:
    if readOnly:
      print (
        "Trying to lock-on using startCell state from %d steps ago:" % (
            numPrevPatterns - 1 - startOffset),
        self._prevLrnPatterns[startOffset])
    else:
      print (
        "Locking on using startCell state from %d steps ago:" % (
            numPrevPatterns - 1 - startOffset),
        self._prevLrnPatterns[startOffset])
  # Play through up to the current time step
  inSequence = True
  for offset in range(startOffset, numPrevPatterns):
    # Copy predicted and active states into t-1
    self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
    self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]
    # Get the input pattern
    inputColumns = self._prevLrnPatterns[offset]
    # Apply segment updates from the last set of predictions
    if not readOnly:
      self.processSegmentUpdates(inputColumns)
    # Phase 1:
    # Compute activeState[t] given bottom-up and predictedState[t-1]
    if offset == startOffset:
      # At the starting point: seed the active state with only the start
      # cells (cell 0) of the input columns.
      self.lrnActiveState['t'].fill(0)
      for c in inputColumns:
        self.lrnActiveState['t'][c, 0] = 1
      inSequence = True
    else:
      # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
      # computes lrnActiveState['t']
      inSequence = self.learnPhase1(inputColumns, readOnly=readOnly)
    # Break out immediately if we fell out of sequence or reached the current
    # time step
    if not inSequence or offset == currentTimeStepsOffset:
      break
    # Phase 2:
    # Computes predictedState['t'] given activeState['t'] and also queues
    # up active segments into self.segmentUpdates, unless this is readOnly
    if self.verbosity >= 3:
      print " backtrack: computing predictions from ", inputColumns
    self.learnPhase2(readOnly=readOnly)
  # Return whether or not this starting point was valid
  return inSequence
def learnBacktrack(self):
  """
  This "backtracks" our learning state, trying to see if we can lock onto
  the current set of inputs by assuming the sequence started up to N steps
  ago on start cells.

  This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a
  sequence that started earlier.

  @returns >0 if we managed to lock on to a sequence that started
           earlier. The value returned is how many steps in the
           past we locked on.
           If 0 is returned, the caller needs to change active
           state to start on start cells.

  How it works:
  -------------------------------------------------------------------
  This method gets called from updateLearningState when we detect either of
  the following two conditions:

  -# Our PAM counter (@ref pamCounter) expired
  -# We reached the max allowed learned sequence length

  Either of these two conditions indicate that we want to start over on start
  cells.

  Rather than start over on start cells on the current input, we can
  accelerate learning by backtracking a few steps ago and seeing if perhaps
  a sequence we already at least partially know had already started.

  This updates/modifies:
      - @ref lrnActiveState['t']

  This trashes:
      - @ref lrnActiveState['t-1']
      - @ref lrnPredictedState['t']
      - @ref lrnPredictedState['t-1']
  """
  # How much input history have we accumulated?
  # The current input is always at the end of self._prevLrnPatterns (at
  # index -1), and is not a valid startingOffset to evaluate.
  numPrevPatterns = len(self._prevLrnPatterns) - 1
  if numPrevPatterns <= 0:
    if self.verbosity >= 3:
      print "lrnBacktrack: No available history to backtrack from"
    # NOTE: False == 0 in Python, which matches the documented "0 means
    # could not lock on" contract.
    return False
  # We will record which previous input patterns did not generate predictions
  # up to the current time step and remove all the ones at the head of the
  # input history queue so that we don't waste time evaluating them again at
  # a later time step.
  badPatterns = []
  # Let's go back in time and replay the recent inputs from start cells and
  # see if we can lock onto this current set of inputs that way.
  #
  # Start the farthest back and work our way forward. For each starting point,
  # See if firing on start cells at that point would predict the current
  # input.
  #
  # We want to pick the point farthest in the past that has continuity
  # up to the current time step
  inSequence = False
  for startOffset in range(0, numPrevPatterns):
    # Can we backtrack from startOffset? (read-only evaluation pass)
    inSequence = self.learnBacktrackFrom(startOffset, readOnly=True)
    # Done playing through the sequence from starting point startOffset
    # Break out as soon as we find a good path
    if inSequence:
      break
    # Take this bad starting point out of our input history so we don't
    # try it again later.
    badPatterns.append(startOffset)
  # If we failed to lock on at any starting point, return failure. The caller
  # will start over again on start cells
  if not inSequence:
    if self.verbosity >= 3:
      print ("Failed to lock on. Falling back to start cells on current "
             "time step.")
    # Nothing in our input history was a valid starting point, so get rid
    # of it so we don't try any of them again at a later iteration
    self._prevLrnPatterns = []
    return False
  # We did find a valid starting point in the past. Now, we need to
  # re-enforce all segments that became active when following this path.
  if self.verbosity >= 3:
    print ("Discovered path to current input by using start cells from %d "
           "steps ago:" % (numPrevPatterns - startOffset),
           self._prevLrnPatterns[startOffset])
  # Second pass actually applies the queued segment updates along the path.
  self.learnBacktrackFrom(startOffset, readOnly=False)
  # Remove any useless patterns at the head of the input pattern history
  # queue.
  for i in range(numPrevPatterns):
    if i in badPatterns or i <= startOffset:
      if self.verbosity >= 3:
        print ("Removing useless pattern from history:",
               self._prevLrnPatterns[0])
      self._prevLrnPatterns.pop(0)
    else:
      break
  return numPrevPatterns - startOffset
def learnPhase1(self, activeColumns, readOnly=False):
  """
  Compute the learning active state given the predicted state and
  the bottom-up input.

  @param activeColumns list of active bottom-ups
  @param readOnly      True if being called from backtracking logic.
                       This tells us not to increment any segment
                       duty cycles or queue up any updates.
  @returns True if the current input was sufficiently predicted, OR
           if we started over on startCells. False indicates that the current
           input was NOT predicted well enough to consider it as "inSequence"

  This looks at:
      - @ref lrnActiveState['t-1']
      - @ref lrnPredictedState['t-1']

  This modifies:
      - @ref lrnActiveState['t']
  """
  # Start out on a clean slate (the caller is responsible for having
  # already rolled 't' into 't-1')
  self.lrnActiveState['t'].fill(0)
  # For each column, turn on the predicted cell. There will always be at most
  # one predicted cell per column
  numUnpredictedColumns = 0
  for c in activeColumns:
    predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
    numPredictedCells = len(predictingCells)
    assert numPredictedCells <= 1
    # If we have a predicted cell, turn it on. The segment's posActivation
    # count will have already been incremented by processSegmentUpdates
    if numPredictedCells == 1:
      i = predictingCells[0]
      self.lrnActiveState['t'][c, i] = 1
      continue
    numUnpredictedColumns += 1
    if readOnly:
      # When backtracking read-only, never modify segments
      continue
    # If no predicted cell, pick the closest matching one to reinforce, or
    # if none exists, create a new segment on a cell in that column
    i, s, numActive = self.getBestMatchingCell(
        c, self.lrnActiveState['t-1'], self.minThreshold)
    if s is not None and s.isSequenceSegment():
      if self.verbosity >= 4:
        print "Learn branch 0, found segment match. Learning on col=", c
      self.lrnActiveState['t'][c, i] = 1
      segUpdate = self.getSegmentActiveSynapses(
          c, i, s, self.lrnActiveState['t-1'], newSynapses = True)
      s.totalActivations += 1
      # This will update the permanences, posActivationsCount, and the
      # lastActiveIteration (age).
      trimSegment = self.adaptSegment(segUpdate)
      if trimSegment:
        self.trimSegmentsInCell(c, i, [s], minPermanence = 0.00001,
                                minNumSyns = 0)
    # If no close match exists, create a new one
    else:
      # Choose a cell in this column to add a new segment to
      i = self.getCellForNewSegment(c)
      if (self.verbosity >= 4):
        print "Learn branch 1, no match. Learning on col=", c,
        print ", newCellIdxInCol=", i
      self.lrnActiveState['t'][c, i] = 1
      segUpdate = self.getSegmentActiveSynapses(
          c, i, None, self.lrnActiveState['t-1'], newSynapses=True)
      segUpdate.sequenceSegment = True # Make it a sequence segment
      self.adaptSegment(segUpdate)  # No need to check whether perm reached 0
  # Determine if we are out of sequence or not and reset our PAM counter
  # if we are in sequence. "In sequence" means fewer than half the bottom-up
  # columns were unpredicted.
  numBottomUpColumns = len(activeColumns)
  if numUnpredictedColumns < numBottomUpColumns / 2:
    return True   # in sequence
  else:
    return False  # out of sequence
def learnPhase2(self, readOnly=False):
  """
  Compute the predicted segments given the current set of active cells.

  @param readOnly True if being called from backtracking logic.
                  This tells us not to increment any segment
                  duty cycles or queue up any updates.

  This computes the lrnPredictedState['t'] and queues up any segments that
  became active (and the list of active synapses for each segment) into
  the segmentUpdates queue

  This looks at:
      - @ref lrnActiveState['t']

  This modifies:
      - @ref lrnPredictedState['t']
      - @ref segmentUpdates
  """
  # Clear out predicted state to start with
  self.lrnPredictedState['t'].fill(0)
  # Compute new predicted state. When computing predictions for
  # phase 2, we predict at most one cell per column (the one with the best
  # matching segment).
  for c in xrange(self.numberOfCols):
    # Is there a cell predicted to turn on in this column?
    i, s, numActive = self.getBestMatchingCell(
        c, self.lrnActiveState['t'], minThreshold = self.activationThreshold)
    if i is None:
      continue
    # Turn on the predicted state for the best matching cell and queue
    # the pertinent segment up for an update, which will get processed if
    # the cell receives bottom up in the future.
    self.lrnPredictedState['t'][c, i] = 1
    if readOnly:
      continue
    # Queue up this segment for updating; grow new synapses only while the
    # segment is below the target synapse count
    segUpdate = self.getSegmentActiveSynapses(
        c, i, s, activeState=self.lrnActiveState['t'],
        newSynapses=(numActive < self.newSynapseCount))
    s.totalActivations += 1 # increment totalActivations
    self.addToSegmentUpdates(c, i, segUpdate)
    if self.doPooling:
      # creates a new pooling segment if no best matching segment found
      # sum(all synapses) >= minThreshold, "weak" activation
      predSegment = self.getBestMatchingSegment(c, i,
                                                self.lrnActiveState['t-1'])
      segUpdate = self.getSegmentActiveSynapses(c, i, predSegment,
          self.lrnActiveState['t-1'], newSynapses=True)
      self.addToSegmentUpdates(c, i, segUpdate)
def updateLearningState(self, activeColumns):
  """
  Update the learning state. Called from compute() on every iteration.

  This rolls the learning arrays from 't' into 't-1', records the input in
  the learning history, applies queued segment updates, runs learnPhase1,
  handles the "start over on start cells" conditions (reset, PAM counter
  expiry, max sequence length) possibly via learnBacktrack(), and finally
  runs learnPhase2.

  @param activeColumns List of active column indices
  """
  # Copy predicted and active states into t-1
  self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
  self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]
  # Update our learning input history (bounded by maxLrnBacktrack)
  if self.maxLrnBacktrack > 0:
    if len(self._prevLrnPatterns) > self.maxLrnBacktrack:
      self._prevLrnPatterns.pop(0)
    self._prevLrnPatterns.append(activeColumns)
    if self.verbosity >= 4:
      print "Previous learn patterns: \n"
      print self._prevLrnPatterns
  # Process queued up segment updates, now that we have bottom-up, we
  # can update the permanences on the cells that we predicted to turn on
  # and did receive bottom-up
  self.processSegmentUpdates(activeColumns)
  # Decrement the PAM counter if it is running and increment our learned
  # sequence length
  if self.pamCounter > 0:
    self.pamCounter -= 1
  self.learnedSeqLength += 1
  # Phase 1 - turn on the predicted cell in each column that received
  # bottom-up. If there was no predicted cell, pick one to learn to.
  if not self.resetCalled:
    # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
    # computes lrnActiveState['t']
    inSequence = self.learnPhase1(activeColumns)
    # Reset our PAM counter if we are in sequence
    if inSequence:
      self.pamCounter = self.pamLength
  # Print status of PAM counter, learned sequence length
  if self.verbosity >= 3:
    print "pamCounter = ", self.pamCounter, "seqLength = ", \
          self.learnedSeqLength
  # Start over on start cells if any of the following occur:
  #    1.) A reset was just called
  #    2.) We have been too long out of sequence (the pamCounter has expired)
  #    3.) We have reached maximum allowed sequence length.
  #
  # Note that, unless we are following a reset, we also just learned or
  # re-enforced connections to the current set of active columns because
  # this input is still a valid prediction to learn.
  #
  # It is especially helpful to learn the connections to this input when
  # you have a maxSeqLength constraint in place. Otherwise, you will have
  # no continuity at all between sub-sequences of length maxSeqLength.
  if (self.resetCalled or self.pamCounter == 0 or
      (self.maxSeqLength != 0 and
       self.learnedSeqLength >= self.maxSeqLength)):
    if self.verbosity >= 3:
      if self.resetCalled:
        print "Starting over:", activeColumns, "(reset was called)"
      elif self.pamCounter == 0:
        print "Starting over:", activeColumns, "(PAM counter expired)"
      else:
        print "Starting over:", activeColumns, "(reached maxSeqLength)"
    # Update average learned sequence length - this is a diagnostic statistic.
    # If the PAM counter expired, the last pamLength inputs were not part of
    # a learned sequence, so subtract them out.
    if self.pamCounter == 0:
      seqLength = self.learnedSeqLength - self.pamLength
    else:
      seqLength = self.learnedSeqLength
    if self.verbosity >= 3:
      print " learned sequence length was:", seqLength
    self._updateAvgLearnedSeqLength(seqLength)
    # Backtrack to an earlier starting point, if we find one
    backSteps = 0
    if not self.resetCalled:
      backSteps = self.learnBacktrack()
    # Start over in the current time step if reset was called, or we couldn't
    # backtrack.
    if self.resetCalled or backSteps == 0:
      self.lrnActiveState['t'].fill(0)
      for c in activeColumns:
        self.lrnActiveState['t'][c, 0] = 1
      # Remove any old input history patterns
      self._prevLrnPatterns = []
    # Reset PAM counter
    self.pamCounter = self.pamLength
    self.learnedSeqLength = backSteps
    # Clear out any old segment updates from prior sequences
    self.segmentUpdates = {}
  # Phase 2 - Compute new predicted state. When computing predictions for
  # phase 2, we predict at most one cell per column (the one with the best
  # matching segment).
  self.learnPhase2()
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
  """
  Handle one compute, possibly learning.

  @param bottomUpInput     The bottom-up input, typically from a spatial pooler
  @param enableLearn       If true, perform learning
  @param computeInfOutput  If None, default behavior is to disable the inference
                           output when enableLearn is on.
                           If true, compute the inference output
                           If false, do not compute the inference output
  @returns the output vector produced by computeOutput()

  It is an error to have both enableLearn and computeInfOutput set to False

  By default, we don't compute the inference output when learning because it
  slows things down, but you can override this by passing in True for
  computeInfOutput
  """
  # As a speed optimization for now (until we need online learning), skip
  # computing the inference output while learning
  if computeInfOutput is None:
    if enableLearn:
      computeInfOutput = False
    else:
      computeInfOutput = True
  assert (enableLearn or computeInfOutput)
  # Get the list of columns that have bottom-up
  activeColumns = bottomUpInput.nonzero()[0]
  if enableLearn:
    self.lrnIterationIdx += 1
  self.iterationIdx += 1
  if self.verbosity >= 3:
    print "\n==== PY Iteration: %d =====" % (self.iterationIdx)
    print "Active cols:", activeColumns
  # Update segment duty cycles if we are crossing a "tier"
  # We determine if it's time to update the segment duty cycles. Since the
  # duty cycle calculation is a moving average based on a tiered alpha, it is
  # important that we update all segments on each tier boundary
  if enableLearn:
    if self.lrnIterationIdx in Segment.dutyCycleTiers:
      for c, i in itertools.product(xrange(self.numberOfCols),
                                    xrange(self.cellsPerColumn)):
        for segment in self.cells[c][i]:
          segment.dutyCycle()
    # Update the average input density (exponential moving average with
    # alpha = 0.01)
    if self.avgInputDensity is None:
      self.avgInputDensity = len(activeColumns)
    else:
      self.avgInputDensity = (0.99 * self.avgInputDensity +
                              0.01 * len(activeColumns))
  # First, update the inference state
  # As a speed optimization for now (until we need online learning), skip
  # computing the inference output while learning
  if computeInfOutput:
    self.updateInferenceState(activeColumns)
  # Next, update the learning state
  if enableLearn:
    self.updateLearningState(activeColumns)
    # Apply global decay, and remove synapses and/or segments.
    # Synapses are removed if their permanence value is <= 0.
    # Segments are removed when they don't have synapses anymore.
    # Removal of synapses can trigger removal of whole segments!
    # todo: isolate the synapse/segment retraction logic so that
    # it can be called in adaptSegments, in the case where we
    # do global decay only episodically.
    if self.globalDecay > 0.0 and ((self.lrnIterationIdx % self.maxAge) == 0):
      for c, i in itertools.product(xrange(self.numberOfCols),
                                    xrange(self.cellsPerColumn)):
        segsToDel = [] # collect and remove outside the loop
        for segment in self.cells[c][i]:
          age = self.lrnIterationIdx - segment.lastActiveIteration
          if age <= self.maxAge:
            continue
          synsToDel = [] # collect and remove outside the loop
          for synapse in segment.syns:
            synapse[2] = synapse[2] - self.globalDecay # decrease permanence
            if synapse[2] <= 0:
              synsToDel.append(synapse) # add to list to delete
          # 1 for sequenceSegment flag
          if len(synsToDel) == segment.getNumSynapses():
            segsToDel.append(segment) # will remove the whole segment
          elif len(synsToDel) > 0:
            for syn in synsToDel: # remove some synapses on segment
              segment.syns.remove(syn)
        for seg in segsToDel: # remove some segments of this cell
          self.cleanUpdatesList(c, i, seg)
          self.cells[c][i].remove(seg)
    # Teach the trivial predictors
    if self.trivialPredictor is not None:
      self.trivialPredictor.learn(activeColumns)
  # Update the prediction score stats
  # Learning always includes inference
  if self.collectStats:
    if computeInfOutput:
      predictedState = self.infPredictedState['t-1']
    else:
      predictedState = self.lrnPredictedState['t-1']
    self._updateStatsInferEnd(self._internalStats,
                              activeColumns,
                              predictedState,
                              self.colConfidence['t-1'])
    # Make trivial predictions and collect stats
    if self.trivialPredictor is not None:
      for m in self.trivialPredictor.methods:
        if computeInfOutput:
          self.trivialPredictor.infer(activeColumns)
        self._updateStatsInferEnd(
            self.trivialPredictor._internalStats[m],
            activeColumns,
            self.trivialPredictor.predictedState[m]['t-1'],
            self.trivialPredictor.confidence[m]['t-1'])
  # Finally return the TP output
  output = self.computeOutput()
  # Print diagnostic information based on the current verbosity level
  self.printComputeEnd(output, learn=enableLearn)
  self.resetCalled = False
  return output
def infer(self, bottomUpInput):
  """
  Run one inference-only compute step (learning disabled).

  @param bottomUpInput The bottom-up input, typically from a spatial pooler
  @returns the TP output for this time step (see compute())
  """
  # Thin convenience wrapper around compute()
  output = self.compute(bottomUpInput, enableLearn=False)
  return output
def learn(self, bottomUpInput, computeInfOutput=None):
  """
  Run one compute step with learning enabled.

  @param bottomUpInput    The bottom-up input, typically from a spatial pooler
  @param computeInfOutput Whether to also compute the inference output;
                          see compute() for the default behavior when None
  @returns the TP output for this time step (see compute())
  """
  # Thin convenience wrapper around compute()
  output = self.compute(bottomUpInput, enableLearn=True,
                        computeInfOutput=computeInfOutput)
  return output
def updateSegmentDutyCycles(self):
  """
  This gets called on every compute. It determines if it's time to
  update the segment duty cycles. Since the duty cycle calculation is a
  moving average based on a tiered alpha, it is important that we update
  all segments on each tier boundary.
  """
  # Do nothing except on the hard-coded tier boundaries.
  # NOTE(review): compute() uses Segment.dutyCycleTiers for the same
  # purpose — confirm whether this hard-coded list should match it.
  if self.lrnIterationIdx not in [100, 1000, 10000]:
    return
  # Refresh the cached duty cycle of every segment of every cell.
  for col in range(self.numberOfCols):
    for cell in range(self.cellsPerColumn):
      for segment in self.cells[col][cell]:
        segment.dutyCycle()
def columnConfidences(self, cellConfidences=None):
  """
  Return the column confidence scores from the last compute.

  @param cellConfidences Cell confidences to use, or None to use the
                         current cell confidences.
  @returns Column confidence scores

  NOTE: the cellConfidences argument is currently ignored; the stored
  column confidences from the last compute are returned regardless.
  """
  storedColConfidences = self.colConfidence['t']
  return storedColConfidences
def topDownCompute(self, topDownIn=None):
  """
  Top-down compute - generate expected input given output of the TP

  @param topDownIn top down input from the level above us
  @returns best estimate of the TP input that would have generated
           bottomUpOut.

  For now, we assume there is no one above us and that bottomUpOut is
  simply the output that corresponds to our currently stored column
  confidences, so topDownIn is ignored.
  """
  expectedInput = self.columnConfidences()
  return expectedInput
def trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
                       minNumSyns):
  """
  Delete weak synapses and under-populated segments for one cell.

  Goes through segList, removes every synapse whose permanence is below
  minPermanence, and deletes any segment that ends up with fewer than
  minNumSyns synapses (or whose synapses were all weak).

  @param colIdx        Column index
  @param cellIdx       Cell index within the column
  @param segList       List of segment references
  @param minPermanence Any syn whose permanence is 0 or < minPermanence will
                       be deleted. Defaults to self.connectedPerm if None.
  @param minNumSyns    Any segment with less than minNumSyns synapses remaining
                       in it will be deleted. Defaults to
                       self.activationThreshold if None.
  @returns tuple (numSegsRemoved, numSynsRemoved)
  """
  # Fill in defaults
  if minPermanence is None:
    minPermanence = self.connectedPerm
  if minNumSyns is None:
    minNumSyns = self.activationThreshold

  removedSegCount = 0
  removedSynCount = 0
  doomedSegments = []  # collect and remove segments outside the loop
  for seg in segList:
    weakSyns = [syn for syn in seg.syns if syn[2] < minPermanence]
    if len(weakSyns) == len(seg.syns):
      # Every synapse is below threshold: drop the whole segment. Its
      # synapses are left in place and counted when the segment is
      # removed below.
      doomedSegments.append(seg)
      continue
    # Strip the weak synapses in place (other references to seg.syns
    # must observe the mutation)
    for syn in weakSyns:
      seg.syns.remove(syn)
      removedSynCount += 1
    if len(seg.syns) < minNumSyns:
      doomedSegments.append(seg)

  # Remove doomed segments and also take them out of the segment update
  # list, if they are in there
  removedSegCount += len(doomedSegments)
  for seg in doomedSegments:
    self.cleanUpdatesList(colIdx, cellIdx, seg)
    self.cells[colIdx][cellIdx].remove(seg)
    removedSynCount += len(seg.syns)
  return removedSegCount, removedSynCount
def trimSegments(self, minPermanence=None, minNumSyns=None):
  """
  This method deletes all synapses whose permanence is less than
  minPermanence and deletes any segments that have less than
  minNumSyns synapses remaining.

  @param minPermanence Any syn whose permanence is 0 or < minPermanence will
                       be deleted. If None is passed in, then
                       self.connectedPerm is used.
  @param minNumSyns    Any segment with less than minNumSyns synapses remaining
                       in it will be deleted. If None is passed in, then
                       self.activationThreshold is used.
  @returns tuple (numSegsRemoved, numSynsRemoved)
  """
  # Fill in defaults
  if minPermanence is None:
    minPermanence = self.connectedPerm
  if minNumSyns is None:
    minNumSyns = self.activationThreshold
  # Loop through all cells, delegating the per-cell pruning to
  # trimSegmentsInCell and accumulating the totals
  totalSegsRemoved, totalSynsRemoved = 0, 0
  for c, i in itertools.product(xrange(self.numberOfCols),
                                xrange(self.cellsPerColumn)):
    (segsRemoved, synsRemoved) = self.trimSegmentsInCell(
        colIdx=c, cellIdx=i, segList=self.cells[c][i],
        minPermanence=minPermanence, minNumSyns=minNumSyns)
    totalSegsRemoved += segsRemoved
    totalSynsRemoved += synsRemoved
  # Print all cells if verbosity says to
  if self.verbosity >= 5:
    print "Cells, all segments:"
    self.printCells(predictedOnly=False)
  return totalSegsRemoved, totalSynsRemoved
def cleanUpdatesList(self, col, cellIdx, seg):
  """
  Removes any update that would be for the given col, cellIdx, segIdx.

  NOTE: logically, we need to do this when we delete segments, so that if
  an update refers to a segment that was just deleted, we also remove
  that update from the update list. However, I haven't seen it trigger
  in any of the unit tests yet, so it might mean that it's not needed
  and that situation doesn't occur, by construction.

  @param col     Column index of the deleted segment's owner cell
  @param cellIdx Cell index within the column
  @param seg     The segment object being deleted
  """
  # TODO: check if the situation described in the docstring above actually
  # occurs.
  # Scan all queued updates for the (col, cellIdx) key and remove the ones
  # that reference the deleted segment. removeSegmentUpdate() mutates the
  # per-key list, so the queue itself is not re-keyed here.
  for key, updateList in self.segmentUpdates.iteritems():
    c, i = key[0], key[1]
    if c == col and i == cellIdx:
      for update in updateList:
        if update[1].segment == seg:
          self.removeSegmentUpdate(update)
def finishLearning(self):
  """
  Called when learning has been completed. This method just calls
  trimSegments(). (finishLearning is here for backward compatibility)
  """
  # Keep weakly formed synapses around because they contain confidence scores
  # for paths out of learned sequences and produce a better prediction than
  # chance.
  self.trimSegments(minPermanence=0.0001)
  # Update all cached duty cycles for better performance right after loading
  # in the trained network.
  for c, i in itertools.product(xrange(self.numberOfCols),
                                xrange(self.cellsPerColumn)):
    for segment in self.cells[c][i]:
      segment.dutyCycle()
  # For error checking purposes, make sure no start cell has incoming
  # connections (start cells are only seeded directly, never predicted)
  if self.cellsPerColumn > 1:
    for c in xrange(self.numberOfCols):
      assert self.getNumSegmentsInCell(c, 0) == 0
  def checkPrediction2(self, patternNZs, output=None, colConfidence=None,
                       details=False):
    """
    This function will replace checkPrediction.

    This function produces goodness-of-match scores for a set of input patterns,
    by checking for their presence in the current and predicted output of the
    TP. Returns a global count of the number of extra and missing bits, the
    confidence scores for each input pattern, and (if requested) the
    bits in each input pattern that were not present in the TP's prediction.

    @param patternNZs a list of input patterns that we want to check for. Each
                      element is a list of the non-zeros in that pattern.
    @param output     The output of the TP. If not specified, then use the
                      TP's current output. This can be specified if you are
                      trying to check the prediction metric for an output from
                      the past.
    @param colConfidence The column confidences. If not specified, then use the
                         TP's current self.colConfidence. This can be specified
                         if you are trying to check the prediction metrics for
                         an output from the past.
    @param details    if True, also include details of missing bits per pattern.

    @returns  list containing:
              [
                totalExtras,
                totalMissing,
                [conf_1, conf_2, ...],
                [missing1, missing2, ...]
              ]

    @retval totalExtras a global count of the number of 'extras', i.e. bits that
                        are on in the current output but not in the or of all
                        the passed in patterns
    @retval totalMissing a global count of all the missing bits, i.e. the bits
                         that are on in the or of the patterns, but not in the
                         current output
    @retval conf_i the confidence score for the i'th pattern in patternNZs
                   This consists of 3 items as a tuple:
                   (predictionScore, posPredictionScore, negPredictionScore)
    @retval missing_i the bits in the i'th pattern that were missing
                      in the output. This list is only returned if details is
                      True.
    """
    # TODO: Add option to check predictedState only.

    # Get the non-zeros in each pattern
    numPatterns = len(patternNZs)

    # Compute the union of all the expected patterns
    orAll = set()
    orAll = orAll.union(*patternNZs)

    # Get the list of active columns in the output
    if output is None:
      assert self.currentOutput is not None
      output = self.currentOutput
    # Collapse the (column, cell) output into a set of active column indices;
    # a column is active if any of its cells are on.
    output = set(output.sum(axis=1).nonzero()[0])

    # Compute the total extra and missing in the output
    totalExtras = len(output.difference(orAll))
    totalMissing = len(orAll.difference(output))

    # Get the percent confidence level per column by summing the confidence
    # levels of the cells in the column. During training, each segment's
    # confidence number is computed as a running average of how often it
    # correctly predicted bottom-up activity on that column. A cell's
    # confidence number is taken from the first active segment found in the
    # cell. Note that confidence will only be non-zero for predicted columns.
    if colConfidence is None:
      colConfidence = self.colConfidence['t']

    # Assign confidences to each pattern
    confidences = []
    for i in xrange(numPatterns):
      # Sum of the column confidences for this pattern
      positivePredictionSum = colConfidence[patternNZs[i]].sum()
      # How many columns in this pattern
      positiveColumnCount = len(patternNZs[i])

      # Sum of all the column confidences
      totalPredictionSum = colConfidence.sum()
      # Total number of columns
      totalColumnCount = len(colConfidence)

      negativePredictionSum = totalPredictionSum - positivePredictionSum
      negativeColumnCount = totalColumnCount - positiveColumnCount

      # NOTE(review): the comments below say "average confidence score per
      # column" but the code assigns the raw *sums* (no division by the column
      # counts). The counts are only used to guard against empty patterns.
      # Confirm whether the normalization was intentionally dropped.
      # Compute the average confidence score per column for this pattern
      if positiveColumnCount != 0:
        positivePredictionScore = positivePredictionSum
      else:
        positivePredictionScore = 0.0

      # Compute the average confidence score per column for the other patterns
      if negativeColumnCount != 0:
        negativePredictionScore = negativePredictionSum
      else:
        negativePredictionScore = 0.0

      # Scale the positive and negative prediction scores so that they sum to
      # 1.0
      currentSum = negativePredictionScore + positivePredictionScore
      if currentSum > 0:
        positivePredictionScore *= 1.0/currentSum
        negativePredictionScore *= 1.0/currentSum

      predictionScore = positivePredictionScore - negativePredictionScore

      confidences.append((predictionScore,
                          positivePredictionScore,
                          negativePredictionScore))

    # Include detail? (bits in each pattern that were missing from the output)
    if details:
      missingPatternBits = [set(pattern).difference(output)
                            for pattern in patternNZs]
      return (totalExtras, totalMissing, confidences, missingPatternBits)
    else:
      return (totalExtras, totalMissing, confidences)
  def isSegmentActive(self, seg, activeState):
    """
    A segment is active if it has >= activationThreshold connected
    synapses that are active due to activeState.

    Notes: studied various cutoffs, none of which seem to be worthwhile
           list comprehension didn't help either

    @param seg         a Segment instance; only its synapse list (seg.syns)
                       is consulted
    @param activeState 2D array of cell activity, indexed [column, cellIdx]
                       -- presumably 0/1 valued; confirm against callers
    @returns True if the segment meets the activation threshold
    """
    # Delegates to the C implementation (module-level isSegmentActive) which
    # is *much* faster than the equivalent Python loop.
    return isSegmentActive(seg.syns, activeState,
                           self.connectedPerm, self.activationThreshold)
  def getSegmentActivityLevel(self, seg, activeState,
                              connectedSynapsesOnly=False):
    """
    This routine computes the activity level of a segment given activeState.
    It can tally up only connected synapses (permanence >= connectedPerm), or
    all the synapses of the segment, at either t or t-1.

    @param seg         a Segment instance; only its synapse list (seg.syns)
                       is consulted
    @param activeState 2D array of cell activity, indexed [column, cellIdx]
    @param connectedSynapsesOnly if True, count only synapses whose permanence
                       is >= self.connectedPerm; otherwise count every synapse
                       whose source cell is active
    @returns number of (qualifying) active synapses on the segment
    """
    # Delegates to the C implementation (module-level getSegmentActivityLevel)
    # which is *much* faster than the equivalent Python loop.
    return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly,
                                   self.connectedPerm)
  def getBestMatchingCell(self, c, activeState, minThreshold):
    """
    Find weakly activated cell in column with at least minThreshold active
    synapses.

    @param c            which column to look at
    @param activeState  the active cells
    @param minThreshold minimum number of synapses required

    @returns tuple (cellIdx, segment, numActiveSynapses), or
             (None, None, None) if no cell qualifies
    """
    # Collect all cells in column c that have at least minThreshold in the most
    # activated segment
    bestActivityInCol = minThreshold
    bestSegIdxInCol = -1
    bestCellInCol = -1

    for i in xrange(self.cellsPerColumn):
      # Find this cell's most active segment
      maxSegActivity = 0
      maxSegIdx = 0
      for j, s in enumerate(self.cells[c][i]):
        activity = self.getSegmentActivityLevel(s, activeState)
        if activity > maxSegActivity:
          maxSegActivity = activity
          maxSegIdx = j

      # '>=' means later cells win ties with earlier ones.
      # NOTE(review): if minThreshold is 0, a cell with no segments passes this
      # test (maxSegActivity == 0) and the indexing below would raise
      # IndexError -- confirm callers always pass minThreshold >= 1.
      if maxSegActivity >= bestActivityInCol:
        bestActivityInCol = maxSegActivity
        bestSegIdxInCol = maxSegIdx
        bestCellInCol = i

    if bestCellInCol == -1:
      return (None, None, None)
    else:
      return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],
              bestActivityInCol)
def getBestMatchingSegment(self, c, i, activeState):
"""
For the given cell, find the segment with the largest number of active
synapses. This routine is aggressive in finding the best match. The
permanence value of synapses is allowed to be below connectedPerm. The number
of active synapses is allowed to be below activationThreshold, but must be
above minThreshold. The routine returns the segment index. If no segments are
found, then an index of -1 is returned.
@param c TODO: document
@param i TODO: document
@param activeState TODO: document
"""
maxActivity, which = self.minThreshold, -1
for j, s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, activeState,
connectedSynapsesOnly=False)
if activity >= maxActivity:
maxActivity, which = activity, j
if which == -1:
return None
else:
return self.cells[c][i][which]
  def getCellForNewSegment(self, colIdx):
    """
    Return the index of a cell in this column which is a good candidate
    for adding a new segment.

    When we have fixed size resources in effect, we insure that we pick a
    cell which does not already have the max number of allowed segments. If
    none exists, we choose the least used segment in the column to re-allocate.

    NOTE: all random choices go through self._random so that cell selection
    stays in lockstep with the C++ implementation.

    @param colIdx which column to look at
    @returns cell index
    """
    # Not fixed size CLA, just choose a cell randomly
    if self.maxSegmentsPerCell < 0:
      if self.cellsPerColumn > 1:
        # Don't ever choose the start cell (cell # 0) in each column
        i = self._random.getUInt32(self.cellsPerColumn-1) + 1
      else:
        i = 0
      return i

    # Fixed size CLA, choose from among the cells that are below the maximum
    # number of segments.
    # NOTE: It is important NOT to always pick the cell with the fewest number
    # of segments. The reason is that if we always do that, we are more likely
    # to run into situations where we choose the same set of cell indices to
    # represent an 'A' in both context 1 and context 2. This is because the
    # cell indices we choose in each column of a pattern will advance in
    # lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
    candidateCellIdxs = []
    if self.cellsPerColumn == 1:
      minIdx = 0
      maxIdx = 0
    else:
      minIdx = 1 # Don't include startCell in the mix
      maxIdx = self.cellsPerColumn-1
    for i in xrange(minIdx, maxIdx+1):
      numSegs = len(self.cells[colIdx][i])
      if numSegs < self.maxSegmentsPerCell:
        candidateCellIdxs.append(i)

    # If we found one, return with it. Note we need to use _random to maintain
    # correspondence with CPP code.
    if len(candidateCellIdxs) > 0:
      #candidateCellIdx = random.choice(candidateCellIdxs)
      candidateCellIdx = (
          candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
      if self.verbosity >= 5:
        print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
            colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
      return candidateCellIdx

    # All cells in the column are full, find a segment to free up
    candidateSegment = None
    candidateSegmentDC = 1.0
    # NOTE(review): if every segment's dutyCycle() were >= 1.0,
    # candidateCellIdx would be unbound below; presumably duty cycles are
    # always < 1.0 in practice -- confirm.
    # For each cell in this column
    for i in xrange(minIdx, maxIdx+1):
      # For each segment in this cell
      for s in self.cells[colIdx][i]:
        dc = s.dutyCycle()
        if dc < candidateSegmentDC:
          candidateCellIdx = i
          candidateSegmentDC = dc
          candidateSegment = s

    # Free up the least used segment
    if self.verbosity >= 5:
      print ("Deleting segment #%d for cell[%d,%d] to make room for new "
             "segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
      candidateSegment.debugPrint()
    # Remove any pending updates that reference the deleted segment
    self.cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
    self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
    return candidateCellIdx
  def getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
    """
    Return a segmentUpdate data structure containing a list of proposed
    changes to segment s. Let activeSynapses be the list of active synapses
    where the originating cells have their activeState output = 1 at time step
    t. (This list is empty if s is None since the segment doesn't exist.)
    newSynapses is an optional argument that defaults to false. If newSynapses
    is true, then newSynapseCount - len(activeSynapses) synapses are added to
    activeSynapses. These synapses are randomly chosen from the set of cells
    that have learnState = 1 at timeStep.

    @param c           column index of the owner cell
    @param i           cell index of the owner cell within the column
    @param s           the Segment to update, or None when a new segment is
                       being proposed
    @param activeState 2D array of cell activity, indexed [column, cellIdx]
    @param newSynapses if True, top up the update with randomly chosen new
                       source cells until newSynapseCount is reached
    @returns a TP.SegmentUpdate describing the proposed changes
    """
    activeSynapses = []
    if s is not None: # s can be None, if adding a new segment
      # Here we add *integers* to activeSynapses
      activeSynapses = [idx for idx, syn in enumerate(s.syns) \
                        if activeState[syn[0], syn[1]]]

    if newSynapses: # add a few more synapses
      nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
      # Here we add *pairs* (colIdx, cellIdx) to activeSynapses
      activeSynapses += self.chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
                                                    activeState)

    # It's still possible that activeSynapses is empty, and this will
    # be handled in addToSegmentUpdates

    # NOTE: activeSynapses contains a mixture of integers and pairs of integers
    # - integers are indices of synapses already existing on the segment,
    #   that we will need to update.
    # - pairs represent source (colIdx, cellIdx) of new synapses to create on
    #   the segment
    update = TP.SegmentUpdate(c, i, s, activeSynapses)

    return update
  def chooseCellsToLearnFrom(self, c, i, s, n, activeState):
    """
    Choose n random cells to learn from.

    This function is called several times while learning with timeStep = t-1,
    so we cache the set of candidates for that case. It's also called once with
    timeStep = t, and we cache that set of candidates.

    NOTE: sampling goes through self._random to stay in lockstep with the
    C++ implementation.

    @param c           column index of the owner cell (unused here, kept for
                       signature symmetry with callers)
    @param i           cell index of the owner cell (unused here)
    @param s           the Segment being grown, or None for a brand-new segment
    @param n           number of source cells to pick
    @param activeState 2D array of cell activity to sample sources from
    @returns list of (column index, cell index) tuples; may be shorter than n
             (or empty) if there are not enough candidates
    """
    if n <= 0:
      return []

    # tmpCandidates is a pair of parallel arrays: (colIdxs, cellIdxs)
    tmpCandidates = numpy.where(activeState == 1)

    # Candidates can be empty at this point, in which case we return
    # an empty segment list. adaptSegments will do nothing when getting
    # that list.
    if len(tmpCandidates[0]) == 0:
      return []

    if s is None: # new segment
      cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
    else:
      # We exclude any synapse that is already in this segment.
      synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
      cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])
               if (syn[0], syn[1]) not in synapsesAlreadyInSegment]

    # If we have no more candidates than requested, return all of them,
    # no shuffle necessary.
    if len(cands) <= n:
      return cands

    if n == 1: # so that we don't shuffle if only one is needed
      idx = self._random.getUInt32(len(cands))
      return [cands[idx]]  # col and cell idx in col

    # If we need more than one candidate, sample without replacement via
    # the shared RNG.
    indices = numpy.array([j for j in range(len(cands))], dtype='uint32')
    tmp = numpy.zeros(min(n, len(indices)), dtype='uint32')
    self._random.getUInt32Sample(indices, tmp, True)
    return [cands[j] for j in tmp]
  def processSegmentUpdates(self, activeColumns):
    """
    Go through the list of accumulated segment updates and process them
    as follows:

    if the segment update is too old, remove the update
    else if the cell received bottom-up, update its permanences
    else if it's still being predicted, leave it in the queue
    else remove it.

    @param activeColumns container of currently active column indices;
           membership is tested with `in` -- presumably a set or sorted
           array, confirm against callers
    """
    # The segmentUpdates dict has keys which are the column,cellIdx of the
    # owner cell. The values are lists of segment updates for that cell
    removeKeys = []
    trimSegments = []
    for key, updateList in self.segmentUpdates.iteritems():

      # Get the column number and cell index of the owner cell
      c, i = key[0], key[1]

      # If the cell received bottom-up, update its segments
      if c in activeColumns:
        action = 'update'

      # If not, either keep it around if it's still predicted, or remove it
      else:
        # If it is still predicted, and we are pooling, keep it around
        if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
          action = 'keep'
        else:
          action = 'remove'

      # Process each segment for this cell. Each segment entry contains
      # [creationDate, SegmentInfo]
      updateListKeep = []
      if action != 'remove':
        for (createDate, segUpdate) in updateList:

          if self.verbosity >= 4:
            print "_nLrnIterations =", self.lrnIterationIdx,
            print segUpdate

          # If this segment has expired. Ignore this update (and hence remove it
          # from list)
          if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
            continue

          if action == 'update':
            trimSegment = self.adaptSegment(segUpdate)
            if trimSegment:
              trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
                                   segUpdate.segment))
          else:
            # Keep segments that haven't expired yet (the cell is still being
            # predicted)
            updateListKeep.append((createDate, segUpdate))

      # NOTE: mutating values (not keys) while iterating the dict is safe;
      # key removal is deferred to the loop below for the same reason.
      self.segmentUpdates[key] = updateListKeep
      if len(updateListKeep) == 0:
        removeKeys.append(key)

    # Clean out empty segment updates
    for key in removeKeys:
      self.segmentUpdates.pop(key)

    # Trim segments that had synapses go to 0
    for (c, i, segment) in trimSegments:
      self.trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
                              minNumSyns = 0)
  def adaptSegment(self, segUpdate):
    """
    This function applies segment update information to a segment in a
    cell.

    Synapses on the active list get their permanence counts incremented by
    permanenceInc. All other synapses get their permanence counts decremented
    by permanenceDec.

    We also increment the positiveActivations count of the segment.

    @param segUpdate SegmentUpdate instance
    @returns True if some synapses were decremented to 0 and the segment is a
             candidate for trimming
    """
    # This will be set to True if detect that any syapses were decremented to
    # 0
    trimSegment = False

    # segUpdate.segment is None when creating a new segment
    c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment

    # update.activeSynapses can be empty.
    # If not, it can contain either or both integers and tuples.
    # The integers are indices of synapses to update.
    # The tuples represent new synapses to create (src col, src cell in col).
    # We pre-process to separate these various element types.
    # synToCreate is not empty only if positiveReinforcement is True.
    # NOTE: the synapse indices start at *1* to skip the segment flags.
    activeSynapses = segUpdate.activeSynapses
    synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])

    # Modify an existing segment
    if segment is not None:

      if self.verbosity >= 4:
        print "Reinforcing segment #%d for cell[%d,%d]" % (segment.segID, c, i)
        print "  before:",
        segment.debugPrint()

      # Mark it as recently useful
      segment.lastActiveIteration = self.lrnIterationIdx

      # Update frequency and positiveActivations
      segment.positiveActivations += 1       # positiveActivations += 1
      segment.dutyCycle(active=True)

      # First, decrement synapses that are not active
      # s is a synapse *index*, with index 0 in the segment being the tuple
      # (segId, sequence segment flag). See below, creation of segments.
      lastSynIndex = len(segment.syns) - 1
      inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
                            if s not in synToUpdate]
      trimSegment = segment.updateSynapses(inactiveSynIndices,
                                           -self.permanenceDec)

      # Now, increment active synapses
      activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
      segment.updateSynapses(activeSynIndices, self.permanenceInc)

      # Finally, create new synapses if needed
      # syn is now a tuple (src col, src cell)
      synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
      # If we have fixed resources, get rid of some old syns if necessary
      if self.maxSynapsesPerSegment > 0 \
          and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
        numToFree = (len(segment.syns) + len(synsToAdd) -
                     self.maxSynapsesPerSegment)
        segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
      for newSyn in synsToAdd:
        segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)

      if self.verbosity >= 4:
        print "   after:",
        segment.debugPrint()

    # Create a new segment
    else:

      # (segID, sequenceSegment flag, frequency, positiveActivations,
      #  totalActivations, lastActiveIteration)
      newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment)

      # numpy.float32 important so that we can match with C++
      for synapse in activeSynapses:
        newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)

      if self.verbosity >= 3:
        print "New segment #%d for cell[%d,%d]" % (self.segID-1, c, i),
        newSegment.debugPrint()

      self.cells[c][i].append(newSegment)

    return trimSegment
def getSegmentInfo(self, collectActiveData = False):
"""Returns information about the distribution of segments, synapses and
permanence values in the current TP. If requested, also returns information
regarding the number of currently active segments and synapses.
@returns tuple described below:
(
nSegments,
nSynapses,
nActiveSegs,
nActiveSynapses,
distSegSizes,
distNSegsPerCell,
distPermValues,
distAges
)
@retval nSegments total number of segments
@retval nSynapses total number of synapses
@retval nActiveSegs total no. of active segments (0 if collectActiveData
is False)
@retval nActiveSynapses total no. of active synapses 0 if collectActiveData
is False
@retval distSegSizes a dict where d[n] = number of segments with n synapses
@retval distNSegsPerCell a dict where d[n] = number of cells with n segments
@retval distPermValues a dict where d[p] = number of synapses with perm = p/10
@retval distAges a list of tuples (ageRange, numSegments)
"""
nSegments, nSynapses = 0, 0
nActiveSegs, nActiveSynapses = 0, 0
distSegSizes, distNSegsPerCell = {}, {}
distPermValues = {} # Num synapses with given permanence values
numAgeBuckets = 20
distAges = []
ageBucketSize = int((self.lrnIterationIdx+20) / 20)
for i in range(numAgeBuckets):
distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])
for c in xrange(self.numberOfCols):
for i in xrange(self.cellsPerColumn):
if len(self.cells[c][i]) > 0:
nSegmentsThisCell = len(self.cells[c][i])
nSegments += nSegmentsThisCell
if distNSegsPerCell.has_key(nSegmentsThisCell):
distNSegsPerCell[nSegmentsThisCell] += 1
else:
distNSegsPerCell[nSegmentsThisCell] = 1
for seg in self.cells[c][i]:
nSynapsesThisSeg = seg.getNumSynapses()
nSynapses += nSynapsesThisSeg
if distSegSizes.has_key(nSynapsesThisSeg):
distSegSizes[nSynapsesThisSeg] += 1
else:
distSegSizes[nSynapsesThisSeg] = 1
# Accumulate permanence value histogram
for syn in seg.syns:
p = int(syn[2]*10)
if distPermValues.has_key(p):
distPermValues[p] += 1
else:
distPermValues[p] = 1
# Accumulate segment age histogram
age = self.lrnIterationIdx - seg.lastActiveIteration
ageBucket = int(age/ageBucketSize)
distAges[ageBucket][1] += 1
# Get active synapse statistics if requested
if collectActiveData:
if self.isSegmentActive(seg, self.infActiveState['t']):
nActiveSegs += 1
for syn in seg.syns:
if self.activeState['t'][syn[0]][syn[1]] == 1:
nActiveSynapses += 1
return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
distSegSizes, distNSegsPerCell, distPermValues, distAges)
class Segment(object):
  """
  The Segment class is a container for all of the segment variables and
  the synapses it owns.

  A synapse is stored as the list [srcCellCol, srcCellIdx, permanence]
  (see addSynapse). The duty-cycle bookkeeping deliberately mirrors the
  C++ implementation so that Python and C++ networks stay comparable.
  """

  ## These are iteration count tiers used when computing segment duty cycle.
  dutyCycleTiers = [0, 100, 320, 1000,
                    3200, 10000, 32000, 100000,
                    320000]

  ## This is the alpha used in each tier. dutyCycleAlphas[n] is used when
  #  `iterationIdx > dutyCycleTiers[n]`.
  dutyCycleAlphas = [None, 0.0032, 0.0010, 0.00032,
                     0.00010, 0.000032, 0.00001, 0.0000032,
                     0.0000010]

  def __init__(self, tp, isSequenceSeg):
    # Back-reference to the owning TP (used for iteration counters and limits)
    self.tp = tp
    # Unique id drawn from the TP's global segment counter
    self.segID = tp.segID
    tp.segID += 1

    self.isSequenceSeg = isSequenceSeg
    self.lastActiveIteration = tp.lrnIterationIdx

    self.positiveActivations = 1
    self.totalActivations = 1

    # These are internal variables used to compute the positive activations
    # duty cycle.
    # Callers should use dutyCycle()
    self._lastPosDutyCycle = 1.0 / tp.lrnIterationIdx
    self._lastPosDutyCycleIteration = tp.lrnIterationIdx

    # Each synapse is a tuple (srcCellCol, srcCellIdx, permanence)
    self.syns = []

  def __ne__(self, s):
    # Defined in terms of __eq__ so the two always agree
    return not self == s

  def __eq__(self, s):
    """Structural equality: compare all attributes except the TP
    back-reference (which would recurse / differ between copies)."""
    d1 = self.__dict__
    d2 = s.__dict__
    if set(d1) != set(d2):
      return False
    for k, v in d1.iteritems():
      if k in ('tp',):
        continue
      elif v != d2[k]:
        return False
    return True

  def dutyCycle(self, active=False, readOnly=False):
    """Compute/update and return the positive activations duty cycle of
    this segment. This is a measure of how often this segment is
    providing good predictions.

    @param active   True if segment just provided a good prediction
    @param readOnly If True, compute the updated duty cycle, but don't change
               the cached value. This is used by debugging print statements.

    @returns The duty cycle, a measure of how often this segment is
    providing good predictions.

    **NOTE:** This method relies on different schemes to compute the duty cycle
    based on how much history we have. In order to support this tiered
    approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER**
    (@ref dutyCycleTiers).

    When we don't have a lot of history yet (first tier), we simply return
    number of positive activations / total number of iterations

    After a certain number of iterations have accumulated, it converts into
    a moving average calculation, which is updated only when requested
    since it can be a bit expensive to compute on every iteration (it uses
    the pow() function).

    The duty cycle is computed as follows:

        dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]

    If the value[t] has been 0 for a number of steps in a row, you can apply
    all of the updates at once using:

        dc[t] = (1-alpha)^(t-lastT) * dc[lastT]

    We use the alphas and tiers as defined in @ref dutyCycleAlphas and
    @ref dutyCycleTiers.
    """
    # For tier #0, compute it from total number of positive activations seen
    if self.tp.lrnIterationIdx <= self.dutyCycleTiers[1]:
      dutyCycle = float(self.positiveActivations) \
                                    / self.tp.lrnIterationIdx
      if not readOnly:
        self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx
        self._lastPosDutyCycle = dutyCycle
      return dutyCycle

    # How old is our update?
    age = self.tp.lrnIterationIdx - self._lastPosDutyCycleIteration

    # If it's already up to date, we can returned our cached value.
    if age == 0 and not active:
      return self._lastPosDutyCycle

    # Figure out which alpha we're using (highest tier we have passed)
    for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1):
      if self.tp.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:
        alpha = self.dutyCycleAlphas[tierIdx]
        break

    # Update duty cycle: decay for `age` silent iterations, then add the
    # current positive activation if any
    dutyCycle = pow(1.0-alpha, age) * self._lastPosDutyCycle
    if active:
      dutyCycle += alpha

    # Update cached values if not read-only
    if not readOnly:
      self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx
      self._lastPosDutyCycle = dutyCycle

    return dutyCycle

  def debugPrint(self):
    """Print segment information for verbose messaging and debugging.
    This uses the following format:

     ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75

    where:
      54413 - is the unique segment id
      True - is sequence segment
      0.64801 - moving average duty cycle
      (24/36) - (numPositiveActivations / numTotalActivations)
      101 - age, number of iterations since last activated
      [9,1]0.75 - synapse from column 9, cell #1, strength 0.75
      [10,1]0.75 - synapse from column 10, cell #1, strength 0.75
      [11,1]0.75 - synapse from column 11, cell #1, strength 0.75
    """
    # Segment ID
    print "ID:%-5d" % (self.segID),

    # Sequence segment or pooling segment
    if self.isSequenceSeg:
      print "True",
    else:
      print "False",

    # Duty cycle (readOnly so printing doesn't perturb the cache)
    print "%9.7f" % (self.dutyCycle(readOnly=True)),

    # numPositive/totalActivations
    print "(%4d/%-4d)" % (self.positiveActivations,
                          self.totalActivations),

    # Age
    print "%4d" % (self.tp.lrnIterationIdx - self.lastActiveIteration),

    # Print each synapses on this segment as: srcCellCol/srcCellIdx/perm
    # if the permanence is above connected, put [] around the synapse info
    # For aid in comparing to the C++ implementation, print them in sorted
    # order
    sortedSyns = sorted(self.syns)
    for _, synapse in enumerate(sortedSyns):
      print "[%d,%d]%4.2f" % (synapse[0], synapse[1], synapse[2]),
    print

  def isSequenceSegment(self):
    # Accessor kept for API parity with the C++ implementation
    return self.isSequenceSeg

  def getNumSynapses(self):
    return len(self.syns)

  def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0):
    """Free up some synapses in this segment. We always free up inactive
    synapses (lowest permanence freed up first) before we start to free up
    active ones.

    @param numToFree              number of synapses to free up
    @param inactiveSynapseIndices list of the inactive synapse indices
    @param verbosity              debug print level (>= 4 prints details)
    """
    # Make sure numToFree isn't larger than the total number of syns we have
    assert (numToFree <= len(self.syns))

    if (verbosity >= 4):
      print "\nIn PY freeNSynapses with numToFree =", numToFree,
      print "inactiveSynapseIndices =",
      for i in inactiveSynapseIndices:
        print self.syns[i][0:2],
      print

    # Remove the lowest perm inactive synapses first
    if len(inactiveSynapseIndices) > 0:
      perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
      candidates = numpy.array(inactiveSynapseIndices)[
          perms.argsort()[0:numToFree]]
      candidates = list(candidates)
    else:
      candidates = []

    # Do we need more? if so, remove the lowest perm active synapses too
    if len(candidates) < numToFree:
      activeSynIndices = [i for i in xrange(len(self.syns))
                          if i not in inactiveSynapseIndices]
      perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
      moreToFree = numToFree - len(candidates)
      moreCandidates = numpy.array(activeSynIndices)[
          perms.argsort()[0:moreToFree]]
      candidates += list(moreCandidates)

    if verbosity >= 4:
      print "Deleting %d synapses from segment to make room for new ones:" % (
          len(candidates)), candidates
      print "BEFORE:",
      self.debugPrint()

    # Free up all the candidates now
    # (remove by value; indices shift as entries are deleted, so we can't
    # delete by index here)
    synsToDelete = [self.syns[i] for i in candidates]
    for syn in synsToDelete:
      self.syns.remove(syn)

    if verbosity >= 4:
      print "AFTER:",
      self.debugPrint()

  def addSynapse(self, srcCellCol, srcCellIdx, perm):
    """Add a new synapse

    @param srcCellCol source cell column
    @param srcCellIdx source cell index within the column
    @param perm       initial permanence
    """
    # numpy.float32 keeps permanence arithmetic bit-comparable with C++
    self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])

  def updateSynapses(self, synapses, delta):
    """Update a set of synapses in the segment.

    @param synapses List of synapse indices to update
    @param delta    How much to add to each permanence

    @returns True if synapse reached 0
    """
    reached0 = False

    if delta > 0:
      for synapse in synapses:
        self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta

        # Cap synapse permanence at permanenceMax
        if newValue > self.tp.permanenceMax:
          self.syns[synapse][2] = self.tp.permanenceMax

    else:
      for synapse in synapses:
        self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta

        # Cap min synapse permanence to 0 in case there is no global decay
        if newValue <= 0:
          self.syns[synapse][2] = 0
          reached0 = True

    return reached0
# This module-level alias is necessary for unpickling objects that have
# instances of the nested class, since the pickle loading process looks for
# the class at the top level of the module.
SegmentUpdate = TP.SegmentUpdate
| Petr-Kovalev/nupic-win32 | py/nupic/research/TP.py | Python | gpl-3.0 | 132,913 |
__author__ = 'LT'
import numpy as np
import cvxopt.solvers
import kernel
from time import gmtime, strftime
from sklearn.metrics.pairwise import pairwise_kernels
from numpy import vstack, hstack, ones, zeros, absolute, where, divide, inf, \
delete, outer, transpose, diag, tile, arange, concatenate, empty, unique, round, amin
from numpy.linalg import inv, eig, solve
import data
#from profilehooks import profile
import sys
#Trains an SVM
class OCSVM(object):
# define global variables
_rho = None
_v = None
_gamma = None
_a_history = False
    #Class constructor: kernel function & nu & sigma
    def __init__(self, metric, nu, gamma, e=None):
        """
        @param metric kernel metric name.
               NOTE(review): `metric` is accepted but never stored; gram()
               hard-codes the "rbf" kernel -- confirm whether `metric`
               should be honored.
        @param nu    the one-class SVM nu parameter (fraction of outliers)
        @param gamma RBF kernel width parameter
        @param e     optional epsilon tolerance forwarded to the Data store
        """
        self._v = nu
        self._gamma = gamma
        self._data = data.Data()
        if e is not None:
            self._data.set_e(e)
    #returns trained SVM rho given features (X)
    # Please check libsvm what they provide for output, e.g., I need to access to the sv_idx all the time
    def fit(self, X, scale=1, v_target=None, rho=True):
        """Train the one-class SVM on feature matrix X.

        @param X        (n_samples, n_features) feature array
        @param scale    multiplier applied to the box constraint C
        @param v_target optional nu override forwarded to alpha()
        @param rho      if True, also compute and cache the offset rho
        """
        self._data.set_X(X)
        # C = 1/(nu * n) is the per-sample upper bound on each alpha
        self._data.set_C(1/(self._v * len(X)) * scale)
        # get lagrangian multiplier
        alpha = self.alpha(X, scale, v_target)
        self._data.set_alpha(alpha)
        # defines necessary parameter for prediction
        if rho: self.rho()
#returns SVM prediction with given X and langrange mutlipliers
def rho(self):
# compute rho assuming non zero rho, take average rho!
Xs = self._data.Xs()
if self._data.K_X() != None:
inds = self._data.get_sv()
K_X_Xs = self._data.K_X()[:, inds]
else:
K_X_Xs = self.gram(self._data.X(), self._data.Xs())
rho_all = self._data.alpha().dot(K_X_Xs)
self._rho = np.mean(rho_all)
    #compute Gram matrix
    def gram(self, X, Y=None):
        # RBF kernel via sklearn; when Y is None this computes K(X, X).
        # NOTE(review): the constructor's `metric` argument is ignored here;
        # the kernel is hard-coded to "rbf" -- confirm this is intended.
        return pairwise_kernels(X, Y, "rbf", gamma=self._gamma)
#compute Lagrangian multipliers
def alpha(self, X, scale = 1, v_target = None):
n_samples, n_features = X.shape
K = 2 * self.gram(X)
P = cvxopt.matrix(K)
q = cvxopt.matrix(zeros(n_samples))
A = cvxopt.matrix(ones((n_samples, 1)), (1, n_samples))
if v_target == None:
b = cvxopt.matrix(self._v * n_samples)
else:
b = cvxopt.matrix(v_target * n_samples)
G_1 = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
h_1 = cvxopt.matrix(np.zeros(n_samples))
G_2 = cvxopt.matrix(np.diag(np.ones(n_samples)))
h_2 = cvxopt.matrix(np.ones(n_samples) * 1/(self._v*len(X)) * scale)
G = cvxopt.matrix(np.vstack((G_1, G_2)))
h = cvxopt.matrix(np.vstack((h_1, h_2)))
cvxopt.solvers.options['show_progress'] = False
solution = cvxopt.solvers.qp(P, q, G, h, A, b)
return np.ravel(solution['x'])
# Returns SVM predicton given feature vector
def predict(self, x):
result = np.sign(self.decision_function(x))
result[result == 0] = 1
return result
    # Returns distance to boundary
    def decision_function(self, x):
        # Signed distance: alpha . K(X, x) - rho; positive means inside
        # the learned region. Requires fit()/rho() to have run first.
        return - self._rho + self._data.alpha().dot(self.gram(self._data.X(), x))
#@profile
def increment_supervised(self, Xc, labels, init_ac=0):
    """Incrementally add the new points Xc to the trained model, honoring
    per-point supervision.

    NOTE(review): the bookkeeping (index-set migration plus rank-one updates
    of the inverse matrix R) looks like the Cauwenberghs/Poggio incremental
    SVM scheme adapted to a one-class SVM -- confirm against the paper this
    implementation follows.

    Index sets maintained throughout:
      inds -- margin support vectors (e < a < C - e)
      inde -- error vectors (a near C)
      indo -- non-support vectors (a near 0)
      indr -- union of inde and indo (all non-margin vectors)

    Parameters:
        Xc      -- new data points, shape (n_new, n_feature)
        labels  -- supervision per new point: 1 = normal, any other nonzero
                   value = anomaly, 0 = unsupervised; None is treated as
                   all-unsupervised (zeros)
        init_ac -- initial alpha coefficient assigned to each new point
    Returns:
        True when the model state was updated, False when no margin support
        vectors exist so incremental training cannot proceed.
    """
    #print "semi supervised"
    # epsilon
    e = self._data._e
    # counts points rejected (not incorporated) due to supervision
    drop = 0
    # initialize existing X, coefficients a, C
    X_origin = self._data.X()
    K_X_origin = self._data.K_X()
    n_data = X_origin.shape[0]
    n_feature = X_origin.shape[1]
    C = self._data.C()
    a_origin = self._data.alpha()
    # number of new incremental points
    n_new = Xc.shape[0]
    # number of all (new and existing) points
    n_all = n_data + n_new
    # concatenate all new points with all existing points
    X = empty((n_new + n_data, n_feature))
    X[0:n_new, :] = Xc
    X[n_new:, :] = X_origin
    # create gram matrix for all new and existing points
    # create of all data points
    if K_X_origin == None:
        K_X = self.gram(X)
    # create gram matrix for new points and add to existing ones
    else:
        K_X = empty((n_all, n_all))
        K_X[n_new:, n_new:] = K_X_origin
        K_X_new = self.gram(Xc, concatenate((Xc,X_origin), axis=0))
        K_X[0:n_new, :] = K_X_new
        K_X[:, 0:n_new] = K_X_new.T
    # creating coefficient vector alpha for all data points
    a = empty(n_all)
    a[n_new:] = a_origin
    a[:n_new] = init_ac
    # creating gradient vector
    g = zeros(n_all)
    # create sensitivity vector
    gamma = empty(n_all)
    # no labels means fully unsupervised increments
    if labels is None:
        labels = zeros(n_new)
    restart = False
    save_state = False
    # loop through all new points to increment
    for x_count in range(n_new):
        #print "--------- START %s ---------" % x_count
        #print "dropped: %s" % drop
        # initialize X, a, and kernel matrices
        # new points occupy the front of the arrays; start_new is the index
        # of the point being incremented this iteration
        start_origin = n_new - x_count
        start_new = start_origin - 1
        K_X_start_new = K_X[start_new:]
        K_X_start_origin = K_X[:, start_origin:]
        a_origin = a[start_origin:]
        label = labels[x_count]
        # initalize indices for bookkeeping
        # after a rollback (unlearning) the index sets were restored and
        # shifted, so the derived lengths and mu must be recomputed
        if restart:
            restart = False
            ls = len(inds) # support vectors length
            lr = len(indr) # error and non-support vectors length
            le = len(inde) # error vectors lenght
            lo = len(indo)
            l = ls + lr
            # calculate mu according to KKT-conditions
            mu = - K_X_start_origin[inds[0]].dot(a_origin)
        if x_count == 0:
            r = range(start_origin, n_all)
            inds = [i for i in r if e < a[i] < C - e]
            indr = [i for i in r if i not in inds]
            inde = [i for i in indr if a[i] > e]
            indo = [i for i in indr if a[i] <= e]
            ls = len(inds) # support vectors length
            lr = len(indr) # error and non-support vectors length
            le = len(inde) # error vectors lenght
            lo = len(indo)
            l = ls + lr
            # calculate mu according to KKT-conditions
            mu = - K_X_start_origin[inds[0]].dot(a_origin)
            # calculate gradient of error and non-support vectors
            g[inds] = 0
            if lr > 0:
                g[indr] = K_X_start_origin[indr].dot(a_origin) + mu
            # Qs caches kernel rows against the support set for the
            # sensitivity (gamma) computation
            Qs = ones((l+1, ls+1))
            Qs[:,1:] = K_X_start_new[:, inds]
        else:
            l = ls + lr
            # grow Qs by one row for the newly considered point
            if ls > 0:
                Qs = concatenate(([K_X_start_new[0, [start_new] + inds]], Qs), axis=0)
            else:
                Qs = concatenate(([1], Qs), axis=1)
        # calculate gradient of error and non-support vectors
        c_inds = [start_new] + inds
        # only calculate gradient if there are support vectors
        if ls > 0:
            gc = K_X_start_origin[start_new].dot(a_origin) + mu
        else:
            #print "Semisupervised Error: No support vectors to train!"
            return False
        ac = a[start_new]
        if x_count == 0:
            # build Q (bordered kernel matrix over the support set) and its
            # inverse R; regularize with a growing ridge if Q is singular
            Q = ones((ls+1, ls+1))
            Q[0, 0] = 0
            inds_row = [[i] for i in inds]
            Q[1:, 1:] = K_X[inds_row, inds]
            try:
                R = inv(Q)
            except np.linalg.linalg.LinAlgError:
                x = 1e-11
                found = False
                print "singular matrix"
                while not found:
                    try:
                        R = inv(Q + diag(ones(ls+1) * x))
                        found = True
                    except np.linalg.linalg.LinAlgError:
                        x = x*10
        loop_count = 0
        # supervised label
        if label != 0:
            gc = K_X_start_origin[start_new].dot(a_origin) + mu
            # normal data point
            if label == 1:
                ac_new = 0
                if gc >= e:
                    #print "drop 1"
                    # all good, nothing to do
                    # but saving the gradient of new data point
                    g[start_new] = gc
                    indr.append(start_new)
                    indo.append(start_new)
                    lr += 1
                    lo += 1
                    continue
                else:
                    # continue with incremental
                    # learning, save previous state
                    # for unlearning
                    save_state = True
            # anomaly
            else:
                ac_new = 1
                if gc < e:
                    # continue with incremental
                    # learning, save previous state
                    # for unlearning
                    save_state = True
                else:
                    # drop this data point
                    #print "drop 2"
                    # remove the point from every array and shift all
                    # bookkeeping indices down by one
                    drop += 1
                    n_all -= 1
                    X = delete(X, start_new, axis=0)
                    K_X = delete(K_X, start_new, axis=0)
                    K_X = delete(K_X, start_new, axis=1)
                    inds = [i - 1 for i in inds]
                    indr = [i - 1 for i in indr]
                    inde = [i - 1 for i in inde]
                    indo = [i - 1 for i in indo]
                    a = delete(a, start_new)
                    g=delete(g, start_new)
                    gamma=delete(gamma, start_new)
                    Qs = Qs[1:,:]
                    restart = True
                    continue
        # saving necessary variables to undo learning:
        if save_state:
            R_save = R
            a_save = a
            g_save = g
            Qs_save = Qs[1:,:]
            indices = [list(inds), list(indr), list(inde), list(indo)]
        # unsupervised label
        # main incremental loop: grow ac until the new point either becomes
        # a margin support vector (gc reaches 0) or an error vector (ac
        # reaches C)
        while gc < e and ac < C - e:
            loop_count += 1
            #print "-------------------- incremental %s-%s ---------" % (x_count, loop_count)
            #calculate beta
            if ls > 0:
                beta = - R.dot(K_X_start_new[0,c_inds])
                #print R
                #print K_X_start_new[0,c_inds]
                betas = beta[1:]
            # calculate gamma
            if lr > 0 and ls > 0:
                # non-empty R and S set
                gamma[start_new:] = Qs.dot(beta) + K_X_start_new[:, start_new]
                gammac = gamma[start_new]
                ggamma = divide(-g, gamma)
            elif ls > 0:
                # empty R set
                gammac = K_X_start_new[0, c_inds].dot(beta) + 1
            else:
                # empty S set
                gammac = 1
                gamma[indr] = 1
                ggamma = -g
            # accounting
            #case 1: Some alpha_i in S reaches a bound
            if ls > 0:
                gsmax = - a[inds]
                gsmax[betas > e] += C
                #print gsmax
                gsmax = divide(gsmax, betas)
                #print betas
                # only consider positive increment weights
                gsmax[absolute(betas) <= e] = inf
                gsmin = min(absolute(gsmax))
                if gsmin != inf:
                    ismin = where(absolute(gsmax) == gsmin)[0][0]
                    #print "----"
            else: gsmin = inf
            #case 2: Some g_i in E reaches zero
            if le > 0:
                # only consider positive margin sensitivity for points in E
                gec = ggamma[inde]
                # only consider positive increment weights
                gec[gec <= e] = inf
                gemin = min(gec)
                if gemin < inf:
                    iemin = where(gec == gemin)[0][0]
            else: gemin = inf
            #case 2: Some g_i in O reaches zero
            if lo > 0 and ls > 0:
                # only consider positive margin sensitivity for points in E
                goc = ggamma[indo]
                # only consider positive increment weights
                goc[goc <= e] = inf
                # find minimum and index of it
                gomin = min(goc)
                if gomin < inf:
                    iomin = where(goc == gomin)[0][0]
            else: gomin = inf
            # case 3: gc becomes zero => algorithm converges
            if gammac > e: gcmin = - gc/gammac
            else: gcmin = inf
            # case 4: ac becomes an error vector => algorithm converges
            if ls > 0: gacmin = C - ac
            else: gacmin = inf
            # determine minimum largest increment
            all_deltas = [gsmin, gemin, gomin, gcmin, gacmin]
            gmin = min(all_deltas)
            imin = where(all_deltas == gmin)[0][0]
            # update a, g
            if ls > 0:
                mu += beta[0]*gmin
                ac += gmin
                a[inds] += betas*gmin
            else:
                mu += gmin
            if lr > 0:
                g += gamma * gmin
            gc += gammac * gmin
            if imin == 0: # min = gsmin => move k from s to r
                ak = a[inds][ismin]
                ind_del = inds[ismin]
                #bookkeeping
                inds.remove(ind_del)
                indr.append(ind_del)
                if ak < e:
                    indo.append(ind_del)
                    lo += 1
                else:
                    inde.append(ind_del)
                    le += 1
                lr += 1
                c_inds = [start_new] + inds
                ismin += 1
                #decrement R, delete row ismin and column ismin
                if ls > 2:
                    ### R update
                    # rank-one downdate of the inverse after removing a
                    # support vector
                    R_new = zeros((ls,ls))
                    R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
                    R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
                    R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
                    R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
                    betak = R[ismin,
                              [i for i in range(ls+1) if i != ismin]]
                    R_new -= outer(betak, betak)/R[ismin,ismin]
                    R = R_new
                    # update Qs for gamma
                    Qs_new = ones((l+1, ls))
                    Qs_new[:, :ismin] = Qs[:,:ismin]
                    Qs_new[:, ismin:] = Qs[:,ismin+1:]
                    Qs = Qs_new
                elif ls == 2:
                    ### R update
                    R = ones((2, 2))
                    R[1,1] = 0
                    R[0,0] = -1
                    # update Qs for gamma
                    Qs_new = ones((l+1, ls))
                    Qs_new[:, :ismin] = Qs[:,:ismin]
                    Qs_new[:, ismin:] = Qs[:,ismin+1:]
                    Qs = Qs_new
                else:
                    # support set became empty; R is marked invalid
                    Qs = ones(l+1)
                    R = inf
                ls -= 1
            elif imin == 1:
                # an error vector's gradient reached zero => move it into S
                ind_del = inde[iemin]
                if ls > 0:
                    # rank-one expansion of R with the new support vector
                    nk = K_X[[ind_del], [ind_del] + inds]
                    betak = - R.dot(nk)
                    k = 1 - nk.dot(R).dot(nk)
                    # guard against a zero pivot (workaround)
                    if k == 0:
                        k = 0.001
                    betak1 = ones(ls + 2)
                    betak1[:-1] = betak
                    R_old = R
                    R = 1/k * outer(betak1, betak1)
                    R[:-1,:-1] += R_old
                else:
                    R = ones((2, 2))
                    R[1,1] = 0
                    R[0,0] = -1
                inds.append(ind_del)
                c_inds = [start_new] + inds
                indr.remove(ind_del)
                inde.remove(ind_del)
                ls += 1
                lr -= 1
                le -= 1
                if ls > 1:
                    Qs_new = ones((l+1, ls+1))
                    Qs_new[:, :-1] = Qs
                    Qs_new[:, ls] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
                else:
                    Qs_new = ones((l+1, 2))
                    Qs_new[:, 1] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
            elif imin == 2: # min = gemin | gomin => move k from r to s
                # delete the elements from X,a and g => add it to the end of X,a,g
                #ind_del = np.asarray(indo)[Io_minus][iomin]
                ind_del = indo[iomin]
                if ls > 0:
                    nk = K_X[[ind_del], [ind_del] + inds]
                    betak = - R.dot(nk)
                    k = 1 - nk.dot(R).dot(nk)
                    # if k = 0
                    if k == 0:
                        k = 0.001
                    betak1 = ones(ls+2)
                    betak1[:-1] = betak
                    R_old = R
                    R = 1/k * outer(betak1, betak1)
                    R[:-1,:-1] += R_old
                else:
                    R = ones((2, 2))
                    R[1,1] = 0
                    R[0,0] = -1
                indo.remove(ind_del)
                indr.remove(ind_del)
                inds.append(ind_del)
                c_inds = [start_new] + inds
                lo -= 1
                lr -= 1
                ls += 1
                if ls > 1:
                    Qs_new = ones((l+1, ls+1))
                    Qs_new[:, :-1] = Qs
                    Qs_new[:, ls] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
                else:
                    Qs_new = ones((l+1, 2))
                    Qs_new[:, 1] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
            elif imin == 3:
                # gc reached zero => new point enters the margin set
                break
            else:
                # ac reached C => new point becomes an error vector
                break
            #loop_count += 1
        # supervised case: check whether the converged state contradicts the
        # label; if so roll back (unlearn) to the saved state and drop the point
        if save_state:
            save_state = False
            # normal data
            if label == 1:
                if ac > C - e:
                    restart = True
            # anomaly
            else:
                if ac <= C - e:
                    restart = True
            if restart:
                drop += 1
                n_all -= 1
                K_X = delete(K_X, start_new, axis=0)
                K_X = delete(K_X, start_new, axis=1)
                X = delete(X, start_new, axis=0)
                gamma = delete(gamma, start_new)
                R = R_save
                a = a_save
                a = delete(a, start_new)
                g = g_save
                g = delete(g, start_new)
                Qs = Qs_save
                inds, indr, inde, indo = indices
                inds = [i - 1 for i in inds]
                indr = [i - 1 for i in indr]
                inde = [i - 1 for i in inde]
                indo = [i - 1 for i in indo]
                continue
        # classify the converged new point into the proper index set
        a[start_new] = ac
        g[start_new] = gc
        if ac < e:
            indr.append(start_new)
            indo.append(start_new)
            lr += 1
            lo += 1
        elif ac > C - e:
            indr.append(start_new)
            inde.append(start_new)
            lr += 1
            le += 1
        else:
            inds.append(start_new)
            g[start_new] = 0
            if len(inds) == 1:
                R = ones((2, 2))
                R[1,1] = 0
                R[0,0] = -1
            else:
                if R.shape[0] != len(inds) + 1:
                    # expand R with the new margin support vector
                    nk = ones(ls+1)
                    nk[1:] = K_X_start_new[[0], inds[:-1]]
                    betak = - R.dot(nk)
                    k = 1 - nk.dot(R).dot(nk)
                    betak1 = ones(ls + 2)
                    betak1[:-1] = betak
                    R_old = R
                    R = 1/k * outer(betak1, betak1)
                    R[:-1,:-1] += R_old
            if ls > 0:
                Qs = concatenate((Qs,K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
            else:
                Qs = concatenate((Qs.reshape((l+1,1)),K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
            Qs[:, 1] = 1
            ls += 1
    # update X, a
    self._data.set_X(X)
    self._data.set_alpha(a)
    self._data.set_C(C)
    self._data.set_K_X(K_X)
    # rho is the negated KKT offset mu
    self._rho = -1 * mu
    #print "dropped: %s" % drop
    return True
def increment(self, Xc, init_ac=0):
    """Incrementally add the new points Xc to the trained model
    (unsupervised variant of increment_supervised).

    NOTE(review): the bookkeeping looks like the Cauwenberghs/Poggio
    incremental SVM scheme adapted to a one-class SVM -- confirm against
    the paper this implementation follows. Also note `restart` is
    initialized False and never set True anywhere in this method, so the
    `x_count == 0 or restart` branch only fires on the first iteration;
    likewise `save_state` and `drop` are assigned but never used here.

    Index sets maintained throughout:
      inds -- margin support vectors (e < a < C - e)
      inde -- error vectors (a near C)
      indo -- non-support vectors (a near 0)
      indr -- union of inde and indo (all non-margin vectors)

    Parameters:
        Xc      -- new data points, shape (n_new, n_feature)
        init_ac -- initial alpha coefficient assigned to each new point
    Returns:
        True when the model state was updated, False when no margin
        support vectors exist so incremental training cannot proceed.
    """
    #print "increment"
    #print Xc.shape
    # epsilon
    e = self._data._e
    # initialize existing X, coefficients a, C
    X_origin = self._data.X()
    K_X_origin = self._data.K_X()
    n_data = X_origin.shape[0]
    n_feature = X_origin.shape[1]
    C = self._data.C()
    a_origin = self._data.alpha()
    # number of new incremental points
    n_new = Xc.shape[0]
    # number of all (new and existing) points
    n_all = n_data + n_new
    # concatenate all new points with all existing points
    X = empty((n_new + n_data, n_feature))
    X[0:n_new, :] = Xc
    X[n_new:, :] = X_origin
    # create gram matrix for all new and existing points
    # create of all data points
    # NOTE(review): `== None` on a possibly-array value -- should be `is None`
    if K_X_origin == None:
        K_X = self.gram(X)
    # create gram matrix for new points and add to existing ones
    else:
        K_X = empty((n_all, n_all))
        K_X[n_new:, n_new:] = K_X_origin
        K_X_new = self.gram(Xc, X_origin)
        K_X[0:n_new, :] = K_X_new
        K_X[:, 0:n_new] = K_X_new.T
    # creating coefficient vector alpha for all data points
    a = empty(n_all)
    a[n_new:] = a_origin
    a[:n_new] = init_ac
    # creating gradient vector
    g = zeros(n_all)
    # create sensitivity vector
    gamma = empty(n_all)
    restart = False
    save_state = False
    drop = 0
    # loop through all new points to increment
    for x_count in range(n_new):
        #print "--------- START %s ---------" % x_count
        # initialize X, a, and kernel matrices
        # new points occupy the front of the arrays; start_new is the index
        # of the point being incremented this iteration
        start_origin = n_new - x_count
        start_new = start_origin - 1
        K_X_start_new = K_X[start_new:]
        K_X_start_origin = K_X[:, start_origin:]
        a_origin = a[start_origin:]
        # initalize indices for bookkeeping
        if x_count == 0 or restart:
            r = range(n_new, n_all)
            inds = [i for i in r if e < a[i] < C - e]
            indr = [i for i in r if i not in inds]
            inde = [i for i in indr if a[i] > e]
            indo = [i for i in indr if a[i] <= e]
            ls = len(inds) # support vectors length
            lr = len(indr) # error and non-support vectors length
            le = len(inde) # error vectors lenght
            lo = len(indo)
            l = ls + lr
            # calculate mu according to KKT-conditions
            if ls == 0:
                return False
            mu = - K_X_start_origin[inds[0]].dot(a_origin)
            # calculate gradient of error and non-support vectors
            g[inds] = 0
            if lr > 0:
                g[indr] = K_X_start_origin[indr].dot(a_origin) + mu
            # Qs caches kernel rows against the support set for the
            # sensitivity (gamma) computation
            Qs = ones((l+1, ls+1))
            Qs[:,1:] = K_X_start_new[:, inds]
            restart = False
        else:
            l += 1
            # grow Qs by one row for the newly considered point
            if ls > 0:
                Qs = concatenate(([K_X_start_new[0, [start_new] + inds]], Qs), axis=0)
        c_inds = [start_new] + inds
        # only calculate gradient if there are support vectors
        if ls > 0:
            gc = K_X_start_origin[start_new].dot(a_origin) + mu
        else:
            print "Error: No support vectors to train!"
            return False
        ac = a[start_new]
        if x_count == 0:
            # build Q (bordered kernel matrix over the support set) and its
            # inverse R; regularize with a growing ridge if Q is singular
            Q = ones((ls+1, ls+1))
            Q[0, 0] = 0
            inds_row = [[i] for i in inds]
            Q[1:, 1:] = K_X[inds_row, inds]
            try:
                R = inv(Q)
            except np.linalg.linalg.LinAlgError:
                x = 1e-11
                found = False
                #print "singular matrix"
                while not found:
                    try:
                        R = inv(Q + diag(ones(ls+1) * x))
                        found = True
                    except np.linalg.linalg.LinAlgError:
                        x = x*10
        loop_count = 0
        # unsupervised label
        # main incremental loop: grow ac until the new point either becomes
        # a margin support vector (gc reaches 0) or an error vector (ac
        # reaches C)
        while gc < e and ac < C - e:
            loop_count += 1
            #print "-------------------- incremental %s-%s ---------" % (x_count, loop_count)
            #calculate beta
            if ls > 0:
                beta = - R.dot(K_X_start_new[0,c_inds])
                #if x_count == 303:
                #    print "ls: %s" % ls
                #    print R
                #print K_X_start_new[0,c_inds]
                betas = beta[1:]
            # calculate gamma
            if lr > 0 and ls > 0:
                # non-empty R and S set
                gamma[start_new:] = Qs.dot(beta) + K_X_start_new[:, start_new]
                gammac = gamma[start_new]
                ggamma = divide(-g, gamma)
            elif ls > 0:
                # empty R set
                gammac = K_X_start_new[0, c_inds].dot(beta) + 1
            else:
                # empty S set
                gammac = 1
                gamma[indr] = 1
                ggamma = -g
            # accounting
            #case 1: Some alpha_i in S reaches a bound
            if ls > 0:
                gsmax = - a[inds]
                gsmax[betas > e] += C
                #print gsmax
                gsmax = divide(gsmax, betas)
                #print betas
                # only consider positive increment weights
                gsmax[absolute(betas) <= e] = inf
                gsmin = min(absolute(gsmax))
                #print "gsmin: %s" % gsmin
                if gsmin != inf:
                    ismin = where(absolute(gsmax) == gsmin)[0][0]
                    #print "----"
            else: gsmin = inf
            #case 2: Some g_i in E reaches zero
            if le > 0:
                # only consider positive margin sensitivity for points in E
                gec = ggamma[inde]
                # only consider positive increment weights
                gec[gec <= e] = inf
                gemin = min(gec)
                if gemin < inf:
                    iemin = where(gec == gemin)[0][0]
            else: gemin = inf
            #case 2: Some g_i in O reaches zero
            if lo > 0 and ls > 0:
                # only consider positive margin sensitivity for points in E
                goc = ggamma[indo]
                # only consider positive increment weights
                goc[goc <= e] = inf
                # find minimum and index of it
                gomin = min(goc)
                if gomin < inf:
                    iomin = where(goc == gomin)[0][0]
            else: gomin = inf
            # case 3: gc becomes zero => algorithm converges
            if gammac > e: gcmin = - gc/gammac
            else: gcmin = inf
            # case 4: ac becomes an error vector => algorithm converges
            if ls > 0: gacmin = C - ac
            else: gacmin = inf
            # determine minimum largest increment
            all_deltas = [gsmin, gemin, gomin, gcmin, gacmin]
            gmin = min(all_deltas)
            imin = where(all_deltas == gmin)[0][0]
            # update a, g
            if ls > 0:
                mu += beta[0]*gmin
                ac += gmin
                a[inds] += betas*gmin
            else:
                mu += gmin
            if lr > 0:
                g += gamma * gmin
            gc += gammac * gmin
            if imin == 0: # min = gsmin => move k from s to r
                ak = a[inds][ismin]
                ind_del = inds[ismin]
                #bookkeeping
                inds.remove(ind_del)
                indr.append(ind_del)
                if ak < e:
                    indo.append(ind_del)
                    lo += 1
                else:
                    inde.append(ind_del)
                    le += 1
                lr += 1
                c_inds = [start_new] + inds
                ismin += 1
                #decrement R, delete row ismin and column ismin
                if ls > 2:
                    ### R update
                    # rank-one downdate of the inverse after removing a
                    # support vector
                    R_new = zeros((ls,ls))
                    R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
                    R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
                    R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
                    R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
                    betak = R[ismin,
                              [i for i in range(ls+1) if i != ismin]]
                    R_new -= outer(betak, betak)/R[ismin,ismin]
                    R = R_new
                    # update Qs for gamma
                    Qs_new = ones((l+1, ls))
                    Qs_new[:, :ismin] = Qs[:,:ismin]
                    Qs_new[:, ismin:] = Qs[:,ismin+1:]
                    Qs = Qs_new
                elif ls == 2:
                    ### R update
                    R = ones((2, 2))
                    R[1,1] = 0
                    R[0,0] = -1
                    # update Qs for gamma
                    Qs_new = ones((l+1, ls))
                    Qs_new[:, :ismin] = Qs[:,:ismin]
                    Qs_new[:, ismin:] = Qs[:,ismin+1:]
                    Qs = Qs_new
                else:
                    # support set became empty; R is marked invalid
                    Qs = ones(l+1)
                    R = inf
                ls -= 1
            elif imin == 1:
                # an error vector's gradient reached zero => move it into S
                ind_del = inde[iemin]
                if ls > 0:
                    # rank-one expansion of R with the new support vector
                    nk = K_X[[ind_del], [ind_del] + inds]
                    betak = - R.dot(nk)
                    k = 1 - nk.dot(R).dot(nk)
                    # guard against a zero pivot (workaround)
                    if k == 0:
                        k = 0.001
                    betak1 = ones(ls + 2)
                    betak1[:-1] = betak
                    R_old = R
                    R = 1/k * outer(betak1, betak1)
                    R[:-1,:-1] += R_old
                else:
                    R = ones((2, 2))
                    R[1,1] = 0
                    R[0,0] = -1
                inds.append(ind_del)
                c_inds = [start_new] + inds
                indr.remove(ind_del)
                inde.remove(ind_del)
                ls += 1
                lr -= 1
                le -= 1
                if ls > 1:
                    Qs_new = ones((l+1, ls+1))
                    Qs_new[:, :-1] = Qs
                    Qs_new[:, ls] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
                else:
                    Qs_new = ones((l+1, 2))
                    Qs_new[:, 1] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
            elif imin == 2: # min = gemin | gomin => move k from r to s
                # delete the elements from X,a and g => add it to the end of X,a,g
                #ind_del = np.asarray(indo)[Io_minus][iomin]
                ind_del = indo[iomin]
                if ls > 0:
                    nk = K_X[[ind_del], [ind_del] + inds]
                    betak = - R.dot(nk)
                    k = 1 - nk.dot(R).dot(nk)
                    #work around!
                    if k == 0:
                        k = 0.001
                    betak1 = ones(ls+2)
                    betak1[:-1] = betak
                    R_old = R
                    R = 1/k * outer(betak1, betak1)
                    R[:-1,:-1] += R_old
                else:
                    R = ones((2, 2))
                    R[1,1] = 0
                    R[0,0] = -1
                indo.remove(ind_del)
                indr.remove(ind_del)
                inds.append(ind_del)
                c_inds = [start_new] + inds
                lo -= 1
                lr -= 1
                ls += 1
                if ls > 1:
                    Qs_new = ones((l+1, ls+1))
                    Qs_new[:, :-1] = Qs
                    Qs_new[:, ls] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
                else:
                    Qs_new = ones((l+1, 2))
                    Qs_new[:, 1] = K_X_start_new[:, ind_del]
                    Qs = Qs_new
            elif imin == 3:
                # gc reached zero => new point enters the margin set
                break
            else:
                # ac reached C => new point becomes an error vector
                break
            #loop_count += 1
        # classify the converged new point into the proper index set
        a[start_new] = ac
        g[start_new] = gc
        if ac < e:
            indr.append(start_new)
            indo.append(start_new)
            lr += 1
            lo += 1
        elif ac > C - e:
            indr.append(start_new)
            inde.append(start_new)
            lr += 1
            le += 1
        else:
            inds.append(start_new)
            g[start_new] = 0
            if len(inds) == 1:
                R = ones((2, 2))
                R[1,1] = 0
                R[0,0] = -1
            else:
                if R.shape[0] != len(inds) + 1:
                    # expand R with the new margin support vector
                    nk = ones(ls+1)
                    nk[1:] = K_X_start_new[[0], inds[:-1]]
                    betak = - R.dot(nk)
                    k = 1 - nk.dot(R).dot(nk)
                    betak1 = ones(ls + 2)
                    betak1[:-1] = betak
                    R_old = R
                    R = 1/k * outer(betak1, betak1)
                    R[:-1,:-1] += R_old
            if ls < 1:
                Qs = concatenate((Qs.reshape((l+1,1)), K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
            else:
                Qs = concatenate((Qs,K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
            Qs[:, 1] = 1
            ls += 1
    # update X, a
    self._data.set_X(X)
    self._data.set_alpha(a)
    self._data.set_C(C)
    self._data.set_K_X(K_X)
    # rho is the negated KKT offset mu
    self._rho = -1 * mu
    return True
def increment_perturb(self, Xc, C_new, init_ac=0, break_count=-1):
# epsilon
e = self._data._e
mu = 0
imin = None
# initialize existing X, coefficients a, C
X_origin = self._data.X()
K_X_origin = self._data.K_X()
n_data = X_origin.shape[0]
n_feature = X_origin.shape[1]
C = self._data.C()
a_origin = self._data.alpha()
# number of new incremental points
n_new = Xc.shape[0]
# number of all (new and existing) points
n_all = n_data + n_new
# concatenate all new points with all existing points
X = empty((n_new + n_data, n_feature))
X[0:n_new, :] = Xc
X[n_new:, :] = X_origin
# create gram matrix for all new and existing points
# create of all data points
if K_X_origin == None:
K_X = self.gram(X)
# create gram matrix for new points and add to existing ones
else:
K_X = empty((n_all, n_all))
K_X[n_new:, n_new:] = K_X_origin
K_X_new = self.gram(Xc, X_origin)
K_X[0:n_new, :] = K_X_new
K_X[:, 0:n_new] = K_X_new.T
# creating coefficient vector alpha for all data points
a = empty(n_all)
a[n_new:] = a_origin
a[:n_new] = init_ac
# creating gradient vector
g = zeros(n_all)
# create sensitivity vector
gamma = empty(n_all)
check_gradient = False
# loop through all new points to increment
for x_count in range(n_new):
#print "--------- START %s ---------" % x_count
# initialize X, a, C, g, indices, kernel values
start_origin = n_new - x_count
start_new = start_origin - 1
K_X_start_new = K_X[start_new:]
K_X_start_origin = K_X[:, start_origin:]
a_origin = a[start_origin:]
if x_count == 0:
r = range(n_new, n_all)
inds = [i for i in r if e < a[i] < C - e]
indr = [i for i in r if i not in inds]
inde = [i for i in indr if a[i] > e]
indo = [i for i in indr if a[i] <= e]
ls = len(inds) # support vectors length
lr = len(indr) # error and non-support vectors length
le = len(inde) # error vectors lenght
lo = len(indo)
l = ls + lr
# calculate mu according to KKT-conditions
mu = - K_X_start_origin[inds[0]].dot(a_origin)
# calculate gradient of error and non-support vectors
if lr > 0:
g[indr] = K_X_start_origin[indr].dot(a_origin) + mu
Qs = ones((l+1, ls+1))
Qs[:,1:] = K_X_start_new[:, inds]
else:
l += 1
Qs = concatenate(([K_X_start_new[0, [start_new] + inds]], Qs), axis=0)
c_inds = [start_new] + inds
# only calculate gradient if there are support vectors
if ls > 0:
gc = K_X_start_origin[start_new].dot(a_origin) + mu
else:
print "Error: No support vectors to train!"
sys.exit()
ac = a[start_new]
if x_count == 0:
Q = ones((ls+1, ls+1))
Q[0, 0] = 0
inds_row = [[i] for i in inds]
Q[1:, 1:] = K_X[inds_row, inds]
try:
R = inv(Q)
except np.linalg.linalg.LinAlgError:
x = 1e-11
found = False
print "singular matrix"
while not found:
try:
R = inv(Q + diag(ones(ls+1) * x))
found = True
except np.linalg.linalg.LinAlgError:
x = x*10
loop_count = 0
while gc < e and ac < C - e:
loop_count += 1
#print "-------------------- incremental %s-%s ---------" % (x_count, loop_count)
#calculate beta
if ls > 0:
beta = - R.dot(K_X_start_new[0,c_inds])
betas = beta[1:]
# calculate gamma
if lr > 0 and ls > 0:
# non-empty R and S set
gamma[start_new:] = Qs.dot(beta) + K_X_start_new[:, start_new]
gammac = gamma[start_new]
ggamma = divide(-g, gamma)
elif ls > 0:
# empty R set
gammac = K_X_start_new[0, c_inds].dot(beta) + 1
else:
# empty S set
gammac = 1
gamma[indr] = 1
ggamma = -g
# accounting
#case 1: Some alpha_i in S reaches a bound
if ls > 0:
gsmax = - a[inds]
gsmax[betas > e] += C
gsmax = divide(gsmax, betas)
# only consider positive increment weights
gsmax[absolute(betas) <= e] = inf
gsmin = min(absolute(gsmax))
if gsmin != inf:
ismin = where(absolute(gsmax) == gsmin)[0][0]
else: gsmin = inf
#case 2: Some g_i in E reaches zero
if le > 0:
# only consider positive margin sensitivity for points in E
gec = ggamma[inde]
# only consider positive increment weights
gec[gec <= e] = inf
gemin = min(gec)
if gemin < inf:
iemin = where(gec == gemin)[0][0]
else: gemin = inf
#case 2: Some g_i in O reaches zero
if lo > 0 and ls > 0:
# only consider positive margin sensitivity for points in E
goc = ggamma[indo]
# only consider positive increment weights
goc[goc <= e] = inf
# find minimum and index of it
gomin = min(goc)
if gomin < inf:
iomin = where(goc == gomin)[0][0]
else: gomin = inf
# case 3: gc becomes zero => algorithm converges
if gammac > e: gcmin = - gc/gammac
else: gcmin = inf
# case 4: ac becomes an error vector => algorithm converges
if ls > 0: gacmin = C - ac
else: gacmin = inf
# determine minimum largest increment
all_deltas = [gsmin, gemin, gomin, gcmin, gacmin]
gmin = min(all_deltas)
imin = where(all_deltas == gmin)[0][0]
# update a, g
if ls > 0:
mu += beta[0]*gmin
ac += gmin
a[inds] += betas*gmin
else:
mu += gmin
if lr > 0:
g += gamma * gmin
gc += gammac * gmin
if imin == 0: # min = gsmin => move k from s to r
ak = a[inds][ismin]
ind_del = inds[ismin]
#bookkeeping
inds.remove(ind_del)
indr.append(ind_del)
if ak < e:
indo.append(ind_del)
lo += 1
else:
inde.append(ind_del)
le += 1
lr += 1
c_inds = [start_new] + inds
ismin += 1
#decrement R, delete row ismin and column ismin
if ls > 2:
### R update
R_new = zeros((ls,ls))
R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
betak = R[ismin,
[i for i in range(ls+1) if i != ismin]]
R_new -= outer(betak, betak)/R[ismin,ismin]
R = R_new
# update Qs for gamma
Qs_new = ones((l+1, ls))
Qs_new[:, :ismin] = Qs[:,:ismin]
Qs_new[:, ismin:] = Qs[:,ismin+1:]
Qs = Qs_new
elif ls == 2:
### R update
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
# update Qs for gamma
Qs_new = ones((l+1, ls))
Qs_new[:, :ismin] = Qs[:,:ismin]
Qs_new[:, ismin:] = Qs[:,ismin+1:]
Qs = Qs_new
else:
Qs = ones(l+1)
R = inf
ls -= 1
elif imin == 1:
ind_del = inde[iemin]
if ls > 0:
nk = K_X[[ind_del], [ind_del] + inds]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls + 2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
else:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
inds.append(ind_del)
c_inds = [start_new] + inds
indr.remove(ind_del)
inde.remove(ind_del)
ls += 1
lr -= 1
le -= 1
if ls > 1:
Qs_new = ones((l+1, ls+1))
Qs_new[:, :-1] = Qs
Qs_new[:, ls] = K_X_start_new[:, ind_del]
Qs = Qs_new
else:
Qs_new = ones((l+1, 2))
Qs_new[:, 1] = K_X_start_new[:, ind_del]
Qs = Qs_new
elif imin == 2: # min = gemin | gomin => move k from r to s
# delete the elements from X,a and g => add it to the end of X,a,g
#ind_del = np.asarray(indo)[Io_minus][iomin]
ind_del = indo[iomin]
if ls > 0:
nk = K_X[[ind_del], [ind_del] + inds]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls+2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
else:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
indo.remove(ind_del)
indr.remove(ind_del)
inds.append(ind_del)
c_inds = [start_new] + inds
lo -= 1
lr -= 1
ls += 1
if ls > 1:
Qs_new = ones((l+1, ls+1))
Qs_new[:, :-1] = Qs
Qs_new[:, ls] = K_X_start_new[:, ind_del]
Qs = Qs_new
else:
Qs_new = ones((l+1, 2))
Qs_new[:, 1] = K_X_start_new[:, ind_del]
Qs = Qs_new
elif imin == 3:
break
else:
break
#loop_count += 1
a[start_new] = ac
g[start_new] = gc
if ac < e:
indr.append(start_new)
indo.append(start_new)
lr += 1
lo += 1
elif ac > C - e:
indr.append(start_new)
inde.append(start_new)
lr += 1
le += 1
else:
inds.append(start_new)
g[start_new] = 0
if len(inds) == 1:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
else:
if R.shape[0] != len(inds) + 1:
nk = ones(ls+1)
nk[1:] = K_X_start_new[[0], inds[:-1]]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls + 2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
Qs = concatenate((Qs,K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
Qs[:, 1] = 1
'''
Qs_new = ones((l+1, ls+2))
if ls > 0: Qs_new[:, :-1] = Qs
Qs_new[:, ls+1] = K_X_start_new[:, start_new]
Qs = Qs_new
'''
ls += 1
#Qs = concatenate((Qs,K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
# update X, a
self._data.set_X(X)
self._data.set_alpha(a)
self._data.set_C(C)
self._data.set_K_X(K_X)
#print self.rho()
self._rho = -1 * mu# epsilon
e = self._data._e
mu = 0
imin = None
# initialize existing X, coefficients a, C
X_origin = self._data.X()
K_X_origin = self._data.K_X()
n_data = X_origin.shape[0]
n_feature = X_origin.shape[1]
C = self._data.C()
a_origin = self._data.alpha()
# number of new incremental points
n_new = Xc.shape[0]
# number of all (new and existing) points
n_all = n_data + n_new
# concatenate all new points with all existing points
X = empty((n_new + n_data, n_feature))
X[0:n_new, :] = Xc
X[n_new:, :] = X_origin
# create gram matrix for all new and existing points
# create of all data points
if K_X_origin == None:
K_X = self.gram(X)
# create gram matrix for new points and add to existing ones
else:
K_X = empty((n_all, n_all))
K_X[n_new:, n_new:] = K_X_origin
K_X_new = self.gram(Xc, X_origin)
K_X[0:n_new, :] = K_X_new
K_X[:, 0:n_new] = K_X_new.T
# creating coefficient vector alpha for all data points
a = empty(n_all)
a[n_new:] = a_origin
a[:n_new] = init_ac
# creating gradient vector
g = zeros(n_all)
# create sensitivity vector
gamma = empty(n_all)
check_gradient = False
# loop through all new points to increment
for x_count in range(n_new):
#print "--------- START %s ---------" % x_count
# initialize X, a, C, g, indices, kernel values
start_origin = n_new - x_count
start_new = start_origin - 1
K_X_start_new = K_X[start_new:]
K_X_start_origin = K_X[:, start_origin:]
a_origin = a[start_origin:]
if x_count == 0:
r = range(n_new, n_all)
inds = [i for i in r if e < a[i] < C - e]
indr = [i for i in r if i not in inds]
inde = [i for i in indr if a[i] > e]
indo = [i for i in indr if a[i] <= e]
ls = len(inds) # support vectors length
lr = len(indr) # error and non-support vectors length
le = len(inde) # error vectors lenght
lo = len(indo)
l = ls + lr
# calculate mu according to KKT-conditions
mu = - K_X_start_origin[inds[0]].dot(a_origin)
# calculate gradient of error and non-support vectors
if lr > 0:
g[indr] = K_X_start_origin[indr].dot(a_origin) + mu
Qs = ones((l+1, ls+1))
Qs[:,1:] = K_X_start_new[:, inds]
else:
l += 1
Qs = concatenate(([K_X_start_new[0, [start_new] + inds]], Qs), axis=0)
c_inds = [start_new] + inds
# only calculate gradient if there are support vectors
if ls > 0:
gc = K_X_start_origin[start_new].dot(a_origin) + mu
else:
print "Error: No support vectors to train!"
sys.exit()
ac = a[start_new]
if x_count == 0:
Q = ones((ls+1, ls+1))
Q[0, 0] = 0
inds_row = [[i] for i in inds]
Q[1:, 1:] = K_X[inds_row, inds]
try:
R = inv(Q)
except np.linalg.linalg.LinAlgError:
x = 1e-11
found = False
print "singular matrix"
while not found:
try:
R = inv(Q + diag(ones(ls+1) * x))
found = True
except np.linalg.linalg.LinAlgError:
x = x*10
loop_count = 0
while gc < e and ac < C - e:
loop_count += 1
#print "-------------------- incremental %s-%s ---------" % (x_count, loop_count)
#calculate beta
if ls > 0:
beta = - R.dot(K_X_start_new[0,c_inds])
betas = beta[1:]
# calculate gamma
if lr > 0 and ls > 0:
# non-empty R and S set
gamma[start_new:] = Qs.dot(beta) + K_X_start_new[:, start_new]
gammac = gamma[start_new]
ggamma = divide(-g, gamma)
elif ls > 0:
# empty R set
gammac = K_X_start_new[0, c_inds].dot(beta) + 1
else:
# empty S set
gammac = 1
gamma[indr] = 1
ggamma = -g
# accounting
#case 1: Some alpha_i in S reaches a bound
if ls > 0:
gsmax = - a[inds]
gsmax[betas > e] += C
gsmax = divide(gsmax, betas)
# only consider positive increment weights
gsmax[absolute(betas) <= e] = inf
gsmin = min(absolute(gsmax))
if gsmin != inf:
ismin = where(absolute(gsmax) == gsmin)[0][0]
else: gsmin = inf
#case 2: Some g_i in E reaches zero
if le > 0:
# only consider positive margin sensitivity for points in E
gec = ggamma[inde]
# only consider positive increment weights
gec[gec <= e] = inf
gemin = min(gec)
if gemin < inf:
iemin = where(gec == gemin)[0][0]
else: gemin = inf
#case 2: Some g_i in O reaches zero
if lo > 0 and ls > 0:
# only consider positive margin sensitivity for points in E
goc = ggamma[indo]
# only consider positive increment weights
goc[goc <= e] = inf
# find minimum and index of it
gomin = min(goc)
if gomin < inf:
iomin = where(goc == gomin)[0][0]
else: gomin = inf
# case 3: gc becomes zero => algorithm converges
if gammac > e: gcmin = - gc/gammac
else: gcmin = inf
# case 4: ac becomes an error vector => algorithm converges
if ls > 0: gacmin = C - ac
else: gacmin = inf
# determine minimum largest increment
all_deltas = [gsmin, gemin, gomin, gcmin, gacmin]
gmin = min(all_deltas)
imin = where(all_deltas == gmin)[0][0]
# update a, g
if ls > 0:
mu += beta[0]*gmin
ac += gmin
a[inds] += betas*gmin
else:
mu += gmin
if lr > 0:
g += gamma * gmin
gc += gammac * gmin
if imin == 0: # min = gsmin => move k from s to r
ak = a[inds][ismin]
ind_del = inds[ismin]
#bookkeeping
inds.remove(ind_del)
indr.append(ind_del)
if ak < e:
indo.append(ind_del)
lo += 1
else:
inde.append(ind_del)
le += 1
lr += 1
c_inds = [start_new] + inds
ismin += 1
#decrement R, delete row ismin and column ismin
if ls > 2:
### R update
R_new = zeros((ls,ls))
R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
betak = R[ismin,
[i for i in range(ls+1) if i != ismin]]
R_new -= outer(betak, betak)/R[ismin,ismin]
R = R_new
# update Qs for gamma
Qs_new = ones((l+1, ls))
Qs_new[:, :ismin] = Qs[:,:ismin]
Qs_new[:, ismin:] = Qs[:,ismin+1:]
Qs = Qs_new
elif ls == 2:
### R update
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
# update Qs for gamma
Qs_new = ones((l+1, ls))
Qs_new[:, :ismin] = Qs[:,:ismin]
Qs_new[:, ismin:] = Qs[:,ismin+1:]
Qs = Qs_new
else:
Qs = ones(l+1)
R = inf
ls -= 1
elif imin == 1:
ind_del = inde[iemin]
if ls > 0:
nk = K_X[[ind_del], [ind_del] + inds]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls + 2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
else:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
inds.append(ind_del)
c_inds = [start_new] + inds
indr.remove(ind_del)
inde.remove(ind_del)
ls += 1
lr -= 1
le -= 1
if ls > 1:
Qs_new = ones((l+1, ls+1))
Qs_new[:, :-1] = Qs
Qs_new[:, ls] = K_X_start_new[:, ind_del]
Qs = Qs_new
else:
Qs_new = ones((l+1, 2))
Qs_new[:, 1] = K_X_start_new[:, ind_del]
Qs = Qs_new
elif imin == 2: # min = gemin | gomin => move k from r to s
# delete the elements from X,a and g => add it to the end of X,a,g
#ind_del = np.asarray(indo)[Io_minus][iomin]
ind_del = indo[iomin]
if ls > 0:
nk = K_X[[ind_del], [ind_del] + inds]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls+2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
else:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
indo.remove(ind_del)
indr.remove(ind_del)
inds.append(ind_del)
c_inds = [start_new] + inds
lo -= 1
lr -= 1
ls += 1
if ls > 1:
Qs_new = ones((l+1, ls+1))
Qs_new[:, :-1] = Qs
Qs_new[:, ls] = K_X_start_new[:, ind_del]
Qs = Qs_new
else:
Qs_new = ones((l+1, 2))
Qs_new[:, 1] = K_X_start_new[:, ind_del]
Qs = Qs_new
elif imin == 3:
break
else:
break
#loop_count += 1
a[start_new] = ac
g[start_new] = gc
if ac < e:
indr.append(start_new)
indo.append(start_new)
lr += 1
lo += 1
elif ac > C - e:
indr.append(start_new)
inde.append(start_new)
lr += 1
le += 1
else:
inds.append(start_new)
g[start_new] = 0
if len(inds) == 1:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
else:
if R.shape[0] != len(inds) + 1:
nk = ones(ls+1)
nk[1:] = K_X_start_new[[0], inds[:-1]]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls + 2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
Qs = concatenate((Qs,K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
Qs[:, 1] = 1
'''
Qs_new = ones((l+1, ls+2))
if ls > 0: Qs_new[:, :-1] = Qs
Qs_new[:, ls+1] = K_X_start_new[:, start_new]
Qs = Qs_new
'''
ls += 1
#Qs = concatenate((Qs,K_X_start_new[:, start_new].reshape((l+1,1))), axis=1)
# update X, a
self._data.set_X(X)
self._data.set_alpha(a)
self._data.set_C(C)
self._data.set_K_X(K_X)
#print self.rho()
self._rho = -1 * mu
##### PERTUBATION START #####
C = 1
lmbda = C_new - C
# if there are no error vectors initially...
if le == 0:
pass
delta_p = (a - C) / lmbda
i = delta_p > e
p = min(delta_p[i], 1)
C += lmbda *p
if p < 1:
# find index of minimum
i = where(delta_p == p)[0][0]
# update R
ismin = inds.index(i)
if ls > 2:
ismin += 1
R_new = zeros((ls,ls))
R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
betak = zeros(ls)
betak[:ismin] = R[ismin, :ismin]
betak[ismin:] = R[ismin, ismin+1:]
R_new -= outer(betak, betak)/R[ismin,ismin]
R = R_new
elif ls == 2:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
# bookkeeping from margin to error
a[i] = 0
inds.remove(i)
indr.append(i)
inde.append(i)
ls -= 1
lr += 1
le += 1
# if there are error vectors to adjust...
disp_p_delta = 0.2
disp_p_count = 1
perturbations = 0
if p < 1:
SQl = np.sum(K_X[:, inde], axis=1) * lmbda
Syl = n_all*lmbda
while p < 1:
perturbations += 1
if ls > 0:
v = zeros(ls + 1)
if p < 1 - e:
v[0] = - Syl - sum(a)/(1-p)
else:
v[0] = -Syl
v[1:] = - -SQl[inds]
beta = R * v
betas = beta[1:]
gamma = zeros(K_X.shape[0])
if lr > 0:
Q = ones((indr, inds))
indr_row = [[i] for i in indr]
Q[1:,:] = K_X[indr_row,inds]
gamma[indr] = Q.dot(betas) + SQl[indr]
else:
beta = 0
gamma = SQl
### minimum increment or decrement
#accounting
#upper limit on change in p_c assuming no other examples change status
delta_p_c = 1 - p;
#case 1: Some alpha_i in S reaches a bound
if ls > 0:
# only consider non-zero coefficient sensitivity betas
# change in p_c that causes a margin vector to change to a reserve vector
IS_minus = betas < - e
gsmax = ones(ls)*inf
# look for greatest increment according to sensitivity
if gsmax[IS_minus].shape[0] > 0:
gsmax[IS_minus] = - a[inds][IS_minus]
gsmax = divide(gsmax, betas)
# find minimum and index of it
gsmin1 = min(absolute(gsmax))
ismin1 = where(gsmax == gsmin1)[0][0]
else:
gsmin = inf
new_beta = betas-lmbda;
IS_plus = new_beta > e
gsmax = ones(ls)*inf
# look for greatest increment according to sensitivity
if gsmax[IS_plus].shape[0] > 0:
gsmax[IS_plus] = (C - a[inds][IS_plus]) / new_beta
gsmin2 = min(absolute(gsmax))
ismin2 = where(gsmax == gsmin2)[0][0]
else:
gsmin2 = inf
#case 2: Some g_i in E reaches zero
if le > 0:
gamma_inde = gamma[inde]
g_inde = g[inde]
# only consider positive margin sensitivity for points in E
Ie_plus = gamma_inde > e
if len(g_inde[Ie_plus]) > 0:
gec = divide(-g_inde[Ie_plus], gamma_inde[Ie_plus])
# only consider positive increment weights
gec[gec <= 0] = inf
# find minimum and index of it
gemin = min(gec)
if gemin < inf:
iemin = where(gec == gemin)[0][0]
else: gemin = inf
else: gemin = inf
#case 2: Some g_i in O reaches zero
if lo > 0 and ls > 0:
gamma_indo = gamma[indo]
g_indo = g[indo]
Io_minus = gamma_indo < - e
if len(g_indo[Io_minus]) > 0:
goc = divide(-g_indo[Io_minus], gamma_indo[Io_minus])
goc[goc <= 0] = inf
goc[g_indo[Io_minus] < 0] = inf
gomin = min(goc)
if gomin < inf:
iomin = where(goc == gomin)[0][0]
else: gomin = inf
else: gomin = inf
# determine minimum largest increment
all_deltas = [gsmin1, gsmin2, gemin, gomin]
gmin = min(all_deltas)
imin = where(all_deltas == gmin)[0][0]
# update a, b, g and p
if lr > 0:
a[indr] += lmbda * gmin
g[indr] += gamma[indr] * gmin
if ls > 0:
mu += beta[0]*gmin
a[inds] += betas*gmin
else:
mu += gmin
p += gmin
C += lmbda * gmin
if imin == 0: # min = gsmin1 => move k from s to o
# if there are more than 1 minimum, just take 1
ak = a[inds][ismin1]
# delete the elements from X,a and g
# => add it to the end of X,a,g
ind_del = inds[ismin1]
inds.remove(ind_del)
indr.append(ind_del)
indo.append(ind_del)
lr += 1
lo += 1
#decrement R, delete row ismin and column ismin
if ls > 2:
ismin += 1
R_new = zeros((ls,ls))
R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
betak = zeros(ls)
betak[:ismin] = R[ismin, :ismin]
betak[ismin:] = R[ismin, ismin+1:]
R_new -= outer(betak, betak)/R[ismin,ismin]
R = R_new
elif ls == 2:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
else:
R = inf
ls -= 1
elif imin == 1:
# if there are more than 1 minimum, just take 1
ak = a[inds][ismin2]
# delete the elements from X,a and g
# => add it to the end of X,a,g
ind_del = inds[ismin2]
inds.remove(ind_del)
indr.append(ind_del)
inde.append(ind_del)
lr += 1
le += 1
#update SQl and Syl when the status of
# indss changes from MARGIN to ERROR
SQl += K_X[:, ind_del] * lmbda
Syl += lmbda
#decrement R, delete row ismin and column ismin
if ls > 2:
ismin += 1
R_new = zeros((ls,ls))
R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
betak = zeros(ls)
betak[:ismin] = R[ismin, :ismin]
betak[ismin:] = R[ismin, ismin+1:]
R_new -= outer(betak, betak)/R[ismin,ismin]
R = R_new
elif ls == 2:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
else:
R = inf
ls -= 1
elif imin == 2:
# delete the elements from X,a and g => add it to the end of X,a,g
ind_del = np.asarray(inde)[Ie_plus][iemin]
#update SQl and Syl when the status of
# indss changes from ERROR to MARGIN
SQl -= K_X[:, ind_del] * lmbda
Syl -= lmbda
#
if ls > 0:
nk = K_X[ind_del, :][[ind_del] + inds]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls + 2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
else:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
# bookkeeping
inds.append(ind_del)
indr.remove(ind_del)
inde.remove(ind_del)
ls += 1
lr -= 1
le -= 1
elif imin == 3: # min = gemin | gomin => move k from r to s
# delete the elements from X,a and g => add it to the end of X,a,g
ind_del = np.asarray(indo)[Io_minus][iomin]
if ls > 0:
nk = ones(ls+1)
nk[1:] = K_X[ind_del,:][inds]
betak = - R.dot(nk)
k = 1 - nk.dot(R).dot(nk)
betak1 = ones(ls+2)
betak1[:-1] = betak
R_old = R
R = 1/k * outer(betak1, betak1)
R[:-1,:-1] += R_old
else:
R = ones((2, 2))
R[1,1] = 0
R[0,0] = -1
indo.remove(ind_del)
indr.remove(ind_del)
inds.append(ind_del)
lo -= 1
lr -= 1
ls += 1
if p >= disp_p_delta*disp_p_count:
disp_p_count += 1;
print 'p = %.2f' % p
##### PERTUBATION END #####
# update X, a
self._data.set_X(X)
self._data.set_alpha(a)
self._data.set_C(C)
self._data.set_K_X(K_X)
#TODO: CHECK IF mu == rho,
# then just set it like that
self.rho()
def test_kkt(self, K_X, a, e, C):
l = a.shape[0]
r = range(l)
inds = [i for i in r if e < a[i] < C - e]
indr = [i for i in r if i not in inds]
inde = [i for i in indr if a[i] > e]
indo = [i for i in indr if a[i] <= e]
ls = len(inds) # support vectors length
lr = len(indr) # error and non-support vectors length
mu = - K_X[inds[0],:].dot(a)
g = ones(l) * -1
g[inds] = zeros(ls)
if lr > 0:
Kr = K_X[indr, :]
g[indr] = Kr.dot(a) + ones((lr,1)) * mu
if len(g[inde][g[inde] > 0]) > 0:
return False
if len(g[indo][g[indo] < 0]) > 0:
return False
if ls > 0:
Ks = K_X[inds, :]
g[inds] = Ks.dot(a) + ones((ls,1)) * mu
for i in range(len(inds)):
if g[inds[0]] <= -e or g[inds[0]] >= C-e:
return False
return True
def KKT(self, X=None, a=None):
    """Verbose KKT diagnostic (Python 2): prints the coefficient
    partitioning and margin values and returns True when the KKT
    conditions hold for the current (or supplied) solution.

    :param X: optional data matrix; when both X and a are omitted the
              currently stored model state is checked instead.
    :param a: optional coefficient vector matching X.
    :return: True if no KKT violation was found, False otherwise.
    """
    print "KKT Test---start"
    e = self._data._e
    C = self._data.C()
    if X == None and a == None:
        # initialize X, a, C, g, indeces, kernel values
        a = self._data.alpha()
        #print "a saved: %s" % a
        #print "KKT test a_c: %s" % a[0]
        #print "KKT test x_c: %s" % self._data.X()[0]
        # reuse the cached kernel matrix when one is available
        if self._data.K_X() != None:
            K_X = self._data.K_X()
        else:
            X = self._data.X() # data points
            K_X = self.gram(X)
    else:
        a = a
        X = X
        K_X = self.gram(X)
    print "sum a: %s" % sum(a)
    # margin (support) vectors: e < a_i < C - e
    inds = [i for i, bool in enumerate(np.all([a > e, a < C - e], axis=0)) if bool]
    print "inds: %s (%s), a[inds]: %s" % (inds, len(inds), a[inds])
    #print "inds: %s" % inds
    # remaining vectors: error vectors plus non-support vectors
    indr = [i for i, bool in enumerate(np.any([a <= e, a >= C - e], axis=0)) if bool]
    #print "indr: %s" % indr
    # error vectors: a_i ~ C
    inde = [i for i, bool in enumerate(a >= C - e) if bool]
    print "inde: %s (%s)" % (inde, len(inde))
    # non-support vectors: a_i ~ 0
    indo = [i for i, bool in enumerate(a <= e) if bool]
    print "indo: %s (%s)" % (indo, len(indo))
    l = len(a)
    ls = len(a[inds]) # support vectors length
    lr = len(a[indr]) # error and non-support vectors length
    # candidate offsets: g should vanish on every support vector
    mu_all = - K_X[inds,:].dot(a)
    print "mu_all: %s" % mu_all
    #print "K_X[inds,:]: %s" % K_X[inds,:]
    #print "a[inds]: %s" % a[inds]
    #print "mu_all: %s" % mu_all
    mu = max(mu_all)
    print "mu: %s" % mu
    g = ones(l) * -1
    g[inds] = zeros(ls)
    if lr > 0:
        Kr = K_X[indr, :]
        g[indr] = Kr.dot(a) + ones((lr,1)) * mu
    if ls > 0:
        Ks = K_X[inds, :]
        g[inds] = Ks.dot(a) + ones((ls,1)) * mu
        print "g[inds]: %s" % g[inds]
    #print "test b: %s" % (K_X[inds,:][:,indr].dot(a[indr]))
    #print "kernel kkt: %s " % K_X[inds,:][:,indr]
    #print "a[0]: %s" % a[0]
    #print "g[0]: %s" % g[0]
    #print "g[inds]: %s" % g[inds]
    KKT = True
    #print "a[54]: %s" % a[54]
    #print "g[55]: %s" % g[55]
    # KKT violation: an error vector must not have a positive margin
    if len(g[inde][g[inde] > 0]) > 0:
        print "g[inde]: %s" % g[inde]
        print g[inde][g[inde] > 0]
        ind = where(g == g[inde][g[inde] > 0])[0]
        print where(g == g[inde][g[inde] > 0])
        print "index (g[inde] > 0): %s" % ind
        print "inde: g[index]: %s, a[index]: %s" % (g[ind], a[ind])
        #print "g[index-1]: %s, a[index-1]: %s" % (g[ind-1], a[ind-1])
        #print "error wrong!"
        #print "g[inde]: %s" %g[inde]
        #print "a[inde]: %s" %a[inde]
        KKT = False
    #print "indo: %s" % indo
    # KKT violation: a non-support vector must not have a negative margin
    if len(g[indo][g[indo] < 0]) > 0:
        #print "non-support wrong!"
        #print "g[indo]: %s" %g[indo]
        ind = where(g == g[indo][g[indo] < 0])[0]
        print "index (g[indo] > 0): %s" % ind
        print "indo: g[index]: %s, a[index]: %s" % (g[ind], a[ind])
        #print "a[indo]: %s" %a[indo]
        KKT = False
    if not KKT:
        print "KKT not satisfied"
    print "KKT Test---end"
    return KKT
#!/usr/bin/env python
# coding: utf-8
from __future__ import with_statement, print_function, absolute_import, division
""" Data analysis Tango device server ... for UPBL09a
"""
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "02/03/2018"
__status__ = "production"
__docformat__ = 'restructuredtext'
import sys
import os
import json
import threading
import logging
import time
import types
import multiprocessing
import six
if six.PY2:
from Queue import Queue
else:
from queue import Queue
logger = logging.getLogger("dahu.server")
# set loglevel at least at INFO
if logger.getEffectiveLevel() > logging.INFO:
logger.setLevel(logging.INFO)
import PyTango
from .job import Job, plugin_factory
try:
from rfoo.utils import rconsole
rconsole.spawn_server()
except ImportError:
logger.debug("No socket opened for debugging -> please install rfoo")
class DahuDS(PyTango.Device_4Impl):
    """
    Tango device server launcher for Dahu server.

    Jobs submitted through ``startJob`` are queued and executed by a worker
    thread; a second thread turns finished jobs into Tango change events
    (``jobSuccess`` / ``jobFailure``).
    """
    def __init__(self, cl, name):
        PyTango.Device_4Impl.__init__(self, cl, name)
        self.init_device()
        self.job_queue = Queue()      # queue containing jobs to process
        self.event_queue = Queue()    # queue containing finished jobs
        self.processing_lock = threading.Semaphore()
        self.stat_lock = threading.Semaphore()
        self.last_stats = "No statistics collected yet, please use the 'collectStatistics' method first"
        self.last_failure = -1
        self.last_success = -1
        self.statistics_threads = None
#        self._ncpu_sem = threading.Semaphore(multiprocessing.cpu_count())
        # start the two threads related to queues: process_job and process_event
        t2 = threading.Thread(target=self.process_job)
        t2.start()
        t1 = threading.Thread(target=self.process_event)
        t1.start()

    def get_name(self):
        """Returns the name of the class"""
        return self.__class__.__name__

    def delete_device(self):
        logger.debug("[Device delete_device method] for device %s" % self.get_name())

    def init_device(self):
        """Set the device state to ON and declare the Tango change events."""
        logger.debug("In %s.init_device()" % self.get_name())
        self.set_state(PyTango.DevState.ON)
        self.get_device_properties(self.get_device_class())
        self.set_change_event("jobSuccess", True, False)
        self.set_change_event("jobFailure", True, False)
        self.set_change_event("statisticsCollected", True, False)

    def always_executed_hook(self):
        pass

    def read_attr_hardware(self, data):
        logger.debug("In %s.read_attr_hardware()" % self.get_name())

    def read_jobSuccess(self, attr):
        # id of the last successfully finished job
        attr.set_value(self.last_success)

    def read_jobFailure(self, attr):
        # id of the last failed job
        attr.set_value(self.last_failure)

    def read_statisticsCollected(self, attr):
        attr.set_value(self.last_stats)

    def getJobState(self, jobId):
        """Return the state of the job identified by jobId."""
        return Job.getStatusFromID(jobId)

    def cleanJob(self, jobId):
        """Release the resources held by the job identified by jobId."""
        return Job.cleanJobFromID(jobId)

    def listPlugins(self):
        """
        List all plugin currently loaded .... with a brief description
        """
        logger.debug("In %s.listPlugins" % (self.get_name()))
        res = ["List of all plugin currently loaded (use initPlugin to loaded additional plugins):"]
        plugins = list(plugin_factory.registry.keys())
        plugins.sort()
        # ROBUSTNESS: a plugin class without a docstring must not crash the listing
        return os.linesep.join(res + [" %s : %s" % (i, (plugin_factory.registry[i].__doc__ or "").split("\n")[0]) for i in plugins])

    def initPlugin(self, name):
        """
        Creates a job with the given plugin

        @param name: name of the plugin class to instantiate
        @return: a human-readable success or error message
        """
        logger.debug("In %s.initPlugin(%s)" % (self.get_name(), name))
        plugin = None  # BUG FIX: was left unbound (NameError) when plugin_factory raised
        err = None
        try:
            plugin = plugin_factory(name)
        except Exception as error:
            err = "plugin %s failed to be instanciated: %s" % (name, error)
            logger.error(err)
        if plugin is None or err:
            # BUG FIX: the format string had a single placeholder for two values
            return "Plugin not found: %s, %s" % (name, err)
        else:
            return "Plugin loaded: %s%s%s" % (name, os.linesep, plugin.__doc__)

    def abort(self, jobId):
        """
        Aborts a job

        @param jobId: ID of the job to stop
        """
        # not implemented yet
        pass

    def quitDahu(self):
        """Terminate the whole server process."""
        logger.debug("In %s.quitDahu()" % self.get_name())
        logger.info("Quitting DahuDS")
        sys.exit()

    def startJob(self, argin):
        """
        Starts a job

        @param argin: 2-list [<Dahu plugin to execute>, <JSON serialized dict>]
        @return: jobID which is an int (-1 for error)
        """
        logger.debug("In %s.startJob()" % self.get_name())
        name, data_input = argin[:2]
        if data_input.strip() == "":
            return -1
        job = Job(name, data_input)
        if job is None:
            return -1
        self.job_queue.put(job)
        return job.id

    def process_job(self):
        """
        Process all jobs in the queue (worker thread, runs forever).
        """
        while True:
            job = self.job_queue.get()
            job.connect_callback(self.finished_processing)
            job.start()

    def finished_processing(self, job):
        """
        callback: when processing is done

        @param job: instance of dahu.job.Job
        """
        logger.debug("In %s.finished_processing id:%s (%s)" % (self.get_name(), job.id, job.status))
#        self._ncpu_sem.release()
        job.clean(wait=False)
        if job.status == job.STATE_SUCCESS:
            self.last_success = job.id
        else:
            # flush the streams so the error trace is visible before the event fires
            sys.stdout.flush()
            sys.stderr.flush()
            self.last_failure = job.id
        self.job_queue.task_done()
        self.event_queue.put(job)

    def process_event(self):
        """
        process finished jobs on the tango side (issue with tango locks)
        """
        while True:
            job = self.event_queue.get()
            if job.status == job.STATE_SUCCESS:
                self.push_change_event("jobSuccess", job.id)
            else:
                self.push_change_event("jobFailure", job.id)

    # TODO one day
#    def getRunning(self):
#        """
#        retrieve the list of plugins currently under execution (with their plugin-Id)
#        """
#        return EDStatus.getRunning()
#
#    def getSuccess(self):
#        """
#        retrieve the list of plugins finished with success (with their plugin-Id)
#        """
#        return EDStatus.getSuccess()
#
#    def getFailure(self):
#        """
#        retrieve the list of plugins finished with failure (with their plugin-Id)
#        """
#        return EDStatus.getFailure()

    def collectStatistics(self):
        """
        Retrieve some statistics on all Dahu-Jobs (asynchronously; read the
        result back with getStatistics)

        @return: a page of information about Dahu-jobs
        """
        self.statistics_threads = threading.Thread(target=self.statistics)
        self.statistics_threads.start()

    def statistics(self):
        """
        retrieve some statistics about past jobs.
        """
        with self.stat_lock:
            fStartStat = time.time()
            self.last_stats = Job.stats()
            self.last_stats += os.linesep + "Statistics collected on %s, the collect took: %.3fs" % (time.asctime(), time.time() - fStartStat)
            self.push_change_event("statisticsCollected", self.last_stats)

    def getStatistics(self):
        """
        just return statistics previously calculated
        """
        if self.statistics_threads:
            self.statistics_threads.join()
        return self.last_stats

    def getJobOutput(self, jobId):
        """
        Retrieve JSON output from a job

        @param jobId: name of the job
        @return: output from a job
        """
        return Job.getDataOutputFromId(jobId, as_JSON=True)

    def getJobInput(self, jobId):
        """
        Retrieve input from a job as JSON string

        @param jobId: identifier of the job (int)
        @return: JSON serialized input from a job
        """
        return Job.getDataInputFromId(jobId, as_JSON=True)

    def getJobError(self, jobId):
        """
        Retrieve error message from a job as a string

        @param jobId: identifier of the job (int)
        @return: Error message
        """
        return Job.getErrorFromId(jobId)

    def waitJob(self, jobId):
        """
        Wait for a job to be finished and returns the status.
        May cause Tango timeout if too slow to finish ....
        May do polling to wait the job actually started

        @param jobId: identifier of the job (int)
        @return: status of the job
        """
        res = Job.synchronize_job(jobId)
        i = 0
        # poll for up to ~1s in case the job has not actually started yet
        while res == Job.STATE_UNINITIALIZED:
            if i > 10:
                break
            i += 1
            time.sleep(0.1)
            res = Job.synchronize_job(jobId)
        return res
class DahuDSClass(PyTango.DeviceClass):
    """Tango DeviceClass for DahuDS: declares the device properties, the
    commands and the attributes this server exports over Tango."""
    #    Class Properties
    class_property_list = {
        }

    #    Device Properties
    device_property_list = {
        'plugins_directory':
            [PyTango.DevString,
             "Dahu plugins directory",
             [] ],
        }

    #    Command definitions: name -> [[input type, input doc], [output type, output doc]]
    cmd_list = {
        'startJob': [[PyTango.DevVarStringArray, "[<Dahu plugin to execute>, <JSON serialized dict>]"], [PyTango.DevLong, "job id"]],
        'abort': [[PyTango.DevLong, "job id"], [PyTango.DevBoolean, ""]],
        'getJobState': [[PyTango.DevLong, "job id"], [PyTango.DevString, "job state"]],
        'initPlugin': [[PyTango.DevString, "plugin name"], [PyTango.DevString, "Message"]],
        'cleanJob':[[PyTango.DevLong, "job id"], [PyTango.DevString, "Message"]],
        'collectStatistics':[[PyTango.DevVoid, "nothing needed"], [PyTango.DevVoid, "Collect some statistics about jobs within Dahu"]],
        'getStatistics':[[PyTango.DevVoid, "nothing needed"], [PyTango.DevString, "Retrieve statistics about Dahu-jobs"]],
        'getJobOutput': [[PyTango.DevLong, "job id"], [PyTango.DevString, "<JSON serialized dict>"]],
        'getJobInput': [[PyTango.DevLong, "job id"], [PyTango.DevString, "<JSON serialized dict>"]],
        'getJobError': [[PyTango.DevLong, "job id"], [PyTango.DevString, "Error message"]],
        'listPlugins': [[PyTango.DevVoid, "nothing needed"], [PyTango.DevString, "prints the list of all plugin classes currently loaded"]],
        'waitJob': [[PyTango.DevLong, "job id"], [PyTango.DevString, "job state"]],
        }

    #    Attribute definitions (all scalar, read-only; see DahuDS.read_* methods)
    attr_list = {
        'jobSuccess':
            [[PyTango.DevLong,
              PyTango.SCALAR,
              PyTango.READ]],
        'jobFailure':
            [[PyTango.DevLong,
              PyTango.SCALAR,
              PyTango.READ]],
        'statisticsCollected':
            [[PyTango.DevString,
              PyTango.SCALAR,
              PyTango.READ]],
        }

    def __init__(self, name):
        PyTango.DeviceClass.__init__(self, name)
        self.set_type(name);
        logger.debug("In DahuDSClass constructor")
| kif/UPBL09a | dahu/server.py | Python | gpl-2.0 | 11,107 |
from abc import abstractmethod, abstractproperty
from parso._compatibility import utf8_repr, encoding, py_version
from parso.utils import split_lines
def search_ancestor(node, *node_types):
    """
    Walks up the parent chain of ``node`` and returns the first ancestor
    whose ``type`` is one of ``node_types``; returns ``None`` when the
    chain is exhausted without a match.

    :param node: The ancestors of this node will be checked.
    :param node_types: type names that are searched for.
    :type node_types: tuple of str
    """
    ancestor = node.parent
    while ancestor is not None and ancestor.type not in node_types:
        ancestor = ancestor.parent
    return ancestor
class NodeOrLeaf(object):
    """
    The base class for nodes and leaves.
    """
    __slots__ = ()
    type = None
    '''
    The type is a string that typically matches the types of the grammar file.
    '''

    def get_root_node(self):
        """
        Returns the root node of a parser tree. The returned node doesn't have
        a parent node like all the other nodes/leaves.
        """
        scope = self
        while scope.parent is not None:
            scope = scope.parent
        return scope

    def get_next_sibling(self):
        """
        Returns the node immediately following this node in this parent's
        children list. If this node does not have a next sibling, it is None
        """
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i + 1]
                except IndexError:
                    # self was the last child -> no next sibling.
                    return None

    def get_previous_sibling(self):
        """
        Returns the node immediately preceding this node in this parent's
        children list. If this node does not have a previous sibling, it is
        None.
        """
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i - 1]

    def get_previous_leaf(self):
        """
        Returns the previous leaf in the parser tree.
        Returns `None` if this is the first element in the parser tree.
        """
        # Phase 1: climb up until an ancestor has a left sibling.
        node = self
        while True:
            c = node.parent.children
            i = c.index(node)
            if i == 0:
                node = node.parent
                if node.parent is None:
                    # Reached the root without finding a left sibling.
                    return None
            else:
                node = c[i - 1]
                break

        # Phase 2: descend to the rightmost leaf of that sibling.
        while True:
            try:
                node = node.children[-1]
            except AttributeError:  # A Leaf doesn't have children.
                return node

    def get_next_leaf(self):
        """
        Returns the next leaf in the parser tree.
        Returns None if this is the last element in the parser tree.
        """
        # Phase 1: climb up until an ancestor has a right sibling.
        node = self
        while True:
            c = node.parent.children
            i = c.index(node)
            if i == len(c) - 1:
                node = node.parent
                if node.parent is None:
                    # Reached the root without finding a right sibling.
                    return None
            else:
                node = c[i + 1]
                break

        # Phase 2: descend to the leftmost leaf of that sibling.
        while True:
            try:
                node = node.children[0]
            except AttributeError:  # A Leaf doesn't have children.
                return node

    @abstractproperty
    def start_pos(self):
        """
        Returns the starting position of the prefix as a tuple, e.g. `(3, 4)`.

        :return tuple of int: (line, column)
        """

    @abstractproperty
    def end_pos(self):
        """
        Returns the end position of the prefix as a tuple, e.g. `(3, 4)`.

        :return tuple of int: (line, column)
        """

    @abstractmethod
    def get_start_pos_of_prefix(self):
        """
        Returns the start_pos of the prefix. This means basically it returns
        the end_pos of the last prefix. The `get_start_pos_of_prefix()` of the
        prefix `+` in `2 + 1` would be `(1, 1)`, while the start_pos is
        `(1, 2)`.

        :return tuple of int: (line, column)
        """

    @abstractmethod
    def get_first_leaf(self):
        """
        Returns the first leaf of a node or itself if this is a leaf.
        """

    @abstractmethod
    def get_last_leaf(self):
        """
        Returns the last leaf of a node or itself if this is a leaf.
        """

    @abstractmethod
    def get_code(self, include_prefix=True):
        """
        Returns the code that was input the input for the parser for this node.

        :param include_prefix: Removes the prefix (whitespace and comments) of
            e.g. a statement.
        """
class Leaf(NodeOrLeaf):
    '''
    A leaf is a token with a richer API: it knows exactly where it was
    defined (line/column) and which irrelevant text (the prefix) precedes it.
    '''
    __slots__ = ('value', 'parent', 'line', 'column', 'prefix')

    def __init__(self, value, start_pos, prefix=''):
        # value: the token string itself.
        self.value = value
        # Assigning start_pos goes through the property setter below and
        # populates self.line / self.column.
        self.start_pos = start_pos
        # prefix: whitespace and comments that are syntactically irrelevant.
        self.prefix = prefix
        # Set by the enclosing BaseNode once the tree is assembled.
        self.parent = None

    @property
    def start_pos(self):
        return self.line, self.column

    @start_pos.setter
    def start_pos(self, position):
        self.line, self.column = position[0], position[1]

    def get_start_pos_of_prefix(self):
        earlier = self.get_previous_leaf()
        if earlier is not None:
            return earlier.end_pos
        # First leaf of the tree: count the prefix lines backwards.
        # split_lines always returns at least [''], hence the + 1.
        return self.line - len(split_lines(self.prefix)) + 1, 0

    def get_first_leaf(self):
        return self

    def get_last_leaf(self):
        return self

    def get_code(self, include_prefix=True):
        return self.prefix + self.value if include_prefix else self.value

    @property
    def end_pos(self):
        value_lines = split_lines(self.value)
        last_line = self.line + len(value_lines) - 1
        last_len = len(value_lines[-1])
        # A single-line token ends relative to its own start column; a
        # multiline token ends at the length of its final line.
        column = self.column + last_len if last_line == self.line else last_len
        return last_line, column

    @utf8_repr
    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self.value or self.type)
class TypedLeaf(Leaf):
    """A leaf whose ``type`` is supplied per instance instead of being a
    class-level attribute."""
    __slots__ = ('type',)

    def __init__(self, type, value, start_pos, prefix=''):
        super(TypedLeaf, self).__init__(value, start_pos, prefix)
        self.type = type
class BaseNode(NodeOrLeaf):
    """
    The super class for all nodes.
    A node has children, a type and possibly a parent node.
    """
    __slots__ = ('children', 'parent')
    type = None

    def __init__(self, children):
        self.children = children
        """
        A list of :class:`NodeOrLeaf` child nodes.
        """
        self.parent = None
        '''
        The parent :class:`BaseNode` of this leaf.
        None if this is the root node.
        '''

    @property
    def start_pos(self):
        # A node starts where its first child starts.
        return self.children[0].start_pos

    def get_start_pos_of_prefix(self):
        return self.children[0].get_start_pos_of_prefix()

    @property
    def end_pos(self):
        # A node ends where its last child ends.
        return self.children[-1].end_pos

    def _get_code_for_children(self, children, include_prefix):
        # Concatenate the code of all children; optionally strip the prefix
        # of the very first child only.
        if include_prefix:
            return "".join(c.get_code() for c in children)
        else:
            first = children[0].get_code(include_prefix=False)
            return first + "".join(c.get_code() for c in children[1:])

    def get_code(self, include_prefix=True):
        return self._get_code_for_children(self.children, include_prefix)

    def get_leaf_for_position(self, position, include_prefixes=False):
        """
        Get the :py:class:`parso.tree.Leaf` at ``position``

        :param tuple position: A position tuple, row, column. Rows start from 1
        :param bool include_prefixes: If ``False``, ``None`` will be returned if ``position`` falls
            on whitespace or comments before a leaf
        :return: :py:class:`parso.tree.Leaf` at ``position``, or ``None``
        """
        # Binary search over the children by end position; recurses into the
        # matching child until a leaf (no get_leaf_for_position attr) is hit.
        def binary_search(lower, upper):
            if lower == upper:
                element = self.children[lower]
                if not include_prefixes and position < element.start_pos:
                    # We're on a prefix.
                    return None
                # In case we have prefixes, a leaf always matches
                try:
                    return element.get_leaf_for_position(position, include_prefixes)
                except AttributeError:
                    return element

            index = int((lower + upper) / 2)
            element = self.children[index]
            if position <= element.end_pos:
                return binary_search(lower, index)
            else:
                return binary_search(index + 1, upper)

        if not ((1, 0) <= position <= self.children[-1].end_pos):
            raise ValueError('Please provide a position that exists within this node.')
        return binary_search(0, len(self.children) - 1)

    def get_first_leaf(self):
        return self.children[0].get_first_leaf()

    def get_last_leaf(self):
        return self.children[-1].get_last_leaf()

    @utf8_repr
    def __repr__(self):
        code = self.get_code().replace('\n', ' ').replace('\r', ' ').strip()
        # On Python 2 the repr must be a byte string.
        if not py_version >= 30:
            code = code.encode(encoding, 'replace')
        return "<%s: %s@%s,%s>" % \
            (type(self).__name__, code, self.start_pos[0], self.start_pos[1])
class Node(BaseNode):
    """Concrete implementation for interior nodes."""
    __slots__ = ('type',)

    def __init__(self, type, children):
        super(Node, self).__init__(children)
        # type: the name of the grammar rule that produced this node.
        self.type = type

    def __repr__(self):
        return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children)
class ErrorNode(BaseNode):
    """
    A node that contains valid nodes/leaves that are followed by a token that
    was invalid. This basically means that the leaf after this node is where
    Python would mark a syntax error.
    """
    __slots__ = ()
    type = 'error_node'
class ErrorLeaf(Leaf):
    """
    A leaf that is either completely invalid in a language (like `$` in Python)
    or is invalid at that position. Like the star in `1 +* 1`.
    """
    __slots__ = ('token_type',)
    type = 'error_leaf'

    def __init__(self, token_type, value, start_pos, prefix=''):
        super(ErrorLeaf, self).__init__(value, start_pos, prefix)
        # token_type: the type the tokenizer originally assigned to this token.
        self.token_type = token_type

    def __repr__(self):
        return "<%s: %s:%s, %s>" % \
            (type(self).__name__, self.token_type, repr(self.value), self.start_pos)
| lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/parso/tree.py | Python | mit | 11,302 |
# Manually imported from:
# https://github.com/Rapptz/RoboDanny/blob/master/cogs/stars.py
# DATE IMPORTED: 2017-04-14
# DATE LAST MODIFIED: 2017-05-06
# Global variables:
messageReaction = "ren" #The emoji name you want to use for initiating stars.
messageReactionCode = "<:ren:303022146513403907>" #The emoji code retrieved by doing \emoji
#messageReaction = "\N{WHITE MEDIUM STAR}" #Default reaction emoji
from discord.ext import commands
import discord
import datetime
from .utils import checks, config
import json
import copy
import random
import asyncio
import logging
import weakref
from collections import Counter
log = logging.getLogger(__name__)
class StarError(commands.CommandError):
    """Command error raised when a starboard operation cannot proceed,
    e.g. when the configured starboard channel cannot be found."""
    pass
class MockContext:
    """Bare stand-in for a command context; attributes (guild_id, db,
    starboard) are assigned ad hoc by the caller (see ``run_janitor``)."""
    pass
def requires_starboard():
    """Command check that resolves and attaches starboard state to the context.

    Outside of a server (or when no cog is attached) the check passes
    without touching the context.  Otherwise it stores ``guild_id``, the
    per-guild config ``db`` and the resolved ``starboard`` channel on the
    context, raising :class:`StarError` when the configured channel cannot
    be found.
    """
    def predicate(ctx):
        server = ctx.message.server
        if ctx.cog is not None and server is not None:
            ctx.guild_id = server.id
            ctx.db = ctx.cog.stars.get(ctx.guild_id, {})
            ctx.starboard = ctx.bot.get_channel(ctx.db.get('channel'))
            if ctx.starboard is None:
                raise StarError('\N{WARNING SIGN} Starboard channel not found.')
        return True
    return commands.check(predicate)
class Stars:
"""A starboard to upvote posts obviously.
There are two ways to make use of this feature, the first is
via reactions, react to a message with \N{WHITE MEDIUM STAR} and
the bot will automatically add (or remove) it to the starboard.
The second way is via Developer Mode. Enable it under Settings >
Appearance > Developer Mode and then you get access to Copy ID
and using the star/unstar commands.
"""
def __init__(self, bot):
self.bot = bot
# config format: (yeah, it's not ideal or really any good but whatever)
# <guild_id> : <data> where <data> is
# channel: <starboard channel id>
# locked: <boolean indicating locked status>
# message_id: [bot_message, [starred_user_ids]]
self.stars = config.Config('stars.json', cogname='stars')
# cache message objects to save Discord some HTTP requests.
self._message_cache = {}
self._cleaner = self.bot.loop.create_task(self.clean_message_cache())
self._locks = weakref.WeakValueDictionary()
self.janitor_tasks = {
guild_id: self.bot.loop.create_task(self.janitor(guild_id))
for guild_id in self.stars.all()
if self.stars.get(guild_id).get('janitor') is not None
}
def __unload(self):
self._cleaner.cancel()
for task in self.janitor_tasks.values():
try:
task.cancel()
except:
pass
async def clean_message_cache(self):
try:
while not self.bot.is_closed:
self._message_cache.clear()
await asyncio.sleep(3600)
except asyncio.CancelledError:
pass
    async def clean_starboard(self, ctx, stars):
        """Purge starboard messages whose entries have at most ``stars`` starrers.

        ``ctx`` only needs ``db``, ``guild_id`` and ``starboard`` attributes,
        so a MockContext works here as well (see run_janitor).
        """
        # dead_messages holds the *bot* message IDs (data[0]) of entries at or
        # below the threshold.
        dead_messages = {
            data[0]
            for _, data in ctx.db.items()
            if isinstance(data, list) and len(data[1]) <= stars and data[0] is not None
        }
        # delete all the keys from the dict
        # NOTE(review): db is keyed by *original* message IDs, yet these pops use
        # the bot-message IDs collected above — confirm entries are actually
        # removed from the config and not just purged from the channel.
        for msg_id in dead_messages:
            ctx.db.pop(msg_id, None)
        await self.stars.put(ctx.guild_id, ctx.db)
        # Only the most recent 100 starboard messages are candidates for purge.
        await self.bot.purge_from(ctx.starboard, limit=100, check=lambda m: m.id in dead_messages)
    async def run_janitor(self, guild_id):
        """One janitor pass: clean this guild's starboard, then sleep for its configured interval."""
        ctx = MockContext()
        ctx.guild_id = guild_id
        try:
            ctx.db = self.stars[guild_id]
            ctx.starboard = self.bot.get_channel(ctx.db.get('channel'))
            await self.clean_starboard(ctx, 100) # Leave stars that have over 100 stars
            await asyncio.sleep(ctx.db['janitor'])
        except KeyError:
            # guild has no star data or no 'janitor' interval configured; skip quietly
            pass
    async def janitor(self, guild_id):
        """Long-running task wrapper: repeat run_janitor until the bot closes or the task is cancelled."""
        try:
            await self.bot.wait_until_ready()
            while not self.bot.is_closed:
                await self.run_janitor(guild_id)
        except asyncio.CancelledError:
            # cancelled by star_janitor reconfiguration or cog unload
            pass
    def star_emoji(self, stars):
        """Return the emoji used in the starboard header for a given star count.

        The original tiered emoji (see the commented returns) have been replaced
        by a single module-level constant. NOTE(review): ``messageReactionCode``
        is not defined in this chunk — presumably a module-level setting; every
        tier currently yields the same value, so the branches are vestigial.
        """
        if 5 >= stars >= 0:
            #return '\N{WHITE MEDIUM STAR}'
            return messageReactionCode
        elif 10 >= stars >= 6:
            #return '\N{GLOWING STAR}'
            return messageReactionCode
        elif 25 >= stars >= 11:
            #return '\N{DIZZY SYMBOL}'
            return messageReactionCode
        else:
            #return '\N{SPARKLES}'
            return messageReactionCode
def star_gradient_colour(self, stars):
# We define as 13 stars to be 100% of the star gradient (half of the 26 emoji threshold)
# So X / 13 will clamp to our percentage,
# We start out with 0xfffdf7 for the beginning colour
# Gradually evolving into 0xffc20c
# rgb values are (255, 253, 247) -> (255, 194, 12)
# To create the gradient, we use a linear interpolation formula
# Which for reference is X = X_1 * p + X_2 * (1 - p)
p = stars / 13
if p > 1.0:
p = 1.0
red = 255
green = int((194 * p) + (253 * (1 - p)))
blue = int((12 * p) + (247 * (1 - p)))
return (red << 16) + (green << 8) + blue
    def emoji_message(self, msg, starrers):
        """Build the (content, embed) pair posted to the starboard for ``msg``.

        ``starrers`` is the star *count*. The plain-text content carries the
        emoji, count, source channel mention and the original message ID (the
        mention is later parsed back out via ``channel_mentions`` — see
        show_message); the embed mirrors the message body, image and author.
        """
        emoji = self.star_emoji(starrers)
        # base = '%s ID: %s' % (msg.channel.mention, msg.id)
        if starrers > 1:
            base = '%s **%s** %s ID: %s' % (emoji, starrers, msg.channel.mention, msg.id)
        else:
            # single star: the count is omitted from the header
            base = '%s %s ID: %s' % (emoji, msg.channel.mention, msg.id)
        content = msg.content
        e = discord.Embed(description=content)
        if msg.embeds:
            data = discord.Embed.from_data(msg.embeds[0])
            if data.type == 'image':
                e.set_image(url=data.url)
        if msg.attachments:
            url = msg.attachments[0]['url']
            if url.lower().endswith(('png', 'jpeg', 'jpg', 'gif')):
                # image attachments are shown inline
                e.set_image(url=url)
            else:
                # non-image attachments become a markdown link under the text
                attachments = '[Attachment](%s)' % url
                if content:
                    e.description = content + '\n' + attachments
                else:
                    e.description = attachments
        # build the embed
        author = msg.author
        avatar = author.default_avatar_url if not author.avatar else author.avatar_url
        # animated avatars are flattened to a static jpg
        avatar = avatar.replace('.gif', '.jpg')
        e.set_author(name=author.display_name, icon_url=avatar)
        e.timestamp = msg.timestamp
        e.colour = self.star_gradient_colour(starrers)
        return base, e
async def _star_message(self, message, starrer_id, message_id, *, reaction=True):
guild_id = message.server.id
db = self.stars.get(guild_id, {})
starboard = self.bot.get_channel(db.get('channel'))
if starboard is None:
raise StarError('\N{WARNING SIGN} Starboard channel not found.')
if db.get('locked'):
raise StarError('\N{NO ENTRY SIGN} Starboard is locked.')
stars = db.get(message_id, [None, []]) # ew, I know.
starrers = stars[1]
if starrer_id in starrers:
raise StarError('\N{NO ENTRY SIGN} You already starred this message.')
if reaction:
mod = self.bot.get_cog('ModCustom')
if mod:
member = message.server.get_member(starrer_id)
if member and mod.is_plonked(message.server, member): # note: message.server isnt needed
raise StarError('\N{NO ENTRY SIGN} Plonked Member')
elif member and not mod.has_perms(message.server, member):
raise StarError('\N{NO ENTRY SIGN} Member doesn\'t have Perms')
# if the IDs are the same, then they were probably starred using the reaction interface
if message.id != message_id:
msg = await self.get_message(message.channel, message_id)
if msg is None:
raise StarError('\N{BLACK QUESTION MARK ORNAMENT} This message could not be found.')
else:
msg = message
if msg.channel.id == starboard.id:
if not reaction:
raise StarError('\N{NO ENTRY SIGN} Cannot star messages in the starboard without reacting.')
# If we star a message in the starboard then we can do a reverse lookup to check
# what message to star in reality.
# first remove the reaction if applicable:
try:
await self.bot.http.remove_reaction(msg.id, msg.channel.id, messageReaction, starrer_id)
except:
pass # oh well
# do the reverse lookup and update the references
tup = discord.utils.find(lambda t: isinstance(t[1], list) and t[1][0] == message_id, db.items())
if tup is None:
raise StarError('\N{NO ENTRY SIGN} Could not find this message ID in the starboard.')
msg = await self.get_message(msg.channel_mentions[0], tup[0])
if msg is None:
raise StarError('\N{BLACK QUESTION MARK ORNAMENT} This message could not be found.')
# god bless recursion
return await self.star_message(msg, starrer_id, msg.id, reaction=True)
if (len(msg.content) == 0 and len(msg.attachments) == 0) or msg.type is not discord.MessageType.default:
raise StarError('\N{NO ENTRY SIGN} This message cannot be starred.')
if starrer_id == msg.author.id:
raise StarError('\N{NO ENTRY SIGN} You cannot star your own message.')
# check if the message is older than 7 days
seven_days_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
if msg.timestamp < seven_days_ago:
raise StarError('\N{NO ENTRY SIGN} This message is older than 7 days.')
# at this point we can assume that the user did not star the message
# and that it is relatively safe to star
content, embed = self.emoji_message(msg, len(starrers) + 1)
# try to remove the star message since it's 'spammy'
if not reaction:
try:
await self.bot.delete_message(message)
except:
pass
starrers.append(starrer_id)
db[message_id] = stars
# freshly starred
if stars[0] is None:
sent = await self.bot.send_message(starboard, content, embed=embed)
stars[0] = sent.id
await self.stars.put(guild_id, db)
return
bot_msg = await self.get_message(starboard, stars[0])
if bot_msg is None:
await self.bot.say('\N{BLACK QUESTION MARK ORNAMENT} Expected to be in {0.mention} but is not.'.format(starboard))
# remove the entry from the starboard cache since someone deleted it.
# i.e. they did a 'clear' on the stars.
# they can go through this process again if they *truly* want to star it.
db.pop(message_id, None)
await self.stars.put(guild_id, db)
return
await self.stars.put(guild_id, db)
await self.bot.edit_message(bot_msg, content, embed=embed)
    async def star_message(self, message, starrer_id, message_id, *, reaction=True):
        """Public entry point for starring; serialises all starboard mutations per guild."""
        lock = self._locks.get(message.server.id)
        if lock is None:
            # Assign to the local first: the dictionary holds only a *weak*
            # reference, so the local keeps the lock alive while we use it.
            self._locks[message.server.id] = lock = asyncio.Lock(loop=self.bot.loop)
        async with lock:
            await self._star_message(message, starrer_id, message_id, reaction=reaction)
    async def _unstar_message(self, message, starrer_id, message_id):
        """Remove ``starrer_id``'s star from ``message_id``; caller must hold the guild lock.

        Deletes the starboard post when the last star is removed, otherwise
        re-renders it with the decremented count.
        """
        guild_id = message.server.id
        db = self.stars.get(guild_id, {})
        starboard = self.bot.get_channel(db.get('channel'))
        if starboard is None:
            raise StarError('\N{WARNING SIGN} Starboard channel not found.')
        if db.get('locked'):
            raise StarError('\N{NO ENTRY SIGN} Starboard is locked.')
        stars = db.get(message_id)
        if stars is None:
            raise StarError('\N{NO ENTRY SIGN} This message has no stars.')
        starrers = stars[1]
        try:
            starrers.remove(starrer_id)
        except ValueError:
            raise StarError('\N{NO ENTRY SIGN} You have not starred this message.')
        db[message_id] = stars
        bot_msg = await self.get_message(starboard, stars[0])
        # NOTE(review): when the starboard post is missing (bot_msg is None) the
        # removal above is never persisted via self.stars.put — confirm intended.
        if bot_msg is not None:
            if len(starrers) == 0:
                # no more stars, so it's gone from the board
                db.pop(message_id, None)
                await self.stars.put(guild_id, db)
                await self.bot.delete_message(bot_msg)
            else:
                # if the IDs are the same, then they were probably starred using the reaction interface
                if message.id != message_id:
                    msg = await self.get_message(message.channel, message_id)
                    if msg is None:
                        raise StarError('\N{BLACK QUESTION MARK ORNAMENT} This message could not be found.')
                else:
                    msg = message
                # re-render the starboard post with the reduced star count
                content, e = self.emoji_message(msg, len(starrers))
                await self.stars.put(guild_id, db)
                await self.bot.edit_message(bot_msg, content, embed=e)
    async def unstar_message(self, message, starrer_id, message_id):
        """Public entry point for unstarring; serialises all starboard mutations per guild."""
        lock = self._locks.get(message.server.id)
        if lock is None:
            # local binding keeps a strong reference into the WeakValueDictionary
            self._locks[message.server.id] = lock = asyncio.Lock(loop=self.bot.loop)
        async with lock:
            await self._unstar_message(message, starrer_id, message_id)
    @commands.command(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(administrator=True)
    async def starboard(self, ctx, *, name: str = 'starboard'):
        """Sets up the starboard for this server.
        This creates a new channel with the specified name
        and makes it into the server's "starboard". If no
        name is passed in then it defaults to "starboard".
        If the channel is deleted then the starboard is
        deleted as well.
        You must have Administrator permissions to use this
        command or the Bot Admin role.
        """
        server = ctx.message.server
        stars = self.stars.get(server.id, {})
        old_starboard = self.bot.get_channel(stars.get('channel'))
        if old_starboard is not None:
            fmt = 'This server already has a starboard ({.mention})'
            await self.bot.say(fmt.format(old_starboard))
            return
        # an old channel might have been deleted and thus we should clear all its star data
        stars = {}
        my_permissions = ctx.message.channel.permissions_for(server.me)
        args = [server, name]
        if my_permissions.manage_roles:
            # bot can post/manage; @everyone is read-only in the starboard
            mine = discord.PermissionOverwrite(send_messages=True, manage_messages=True, embed_links=True)
            everyone = discord.PermissionOverwrite(read_messages=True, send_messages=False, read_message_history=True)
            args.append((server.me, mine))
            args.append((server.default_role, everyone))
        try:
            channel = await self.bot.create_channel(*args)
        except discord.Forbidden:
            await self.bot.say('\N{NO ENTRY SIGN} I do not have permissions to create a channel.')
        except discord.HTTPException:
            await self.bot.say('\N{PISTOL} This channel name is bad or an unknown error happened.')
        else:
            stars['channel'] = channel.id
            await self.stars.put(server.id, stars)
            await self.bot.say('\N{GLOWING STAR} Starboard created at ' + channel.mention)
async def get_message(self, channel, mid):
try:
return self._message_cache[mid]
except KeyError:
try:
msg = self._message_cache[mid] = await self.bot.get_message(channel, mid)
except discord.HTTPException:
return None
else:
return msg
    async def on_command_error(self, error, ctx):
        """Relay StarError messages to the channel the failing command was used in."""
        if isinstance(error, StarError):
            await self.bot.send_message(ctx.message.channel, error)
# a custom message events
async def on_socket_raw_receive(self, data):
# no binary frames
if isinstance(data, bytes):
return
data = json.loads(data)
event = data.get('t')
payload = data.get('d')
if event not in ('MESSAGE_DELETE', 'MESSAGE_REACTION_ADD',
'MESSAGE_REACTION_REMOVE', 'MESSAGE_REACTION_REMOVE_ALL'):
return
is_message_delete = event == 'MESSAGE_DELETE'
is_reaction_clear = event == 'MESSAGE_REACTION_REMOVE_ALL'
is_reaction = event == 'MESSAGE_REACTION_ADD'
# make sure the reaction is proper
if not is_message_delete and not is_reaction_clear:
emoji = payload['emoji']
if emoji['name'] != messageReaction:
return # not a star reaction
channel = self.bot.get_channel(payload.get('channel_id'))
if channel is None or channel.is_private:
return
# everything past this point is pointless if we're adding a reaction,
# so let's just see if we can star the message and get it over with.
if not is_message_delete and not is_reaction_clear:
message = await self.get_message(channel, payload['message_id'])
member = channel.server.get_member(payload['user_id'])
if member is None or member.bot:
return # denied
verb = 'star' if is_reaction else 'unstar'
coro = getattr(self, '%s_message' % verb)
try:
await coro(message, member.id, message.id)
log.info('User ID %s has %sred Message ID %s' % (payload['user_id'], verb, message.id))
except StarError:
pass
finally:
return
server = channel.server
db = self.stars.get(server.id)
if db is None:
return
starboard = self.bot.get_channel(db.get('channel'))
if starboard is None or (is_message_delete and channel.id != starboard.id):
# the starboard might have gotten deleted?
# or it might not be a delete worth dealing with
return
# see if the message being deleted is in the starboard
if is_message_delete:
msg_id = payload['id']
exists = discord.utils.find(lambda k: isinstance(db[k], list) and db[k][0] == msg_id, db)
if exists:
db.pop(exists)
await self.stars.put(server.id, db)
else:
msg_id = payload['message_id']
try:
value = db.pop(msg_id)
except KeyError:
pass
else:
await self.bot.http.delete_message(starboard.id, value[0], guild_id=server.id)
await self.stars.put(server.id, db)
    @commands.group(pass_context=True, no_pm=True, invoke_without_command=True)
    async def star(self, ctx, message: int):
        """Stars a message via message ID.
        To star a message you should right click on the
        on a message and then click "Copy ID". You must have
        Developer Mode enabled to get that functionality.
        It is recommended that you react to a message with
        '\N{WHITE MEDIUM STAR}' instead since this will
        make it easier.
        You can only star a message once. You cannot star
        messages older than 7 days.
        """
        try:
            # target ID becomes a str because the config dict keys are strings;
            # the invoking message supplies server/channel context.
            await self.star_message(ctx.message, ctx.message.author.id, str(message), reaction=False)
        except StarError as e:
            await self.bot.say(e)
    @star.error
    async def star_error(self, error, ctx):
        """Friendlier feedback when the `star` argument fails int conversion."""
        if isinstance(error, commands.BadArgument):
            await self.bot.say('That is not a valid message ID. Use Developer Mode to get the Copy ID option.')
    @commands.command(pass_context=True, no_pm=True)
    async def unstar(self, ctx, message: int):
        """Unstars a message via message ID.
        To unstar a message you should right click on the
        on a message and then click "Copy ID". You must have
        Developer Mode enabled to get that functionality.
        You cannot unstar messages older than 7 days.
        """
        try:
            await self.unstar_message(ctx.message, ctx.message.author.id, str(message))
        except StarError as e:
            return await self.bot.say(e)
        else:
            # on success, clean up the invoking command message
            await self.bot.delete_message(ctx.message)
@star.command(name='janitor', pass_context=True, no_pm=True)
@checks.admin_or_permissions(administrator=True)
@requires_starboard()
async def star_janitor(self, ctx, minutes: float = 0.0):
"""Set the starboard's janitor clean rate.
The clean rate allows the starboard to cleared from less than 100 star
messages. By setting a clean rate, every N minutes the bot will
routinely cleanup single starred messages from the starboard.
Setting the janitor's clean rate to 0 (or below) disables it.
This command requires the Administrator permission or the Bot
Admin role.
"""
def cleanup_task():
task = self.janitor_tasks.pop(ctx.guild_id)
task.cancel()
ctx.db.pop('janitor', None)
if minutes <= 0.0:
try: #Suppress key error from messaging in the channel if guild_id key doesn't exist.
cleanup_task()
except:
pass
await self.bot.say('\N{SQUARED OK} No more cleaning up.')
else:
if 'janitor' in ctx.db:
cleanup_task()
ctx.db['janitor'] = minutes * 60.0
self.janitor_tasks[ctx.guild_id] = self.bot.loop.create_task(self.janitor(ctx.guild_id))
await self.bot.say('Remember to \N{PUT LITTER IN ITS PLACE SYMBOL}')
await self.stars.put(ctx.guild_id, ctx.db)
@star.command(name='clean', pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
@requires_starboard()
async def star_clean(self, ctx, stars:int = 1):
"""Cleans the starboard
This removes messages in the starboard that only have less
than or equal to the number of specified stars. This defaults to 1.
To continuously do this over a period of time see
the `janitor` subcommand.
This command requires the Manage Server permission or the
Bot Admin role.
"""
stars = 1 if stars < 0 else stars
await self.clean_starboard(ctx, stars)
await self.bot.say('\N{PUT LITTER IN ITS PLACE SYMBOL}')
    @star.command(name='update', no_pm=True, pass_context=True, hidden=True)
    @checks.admin_or_permissions(administrator=True)
    @requires_starboard()
    @commands.cooldown(rate=1, per=5.0*60, type=commands.BucketType.server)
    async def star_update(self, ctx):
        """Updates the starboard's content to the latest format.
        If a message referred in the starboard was deleted then
        the message will be untouched.
        To prevent abuse, only the last 100 messages are updated.
        Warning: This operation takes a long time. As a consequence,
        only those with Administrator permission can use this command
        and it has a cooldown of one use per 5 minutes.
        """
        # invert the db so we can look up entries by their bot-message ID:
        # bot_message_id -> (original_message_id, starrers)
        reconfigured_cache = {
            v[0]: (k, v[1]) for k, v in ctx.db.items()
        }
        async for msg in self.bot.logs_from(ctx.starboard, limit=100):
            try:
                original_id, starrers = reconfigured_cache[msg.id]
                # the source channel is recovered from the mention embedded in
                # the starboard post's content (see emoji_message)
                original_channel = msg.channel_mentions[0]
            except Exception:
                # not one of ours, or no channel mention survived; skip it
                continue
            original_message = await self.get_message(original_channel, original_id)
            if original_message is None:
                continue
            content, embed = self.emoji_message(original_message, len(starrers))
            try:
                await self.bot.edit_message(msg, content, embed=embed)
            except:
                pass # somehow this failed, so ignore it
        await self.bot.say('\N{BLACK UNIVERSAL RECYCLING SYMBOL}')
    @star_update.error
    async def star_update_error(self, error, ctx):
        """Cooldown handler: the bot owner bypasses the 5-minute cooldown, everyone else sees it."""
        if isinstance(error, commands.CommandOnCooldown):
            if checks.is_owner_check(ctx.message):
                # owner bypass: re-run the (argument-less) command directly
                await ctx.invoke(self.star_update)
            else:
                await self.bot.say(error)
    async def show_message(self, ctx, key, value):
        """Re-render a starred entry (``key`` = original message ID, ``value`` = [bot_msg_id, starrers]) into chat."""
        # Unfortunately, we don't store the channel_id internally, so this
        # requires an extra lookup to parse the channel mentions to get the
        # original channel. A consequence of mediocre design I suppose.
        bot_message = await self.get_message(ctx.starboard, value[0])
        if bot_message is None:
            raise RuntimeError('Somehow referring to a deleted message in the starboard?')
        try:
            original_channel = bot_message.channel_mentions[0]
            msg = await self.get_message(original_channel, key)
        except Exception as e:
            raise RuntimeError('An error occurred while fetching message.')
        if msg is None:
            raise RuntimeError('Could not find message. Possibly deleted.')
        content, embed = self.emoji_message(msg, len(value[1]))
        await self.bot.say(content, embed=embed)
    @star.command(name='show', no_pm=True, pass_context=True)
    @commands.cooldown(rate=1, per=10.0, type=commands.BucketType.user)
    @requires_starboard()
    async def star_show(self, ctx, message: int):
        """Shows a starred via message ID.
        To get the ID of a message you should right click on the
        message and then click "Copy ID". You must have
        Developer Mode enabled to get that functionality.
        You can only use this command once per 10 seconds.
        """
        # config keys are strings, so normalise the converted int back
        message = str(message)
        try:
            entry = ctx.db[message]
        except KeyError:
            return await self.bot.say('This message has not been starred.')
        try:
            await self.show_message(ctx, message, entry)
        except Exception as e:
            # show_message raises RuntimeError with a user-presentable reason
            await self.bot.say(e)
    @star_show.error
    async def star_show_error(self, error, ctx):
        """Error handler for `star show`: owner bypasses cooldown; bad IDs get a hint."""
        if isinstance(error, commands.CommandOnCooldown):
            if checks.is_owner_check(ctx.message):
                # NOTE(review): star_show requires a `message` argument, but the
                # re-invoke passes none — confirm this owner bypass actually works.
                await ctx.invoke(self.star_show)
            else:
                await self.bot.say(error)
        elif isinstance(error, commands.BadArgument):
            await self.bot.say('That is not a valid message ID. Use Developer Mode to get the Copy ID option.')
    @star.command(pass_context=True, no_pm=True, name='who')
    async def star_who(self, ctx, message: int):
        """Show who starred a message.
        The ID can either be the starred message ID
        or the message ID in the starboard channel.
        """
        server = ctx.message.server
        db = self.stars.get(server.id, {})
        # config keys are strings
        message = str(message)
        if message in db:
            # starred message ID so this one's rather easy.
            starrers = db[message][1]
        else:
            # this one requires extra look ups...
            # treat the ID as a starboard bot-message ID and reverse-search
            found = discord.utils.find(lambda v: isinstance(v, list) and v[0] == message, db.values())
            if found is None:
                await self.bot.say('No one did.')
                return
            starrers = found[1]
        # drop members who have since left the server (get_member -> None)
        members = filter(None, map(server.get_member, starrers))
        await self.bot.say(', '.join(map(str, members)))
@star.command(pass_context=True, no_pm=True, name='stats')
@requires_starboard()
async def star_stats(self, ctx):
"""Shows statistics on the starboard usage."""
e = discord.Embed()
e.timestamp = ctx.starboard.created_at
e.set_footer(text='Adding stars since')
all_starrers = [(v[1], k) for k, v in ctx.db.items() if isinstance(v, list)]
e.add_field(name='Messages Starred', value=str(len(all_starrers)))
e.add_field(name='Stars Given', value=str(sum(len(x) for x, _ in all_starrers)))
most_stars = max(all_starrers, key=lambda t: len(t[0]))
e.add_field(name='Most Stars Given', value='{} stars\nID: {}'.format(len(most_stars[0]), most_stars[1]))
c = Counter(author for x, _ in all_starrers for author in x)
common = c.most_common(3)
e.add_field(name='\U0001f947 Starrer', value='<@!%s> with %s stars' % common[0])
e.add_field(name='\U0001f948 Starrer', value='<@!%s> with %s stars' % common[1])
e.add_field(name='\U0001f949 Starrer', value='<@!%s> with %s stars' % common[2])
await self.bot.say(embed=e)
    @star.command(pass_context=True, no_pm=True, name='random')
    @requires_starboard()
    async def star_random(self, ctx):
        """Shows a random starred message from this server's starboard."""
        entries = [(k, v) for k, v in ctx.db.items() if isinstance(v, list)]
        # try at most 5 times to get a non-deleted starboard message
        for i in range(5):
            try:
                (k, v) = random.choice(entries)
                await self.show_message(ctx, k, v)
            except Exception:
                # deleted message (or empty board: random.choice raises IndexError)
                continue
            else:
                # a message was shown successfully; stop retrying
                return
        await self.bot.say('Sorry, all I could find are deleted messages. Try again?')
    @star.command(pass_context=True, no_pm=True, name='lock')
    @checks.admin_or_permissions(manage_server=True)
    @requires_starboard()
    async def star_lock(self, ctx):
        """Locks the starboard from being processed.
        This is a moderation tool that allows you to temporarily
        disable the starboard to aid in dealing with star spam.
        When the starboard is locked, no new entries are added to
        the starboard as the bot will no longer listen to reactions or
        star/unstar commands.
        To unlock the starboard, use the `unlock` subcommand.
        To use this command you need Bot Admin role or Manage Server
        permission.
        """
        # the flag is checked in _star_message/_unstar_message
        ctx.db['locked'] = True
        await self.stars.put(ctx.guild_id, ctx.db)
        await self.bot.say('Starboard is now locked.')
    @star.command(pass_context=True, no_pm=True, name='unlock')
    @checks.admin_or_permissions(manage_server=True)
    @requires_starboard()
    async def star_unlock(self, ctx):
        """Unlocks the starboard for re-processing.
        To use this command you need Bot Admin role or Manage Server
        permission.
        """
        # counterpart to `star lock`; clears the flag checked on every (un)star
        ctx.db['locked'] = False
        await self.stars.put(ctx.guild_id, ctx.db)
        await self.bot.say('Starboard is now unlocked.')
def setup(bot):
    """Extension entry point used by the bot's cog loader."""
    bot.add_cog(Stars(bot))
| sedruk/Red-DiscordBot | cogs/stars.py | Python | gpl-3.0 | 31,755 |
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests methods in util.py
"""
import sys
sys.path[0:0] = [""]
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
from bson import timestamp
from mongo_connector.util import (bson_ts_to_long,
long_to_bson_ts,
retry_until_ok)
def err_func():
    """Helper function for retry_until_ok test

    Raises TypeError on every call except the third, where it returns True.
    """
    err_func.counter += 1
    if err_func.counter != 3:
        raise TypeError
    return True

# call counter lives on the function object itself
err_func.counter = 0
class UtilTester(unittest.TestCase):
    """Unit tests for the helpers in mongo_connector.util."""

    def test_bson_ts_to_long(self):
        """A BSON Timestamp round-trips through bson_ts_to_long / long_to_bson_ts."""
        tstamp = timestamp.Timestamp(0x12345678, 0x90abcdef)
        as_long = bson_ts_to_long(tstamp)
        self.assertEqual(as_long, 0x1234567890abcdef)
        self.assertEqual(long_to_bson_ts(as_long), tstamp)

    def test_retry_until_ok(self):
        """retry_until_ok keeps calling until err_func finally succeeds (3rd try)."""
        self.assertTrue(retry_until_ok(err_func))
        self.assertEqual(3, err_func.counter)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| TPopovich/mongo-connector | tests/test_util.py | Python | apache-2.0 | 1,776 |
import json
import os
import sys
def print_usage():
    # Print CLI usage. NOTE: Python 2 `print` statement — this script is py2-only.
    print """./type_from_schema <json_schema> [ARGS]
Options
--typedef <type_name>\t\t\t Displays the type as a typedef."""
def type_of_number(obj):
    """Map a JSON Schema 'number' node to its C++ wrapper type name."""
    return "JSONNumber"

# 'integer' schema nodes use the same wrapper as 'number'.
type_of_integer = type_of_number
def type_of_string(obj):
    """Map a JSON Schema 'string' node to its C++ wrapper type name."""
    return "JSONString"
def type_of_bool(obj):
    """Map a JSON Schema 'bool' node to its C++ wrapper type name."""
    return "JSONBool"
def type_of_array(obj):
    """Build the C++ type for a homogeneous JSON array from its 'items' schema."""
    item_schema = obj["items"]
    # dispatch to the matching module-level type_of_<type> helper
    handler = globals()["type_of_" + item_schema["type"]]
    return "JSONHomogenousArray<" + handler(item_schema) + ">"
def type_of_object(obj):
    """Build the C++ JSONSet<...> type for a JSON Schema 'object' node.

    Each property becomes a NamedType<ValueType, str_to_list_N("name")>
    template argument inside the JSONSet.

    Bug fix: the original never emitted the closing '>' of each
    NamedType<...>, producing an unbalanced template argument list for any
    object with at least one property.
    """
    members = []
    for prop, val in obj["properties"].items():
        # dispatch to the matching module-level type_of_<type> helper
        handler = globals()["type_of_" + val["type"]]
        members.append('NamedType<{0}, str_to_list_{1}("{2}")>'.format(
            handler(val), len(prop), prop))
    return "JSONSet<" + ", ".join(members) + ">"
def main(filename, args):
    """Convert the JSON schema in ``filename`` to a C++ type, optionally as a typedef."""
    typedef_name = None
    # scan args for "--typedef <name>"
    # NOTE(review): `i += 1` inside a `for i in range(...)` does not skip the
    # consumed value on the next iteration, and the caller passes the schema
    # filename as args[0] too — confirm both are intentional.
    for i in range(len(args)):
        if args[i] == "--typedef":
            i += 1
            typedef_name = args[i]
    with open(filename) as f:
        loaded_json = json.loads(f.read())
    if typedef_name is not None:
        sys.stdout.write("typedef ")
    # the schema root is assumed to be an object node
    sys.stdout.write(type_of_object(loaded_json))
    if typedef_name is not None:
        sys.stdout.write(" " + typedef_name + ";")
    sys.stdout.write("\n")
# Script entry: no arguments shows usage, otherwise convert the given schema.
if len(sys.argv) == 1:
    print_usage()
else:
    # NOTE(review): argv[1] is passed as both the filename and the first
    # element of the option list — confirm that is intentional.
    main(sys.argv[1], sys.argv[1:])
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
try:
from docutils.utils.error_reporting import ErrorString # the new way
except ImportError:
from docutils.error_reporting import ErrorString # the old way
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from nbconvert import html
class Notebook(Directive):
    """Use nbconvert to insert a notebook into the environment.
    This is based on the Raw directive in docutils
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    has_content = False
    def run(self):
        """Convert the referenced notebook to HTML and return it as a single raw node."""
        # check if raw html is supported
        if not self.state.document.settings.raw_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        # set up encoding
        attributes = {'format': 'html'}
        # NOTE: `encoding` and `e_handler` are computed but never used below.
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler = self.state.document.settings.input_encoding_error_handler
        # get path to notebook (relative to the document referencing it)
        source_dir = os.path.dirname(
            os.path.abspath(self.state.document.current_source))
        nb_path = os.path.normpath(os.path.join(source_dir,
                                                self.arguments[0]))
        nb_path = utils.relative_path(None, nb_path)
        # convert notebook to html
        exporter = html.HTMLExporter(template_file='full')
        output, resources = exporter.from_filename(nb_path)
        # keep only the interior of <head> and <body> from the exported page
        header = output.split('<head>', 1)[1].split('</head>',1)[0]
        body = output.split('<body>', 1)[1].split('</body>',1)[0]
        # add HTML5 scoped attribute to header style tags
        header = header.replace('<style', '<style scoped="scoped"')
        header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n',
                                '')
        header = header.replace("code,pre{", "code{")
        # Filter out styles that conflict with the sphinx theme.
        filter_strings = [
            'navbar',
            'body{',
            'alert{',
            'uneditable-input{',
            'collapse{',
        ]
        filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
        # these are matched against the *start* of a line, unlike the above
        line_begin = [
            'pre{',
            'p{margin'
        ]
        filterfunc = lambda x: not any([s in x for s in filter_strings])
        header_lines = filter(filterfunc, header.split('\n'))
        filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
        header_lines = filter(filterfunc, header_lines)
        header = '\n'.join(header_lines)
        # concatenate raw html lines
        lines = ['<div class="ipynotebook">']
        lines.append(header)
        lines.append(body)
        lines.append('</div>')
        text = '\n'.join(lines)
        # add dependency so Sphinx rebuilds when the notebook changes
        self.state.document.settings.record_dependencies.add(nb_path)
        attributes['source'] = nb_path
        # create notebook node
        nb_node = notebook('', text, **attributes)
        (nb_node.source, nb_node.line) = \
            self.state_machine.get_source_and_line(self.lineno)
        return [nb_node]
class notebook(nodes.raw):
    """Docutils node carrying the raw HTML produced from a converted notebook."""
    pass
def visit_notebook_node(self, node):
    """HTML writer entry hook: delegate to the standard raw-HTML visitor."""
    self.visit_raw(node)
def depart_notebook_node(self, node):
    """HTML writer exit hook: delegate to the standard raw-HTML visitor."""
    self.depart_raw(node)
def setup(app):
    """Sphinx extension entry point: register the notebook node and directive."""
    app.add_node(notebook,
                 html=(visit_notebook_node, depart_notebook_node))
    app.add_directive('notebook', Notebook)
| wbinventor/openmc | docs/sphinxext/notebook_sphinxext.py | Python | mit | 3,717 |
import torch
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.span_extractors.span_extractor_with_span_width_embedding import (
SpanExtractorWithSpanWidthEmbedding,
)
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.nn import util
@SpanExtractor.register("self_attentive")
class SelfAttentiveSpanExtractor(SpanExtractorWithSpanWidthEmbedding):
    """
    Computes span representations by generating an unnormalized attention score for each
    word in the document. Spans representations are computed with respect to these
    scores by normalising the attention scores for words inside the span.
    Given these attention distributions over every span, this module weights the
    corresponding vector representations of the words in the span by this distribution,
    returning a weighted representation of each span.
    Registered as a `SpanExtractor` with name "self_attentive".
    # Parameters
    input_dim : `int`, required.
        The final dimension of the `sequence_tensor`.
    num_width_embeddings : `int`, optional (default = `None`).
        Specifies the number of buckets to use when representing
        span width features.
    span_width_embedding_dim : `int`, optional (default = `None`).
        The embedding size for the span_width features.
    bucket_widths : `bool`, optional (default = `False`).
        Whether to bucket the span widths into log-space buckets. If `False`,
        the raw span widths are used.
    # Returns
    attended_text_embeddings : `torch.FloatTensor`.
        A tensor of shape (batch_size, num_spans, input_dim), which each span representation
        is formed by locally normalising a global attention over the sequence. The only way
        in which the attention distribution differs over different spans is in the set of words
        over which they are normalized.
    """
    def __init__(
        self,
        input_dim: int,
        num_width_embeddings: int = None,
        span_width_embedding_dim: int = None,
        bucket_widths: bool = False,
    ) -> None:
        super().__init__(
            input_dim=input_dim,
            num_width_embeddings=num_width_embeddings,
            span_width_embedding_dim=span_width_embedding_dim,
            bucket_widths=bucket_widths,
        )
        # one scalar attention logit per token, shared across all spans
        self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
    def get_output_dim(self) -> int:
        """Output size: input_dim, plus the span-width embedding size when configured."""
        if self._span_width_embedding is not None:
            return self._input_dim + self._span_width_embedding.get_output_dim()
        return self._input_dim
    def _embed_spans(
        self,
        sequence_tensor: torch.FloatTensor,
        span_indices: torch.LongTensor,
        sequence_mask: torch.BoolTensor = None,
        span_indices_mask: torch.BoolTensor = None,
    ) -> torch.FloatTensor:
        """Attention-pool each span: softmax the global logits within the span, then weighted-sum the embeddings."""
        # shape (batch_size, sequence_length, 1)
        global_attention_logits = self._global_attention(sequence_tensor)
        # Concatenating the logit onto each embedding lets a single
        # batched_span_select gather both per span.
        # shape (batch_size, sequence_length, embedding_dim + 1)
        concat_tensor = torch.cat([sequence_tensor, global_attention_logits], -1)
        concat_output, span_mask = util.batched_span_select(concat_tensor, span_indices)
        # Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
        span_embeddings = concat_output[:, :, :, :-1]
        # Shape: (batch_size, num_spans, max_batch_span_width)
        span_attention_logits = concat_output[:, :, :, -1]
        # Shape: (batch_size, num_spans, max_batch_span_width)
        span_attention_weights = util.masked_softmax(span_attention_logits, span_mask)
        # Do a weighted sum of the embedded spans with
        # respect to the normalised attention distributions.
        # Shape: (batch_size, num_spans, embedding_dim)
        attended_text_embeddings = util.weighted_sum(span_embeddings, span_attention_weights)
        return attended_text_embeddings
| allenai/allennlp | allennlp/modules/span_extractors/self_attentive_span_extractor.py | Python | apache-2.0 | 3,955 |
from setuptools import setup

# All packaging metadata in one mapping, passed straight through to setuptools.
PACKAGE_METADATA = dict(
    name='tdwrapper',
    version='0.0.1',
    description='Teradata utility wrapper for Python',
    url='https://github.com/changhyeoklee/tdwrapper',
    author='Changhyeok Lee',
    author_email='Changhyeoklee@gmail.com',
    license='MIT',
    packages=['tdwrapper'],
    install_requires=[
        'subprocess32',
        'pandas',
    ],
    zip_safe=False,
)

setup(**PACKAGE_METADATA)
| changhyeoklee/tdwrapper | setup.py | Python | mit | 420 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import models
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AbstractUser, UserManager
from django.db.models import signals
from django.conf import settings
from taggit.managers import TaggableManager
from geonode.base.enumerations import COUNTRIES
from geonode.groups.models import GroupProfile
from account.models import EmailAddress
from .utils import format_address
if 'notification' in settings.INSTALLED_APPS:
from notification import models as notification
class ProfileUserManager(UserManager):
    """User manager that resolves natural keys case-insensitively."""

    def get_by_natural_key(self, username):
        """Look up a user by username, ignoring case."""
        lookup = {'username__iexact': username}
        return self.get(**lookup)
class Profile(AbstractUser):
    """Fully featured Geonode user"""

    # ISO-19115-style responsible-party contact fields; all optional.
    organization = models.CharField(
        _('Organization Name'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('name of the responsible organization'))
    profile = models.TextField(_('Profile'), null=True, blank=True, help_text=_('introduce yourself'))
    position = models.CharField(
        _('Position Name'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('role or position of the responsible person'))
    voice = models.CharField(_('Voice'), max_length=255, blank=True, null=True, help_text=_(
        'telephone number by which individuals can speak to the responsible organization or individual'))
    fax = models.CharField(_('Facsimile'), max_length=255, blank=True, null=True, help_text=_(
        'telephone number of a facsimile machine for the responsible organization or individual'))
    delivery = models.CharField(
        _('Delivery Point'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('physical and email address at which the organization or individual may be contacted'))
    city = models.CharField(
        _('City'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('city of the location'))
    area = models.CharField(
        _('Administrative Area'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('state, province of the location'))
    zipcode = models.CharField(
        _('Postal Code'),
        max_length=255,
        blank=True,
        null=True,
        help_text=_('ZIP or other postal code'))
    country = models.CharField(
        choices=COUNTRIES,
        max_length=3,
        blank=True,
        null=True,
        help_text=_('country of the physical address'))
    keywords = TaggableManager(_('keywords'), blank=True, help_text=_(
        'commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject \
(space or comma-separated'))

    def get_absolute_url(self):
        # Route name from the people urlconf, keyed by username.
        return reverse('profile_detail', args=[self.username, ])

    def __unicode__(self):
        # Python 2 string representation (this codebase targets Django 1.x).
        return u"%s" % (self.username)

    def class_name(value):
        # NOTE(review): parameter is named ``value`` instead of ``self``; it
        # still receives the instance when called as a method — confirm this
        # is intentional (it looks template-helper-like).
        return value.__class__.__name__

    # Case-insensitive username lookups (see ProfileUserManager above).
    objects = ProfileUserManager()
    USERNAME_FIELD = 'username'

    def group_list_public(self):
        # Groups this user belongs to, excluding private ones.
        return GroupProfile.objects.exclude(access="private").filter(groupmember__user=self)

    def group_list_all(self):
        # All groups this user belongs to.
        return GroupProfile.objects.filter(groupmember__user=self)

    def keyword_list(self):
        """
        Returns a list of the Profile's keywords.
        """
        return [kw.name for kw in self.keywords.all()]

    @property
    def name_long(self):
        """Display name: available real-name parts plus the username."""
        if self.first_name and self.last_name:
            return '%s %s (%s)' % (self.first_name, self.last_name, self.username)
        elif (not self.first_name) and self.last_name:
            return '%s (%s)' % (self.last_name, self.username)
        elif self.first_name and (not self.last_name):
            return '%s (%s)' % (self.first_name, self.username)
        else:
            return self.username

    @property
    def location(self):
        """Formatted postal address assembled from the contact fields."""
        return format_address(self.delivery, self.zipcode, self.city, self.area, self.country)
def get_anonymous_user_instance(Profile):
    """Build an unsaved Profile instance representing the anonymous user."""
    anonymous_attrs = {'pk': -1, 'username': 'AnonymousUser'}
    return Profile(**anonymous_attrs)
def profile_post_save(instance, sender, **kwargs):
    """
    Make sure the user belongs by default to the anonymous group.
    This will make sure that anonymous permissions will be granted to the new users.
    """
    from django.contrib.auth.models import Group
    anonymous_group, _created = Group.objects.get_or_create(name='anonymous')
    instance.groups.add(anonymous_group)
    # Keep the Account EmailAddress in sync with the Profile email.
    # Skipped for fixture loads (raw=True) and for users without an email.
    has_email = instance.email not in [u'', '', None]
    if has_email and not kwargs.get('raw', False):
        address, was_created = EmailAddress.objects.get_or_create(
            user=instance, primary=True,
            defaults={'email': instance.email, 'verified': False})
        if not was_created:
            EmailAddress.objects.filter(user=instance, primary=True).update(email=instance.email)
def email_post_save(instance, sender, **kw):
    """Propagate a primary EmailAddress change back onto the Profile row."""
    if not instance.primary:
        return
    Profile.objects.filter(id=instance.user.pk).update(email=instance.email)
def profile_pre_save(instance, sender, **kw):
    """Send an "account_active" notification when an existing inactive
    profile is about to be saved as active (no-op for new profiles)."""
    existing = Profile.objects.filter(id=instance.id)
    if existing.count() == 0:
        return
    was_active = existing.get().is_active
    if instance.is_active and not was_active and \
            'notification' in settings.INSTALLED_APPS:
        notification.send([instance, ], "account_active")
# Wire the profile/email synchronisation handlers to the ORM signals.
signals.pre_save.connect(profile_pre_save, sender=Profile)
signals.post_save.connect(profile_post_save, sender=Profile)
signals.post_save.connect(email_post_save, sender=EmailAddress)
| pjdufour/geonode | geonode/people/models.py | Python | gpl-3.0 | 6,503 |
#!/usr/bin/env python3
import argparse
import ast
import datetime
import json
import os
import sys
import time
from configparser import ConfigParser

from flask import Flask, current_app, g, request, render_template
from flask_cors import CORS, cross_origin
import pymysql
if __name__ == "__main__":
    # Parse CLI options up front so CONFIG/FDEBUG exist for the second
    # __main__ guard at the bottom of the file, which calls ui().
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--configfile", help="Config File for Scheduler", required=True)
    parser.add_argument("-d", "--flaskdebug", action='store_true', help="Turn on Flask Debugging")
    parser._optionals.title = "DESCRIPTION "
    args = parser.parse_args()
    FDEBUG=args.flaskdebug
    CONFIG=args.configfile
def ui(CONFIG, FDEBUG):
    """Build the Flask application and run the development server.

    CONFIG: path to an INI file with [db] and [api] sections whose values
    are Python literals (parsed with ast.literal_eval).
    FDEBUG: enable Flask debug mode.

    Blocks in app.run() until the server exits.
    """
    try:
        # Read Our INI with our data collection rules
        config = ConfigParser()
        config.read(CONFIG)
    except Exception as e:  # pylint: disable=broad-except, invalid-name
        # Fix: the original called sys.exit without importing sys, which
        # turned any configuration failure into a NameError.
        sys.exit('Bad configuration file {}'.format(e))

    # Parse every option in every section as a Python literal.
    config_items = dict()
    for section in config:
        config_items[section] = dict()
        for item in config[section]:
            config_items[section][item] = ast.literal_eval(config[section][item])

    app = Flask(__name__)

    # Enable CORS for UI Guys
    cors = CORS(app, resources={r"/(api)/*": {"origins": "*"}})

    @app.before_request
    def before_request():
        # Expose the parsed config and a fresh DB connection/cursor on g.
        g.config_items = config_items
        try:
            db_conn = pymysql.connect(host=config_items["db"]["dbhostname"],
                                      port=int(config_items["db"]["dbport"]),
                                      user=config_items["db"]["dbuser"],
                                      passwd=config_items["db"]["dbpassword"],
                                      db=config_items["db"]["dbname"],
                                      autocommit=True)
            g.db = db_conn
        except Exception as e:
            # NOTE(review): the request will still fail just below when
            # g.cur is created; consider aborting with a 500 here instead.
            print("Error connecting to database", str(e))
        g.cur = g.db.cursor(pymysql.cursors.DictCursor)

    @app.after_request
    def after_request(response):
        # Cursor/connection cleanup happens in teardown_request.
        return response

    @app.teardown_request
    def teardown_request(response):
        # Close the per-request cursor and connection if they were created.
        cur = getattr(g, 'cur', None)
        if cur is not None:
            cur.close()
        db = getattr(g, 'db', None)
        if db is not None:
            db.close()
        return response

    # API blueprints (version 2), all mounted under the configured prefix.
    from api import (root, synced, addtrans, txid, attempts, txlist,
                     addcontact, confirmemail, watchtx, removetx)

    api_prefix = config_items["api"]["application_prefix"]
    for api_blueprint in (root.root, synced.synced, addtrans.addtrans,
                          txid.txid, attempts.attempts, txlist.txlist,
                          addcontact.addcontact, confirmemail.confirmemail,
                          watchtx.watchtx, removetx.removetx):
        app.register_blueprint(api_blueprint, url_prefix=api_prefix)

    # Display blueprints, all mounted under /display.
    from display import (d_txid, d_txlist, d_addemail, d_addemail_results,
                         d_confirmemail, d_confirmemail_results, d_addtx,
                         d_addtx_results, d_watchtx_results, d_why, d_howto,
                         d_removetx_results)

    for display_blueprint in (d_txid.Dtxid, d_txlist.Dtxlist,
                              d_addemail.Daddemail,
                              d_addemail_results.Daddemail_results,
                              d_confirmemail.Dconfirmemail,
                              d_confirmemail_results.Dconfirmemail_results,
                              d_addtx.Daddtx,
                              d_addtx_results.Daddtx_results,
                              d_watchtx_results.Dwatchtx_results,
                              d_why.Dwhy,
                              d_howto.Dhowto,
                              d_removetx_results.Dremovetx_results):
        app.register_blueprint(display_blueprint, url_prefix="/display")

    @app.route("/")
    def index():
        # Index
        return render_template("index.html.jinja")

    app.run(debug=FDEBUG, port=int(config_items['api']['port']),
            threaded=True, host=config_items['api']['bindaddress'])
# Run if Execute from CLI
if __name__ == "__main__":
    # CONFIG and FDEBUG were parsed in the guard near the top of the file.
    ui(CONFIG, FDEBUG)
from .static import StaticCalculation
from .relax import IonRelaxation
__author__ = 'Guillermo Avendano-Franco'
| MaterialsDiscovery/PyChemia | pychemia/code/abinit/task/__init__.py | Python | mit | 113 |
'''Yet another dev run at making apt better, and improving my understanding of python.
2014-Sep-27, mhw
This attempt doesn't look for a new magic bullet to avoid contending with
argparse, nor does it use it. Rather it is, so far, a simple refactoring ground
of choice functions from apt.py. Ideally each refactoring here will go back into
apt.py.
'''
import os
import sys
import getopt
import glob
import re
import shutil
import string
import urllib
import gzip, tarfile, bz2
import hashlib
import subprocess
import shlex
def get_config():
    """Build the shared configuration mapping.

    Populates the module-level ``config`` dict used by every other function
    and also returns it, so new callers need not rely on the global
    (backward compatible: existing callers ignored the implicit None).

    Keys: root, etc_setup, setup_ini, setup_bak, installed_db,
    installed_db_magic.
    """
    global config
    root = 'C:/OSGeo4W'
    etc_setup = root + '/etc/setup'
    config = {
        'root': root,
        'etc_setup': etc_setup,
        'setup_ini': etc_setup + '/setup.ini',
        'setup_bak': etc_setup + '/setup.bak',
        'installed_db': etc_setup + '/installed.db',
        # First line of installed.db; acts as a format marker.
        'installed_db_magic': 'INSTALLED.DB 2\n',
    }
    return config
def setup(target):
    """Create the skeleton folder tree and an empty installed.db; fetching
    the package index (update()) is currently disabled."""
    target_exists = os.path.exists(target)
    if target_exists:
        sys.exit('Abort: target path "%s" exists' % target)
    os.makedirs(config['etc_setup'])
    write_installed({0: {}})
    # update()
def get_installed(dummy):
    """Return installed packages from ./etc/setup/installed.db.

    Returns a flat dict {name: archive_name}, empty when installed.db does
    not exist. The trailing "status" column of each row is ignored here;
    write_installed() always records it as 0.

    dummy : when truthy, print the result (debug aid).
    """
    installed = {}
    if os.path.exists(config['installed_db']):
        # Fix: ``string.split(i)`` only exists in Python 2; use the str
        # method, which works everywhere. Also close the file deterministically.
        with open(config['installed_db']) as db_file:
            # The first line is the magic header ("INSTALLED.DB 2").
            for line in db_file.readlines()[1:]:
                name, archive, status = line.split()
                installed[name] = archive
    if dummy:
        print(installed)  # debug
    return installed
def write_installed(packages):
    """Record packages in installed.db.

    packages : nested dict {status_int: {name: archive_name}} to merge into
        the existing database, or the string 'debug' to also print the
        current entries. Existing entries are preserved; the whole file is
        rewritten.

    File format::

        INSTALLED.DB 2
        libxml2 libxml2-2.9.1-1.tar.bz2 0
        gdal110dll gdal110dll-1.10.1-1.tar.bz2 0

    Fixes over the original: ``file.writeline`` does not exist
    (AttributeError), each row wrote the archive name twice instead of
    "name archive", and the ``packages`` argument was never recorded.
    """
    installed = get_installed('')
    if packages == 'debug':
        for name in installed:
            print(name, installed[name])
    elif isinstance(packages, dict):
        # Merge the new entries over the existing ones, status by status.
        for entries in packages.values():
            installed.update(entries)
    with open(config['installed_db'], 'w') as db_file:
        db_file.write(config['installed_db_magic'])
        for name, archive in installed.items():
            # Status is always written as 0 (meaning unknown/unused here).
            db_file.write('%s %s 0\n' % (name, archive))
def main(action, args):
    """Dispatch ``action`` (the name of a function in this module) with
    ``args`` (remaining CLI parameters)."""
    get_config()
    print('Action: %s' % action)
    print('Parameters: %s' % args)
    if action == 'setup':
        setup(config['root'])
    else:
        # Dispatch by name instead of eval(): eval would execute arbitrary
        # Python expressions passed on the command line.
        try:
            handler = globals()[action]
        except KeyError:
            sys.exit('Unknown action: %s' % action)
        handler(args)
if __name__ == '__main__':
    # First CLI argument selects the action; the rest are its parameters.
    main(sys.argv[1], sys.argv[2:])

# <<globals>>
# <<parse command line>>
# <<post-parse globals>>
# <<run the commands>>
# <<wrap up>>
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
# imports are all in this file to make moving to pyside easier in the future
# fixme: make sure not to optimize imports on this file
import sip
import os
from anki.utils import isWin, isMac
# Select the PyQt4 v2 APIs so QString/QVariant/QUrl map to native Python
# types; must run before any PyQt4 imports below.
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
sip.setapi('QUrl', 2)
try:
    sip.setdestroyonexit(False)
except:
    # missing in older versions
    pass
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import QWebPage, QWebView, QWebSettings
from PyQt4.QtNetwork import QLocalServer, QLocalSocket
def debug():
    """Drop into pdb, first removing Qt's input hook so the prompt works."""
    from PyQt4.QtCore import pyqtRemoveInputHook
    from pdb import set_trace
    pyqtRemoveInputHook()
    set_trace()
import sys, traceback

# When the DEBUG environment variable is set, uncaught exceptions print
# their traceback and open pdb's post-mortem debugger.
if os.environ.get("DEBUG"):
    def info(type, value, tb):
        from PyQt4.QtCore import pyqtRemoveInputHook
        for line in traceback.format_exception(type, value, tb):
            sys.stdout.write(line)
        pyqtRemoveInputHook()
        from pdb import pm
        pm()
    sys.excepthook = info
qtmajor = (QT_VERSION & 0xff0000) >> 16
qtminor = (QT_VERSION & 0x00ff00) >> 8
# qt4.6 doesn't support ruby tags
if qtmajor <= 4 and qtminor <= 6:
import anki.template.furigana
anki.template.furigana.ruby = r'<span style="display: inline-block; text-align: center; line-height: 1; white-space: nowrap; vertical-align: baseline; margin: 0; padding: 0"><span style="display: block; text-decoration: none; line-height: 1.2; font-weight: normal; font-size: 0.64em">\2</span>\1</span>'
| z-jason/anki | aqt/qt.py | Python | agpl-3.0 | 1,591 |
# Django settings for django-custom-user-example project.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'custommodel',
        # NOTE(review): credentials/host left blank — this relies on local
        # peer/trust auth; fill these in for any non-local deployment.
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': 5432,
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded secret in settings.py ends up in version
# control; load it from the environment for production deployments.
SECRET_KEY = 'hhpbe@1_8x4us$v14+h31pjy)udqz(fg@92a@p-yd=6kk04smp'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'customuser.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'customuser.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'customuser.accounts',
'django_extensions', # Don't use this in production environment
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
AUTH_USER_MODEL = 'accounts.CustomUser'
| aniketmaithani/django-custom-user-example | customuser/settings.py | Python | mit | 5,115 |
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2017 William H. Majoros (martiandna@gmail.com).
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import os
import ProgramName
from GffTranscriptReader import GffTranscriptReader
from Rex import Rex
rex=Rex()
ASSEMBLY="/home/bmajoros/1000G/assembly/"
#RNA_FILE="/home/bmajoros/1000G/assembly/rna150/denovo-rna-nofilter/HG00096.1.txt"
def loadRNA():
    """Read the de-novo junction file (RNA_FILE) and return the set of
    "geneID junctionPos" strings supported by RNA evidence.

    Only rows flagged "denovo" with non-zero splicingActivity are kept;
    both ends of each junction are added to the set.
    """
    keep = set()
    with open(RNA_FILE, "rt") as IN:
        for line in IN:
            fields = line.rstrip().split()
            (denovo, spliceType, gene, hap, coding, transID, altID, strand,
             score, junctionBegin, junctionEnd, fate, identity, variant,
             splicingActivity, newCount, oldCount) = fields
            if denovo != "denovo":
                continue
            # Fix: fields are strings, and the original compared the string
            # to the float 0.0 — never equal — so the zero-activity filter
            # was silently disabled. Compare numerically instead.
            if float(splicingActivity) == 0.0:
                continue
            keep.add(gene + " " + str(junctionBegin))
            keep.add(gene + " " + str(junctionEnd))
    return keep
def getMappedTranscript(gene):
    """Return the transcript whose structure_change extra field is
    "mapped-transcript", or None when the gene has none."""
    for index in range(gene.numTranscripts()):
        candidate = gene.getIthTranscript(index)
        raw_fields = candidate.parseExtraFields()
        tags = candidate.hashExtraFields(raw_fields)
        if tags.get("structure_change", None) == "mapped-transcript":
            return candidate
    return None
def getDistance(trans1,trans2,rna):
    """Distance by which an exon boundary of trans1 shifted relative to
    trans2.

    Returns None unless both transcripts have the same number of raw exons
    and some exon pair differs at exactly one end, with trans1's shifted
    coordinate present in ``rna`` (the RNA-supported junction set). The
    first such exon wins.
    """
    rawExons1=trans1.getRawExons()
    rawExons2=trans2.getRawExons()
    geneID=trans1.getGeneId()
    # Strip a trailing _N suffix so the ID matches the keys used in ``rna``.
    if(rex.find("(\S+)_\d+",geneID)): geneID=rex[1]
    n=len(rawExons1)
    if(len(rawExons2)!=n): return None
    for i in range(n):
        exon1=rawExons1[i]; exon2=rawExons2[i]
        # Begin shifted, end unchanged: measure the begin displacement.
        if(exon1.getBegin()!=exon2.getBegin() and
           exon1.getEnd()==exon2.getEnd()):
            if(geneID+" "+str(exon1.getBegin()) in rna):
                return abs(exon1.getBegin()-exon2.getBegin())
            #else:
                #print("NOT FOUND:",geneID+" "+str(exon1.getBegin()),flush=True)
                #os.system("grep "+geneID+" "+RNA_FILE)
        # End shifted, begin unchanged: measure the end displacement.
        if(exon1.getEnd()!=exon2.getEnd() and
           exon1.getBegin()==exon2.getBegin()):
            if(geneID+" "+str(exon1.getEnd()) in rna):
                return abs(exon1.getEnd()-exon2.getEnd())
            #else:
                #print("NOT FOUND:",geneID+" "+str(exon1.getEnd()),flush=True)
                #os.system("grep "+geneID+" "+RNA_FILE)
    return None
def System(cmd):
    """Echo ``cmd`` and then execute it through the shell."""
    print(cmd,flush=True)
    os.system(cmd)
def processGene(gene, rna, indiv):
    """Print one tab-separated row per RNA-supported denovo-site transcript:
    score, distance, strand, gene id and individual."""
    reference = getMappedTranscript(gene)
    if reference is None:
        return
    for index in range(gene.numTranscripts()):
        transcript = gene.getIthTranscript(index)
        tags = transcript.hashExtraFields(transcript.parseExtraFields())
        if tags.get("structure_change", None) != "denovo-site":
            continue
        score = transcript.getScore()
        distance = getDistance(transcript, reference, rna)
        if distance is None:
            continue
        print(score, distance, gene.getStrand(), gene.getId(), indiv,
              sep="\t", flush=True)
#=========================================================================
# main()
#=========================================================================
if(len(sys.argv)!=3):
    exit(ProgramName.get()+" <indiv> <hap>\n")
(indiv,hap)=sys.argv[1:]
# Per-individual inputs: de-novo junction calls and the haplotype GFF.
RNA_FILE=ASSEMBLY+"rna150/denovo-rna-nofilter/"+indiv+"."+hap+".txt"
gffFile=ASSEMBLY+"combined/"+indiv+"/"+hap+".logreg.gff"
reader=GffTranscriptReader()
genes=reader.loadGenes(gffFile)
rna=loadRNA()
for gene in genes:
    processGene(gene,rna,indiv)
| ReddyLab/1000Genomes | get-denovo-distances.py | Python | gpl-2.0 | 4,317 |
#encoding: utf-8
from . import api
from .resources.tests import Test

# Register API routes (translated from: 添加路由).
api.add_resource(Test, '/tests')
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class GcloudComputeAddresses(GcloudCLI):
    ''' Class to wrap the gcloud compute addresses command'''
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 aname=None,
                 desc=None,
                 region=None,
                 address=None,
                 isglobal=False,
                 verbose=False):
        ''' Constructor for gcloud resource '''
        super(GcloudComputeAddresses, self).__init__()
        self.name = aname
        self.desc = desc
        self.region = region
        self.isglobal = isglobal
        self.address = address
        self.verbose = verbose

    def list_addresses(self, address_name=None, region_name=None):
        '''return a list of addresses

        Without address_name: parses the tabular gcloud output (skipping
        the header row) into [{name, region, address, status}, ...].
        With address_name: parses the single-address output with yaml.load.
        NOTE(review): relies on ``yaml`` being imported by the module this
        generated class is concatenated into — confirm at build time.
        '''
        results = self._list_addresses(address_name, region_name)
        if results['returncode'] == 0:
            if not address_name:
                rval = []
                for addr in results['results'].strip().split('\n')[1:]:
                    aname, region, aip, status = addr.split()
                    rval.append({'name': aname, 'region': region, 'address': aip, 'status': status})
                results['results'] = rval
            else:
                results['results'] = yaml.load(results['results'])
        return results

    def exists(self):
        ''' return whether an address exists

        NOTE(review): normally returns a bool, but on a "was not found"
        error it returns the (truthy) results dict instead — confirm all
        callers handle both shapes.
        '''
        addresses = self.list_addresses()
        if addresses['returncode'] != 0:
            if 'was not found' in addresses['stderr']:
                addresses['returncode'] = 0
                return addresses
            raise GcloudCLIError('Something went wrong. Results: %s' % addresses['stderr'])
        return any([self.name == addr['name'] for addr in addresses['results']])

    def delete_address(self):
        '''delete an address'''
        return self._delete_address(self.name)

    def create_address(self):
        '''create an address'''
        # Only description/region travel in the info dict; name, address and
        # the global flag are passed positionally to the GcloudCLI helper.
        address_info = {}
        address_info['description'] = self.desc
        address_info['region'] = self.region
        return self._create_address(self.name, address_info, self.address, self.isglobal)
| appuio/ansible-role-openshift-zabbix-monitoring | vendor/openshift-tools/ansible/roles/lib_gcloud/build/src/gcloud_compute_addresses.py | Python | apache-2.0 | 2,225 |
from . import func
from .Glitch import Glitch
class Constraints(object):
    """A container for tree topology constraints.
    taxNames
        A list of taxNames in the same order as in
        the data or alignment, and the same order as
        in other tree or trees objects.
    constraintTree
        A partially resolved tree object that
        describes the constraints. You need to include
        all the taxNames.
    For example::
        tNames = ['A', 'B', 'C', 'D', 'E']
        read('(A, B, (E, D), C);')
        constTree = var.trees.pop()
        c = Constraints(tNames, constTree)
        t = func.randomTree(taxNames=tName, constraints=c)
    The constraint tree should not have a bifurcating root.
    You can pass a Constraints object to func.randomTree() and
    Mcmc() to enforce constraints.
    """
    def __init__(self, taxNames, constraintTree):
        if constraintTree.root.getNChildren() == 2:
            raise Glitch('The constraint tree should not have a bifurcating root.')
        self.tree = constraintTree
        self.tree.taxNames = taxNames
        # Re-root on the first taxon so split keys are comparable across
        # trees sharing this taxName order.
        # NOTE(review): assumes taxNames is non-empty — confirm callers.
        self.tree.reRoot(self.tree.taxNames[0], moveInternalName=False)
        # Bit mask with one bit per taxon, all set.
        self.allOnes = 2**(self.tree.nTax) - 1
        self.tree.makeSplitKeys()
        self.constraints = []
        # The first internal node is skipped; only the remaining internal
        # splits are recorded (and stamped onto the node names).
        internalsExceptTheFirst = [n for n in self.tree.iterInternalsNoRoot()][1:]
        for n in internalsExceptTheFirst:
            n.name = n.br.splitKey
            self.constraints.append(n.br.splitKey)
        assert self.constraints, "No constraints?"

    def dump(self):
        """Print the taxNames, the constraint split strings, and draw the
        constraint tree."""
        print('Constraints.dump()')
        print('taxNames:')
        for i in range(self.tree.nTax):
            print(' %3i %s' % (i, self.tree.taxNames[i]))
        print('constraints:')
        for i in self.constraints:
            print(func.getSplitStringFromKey(i, self.tree.nTax))
        self.tree.draw()
| Anaphory/p4-phylogeny | p4/Constraints.py | Python | gpl-2.0 | 1,931 |
#!/usr/bin/env python3
import logging
import json
import math
import os
from configparser import NoOptionError
from gi.repository import Gtk, Gdk, GObject
from lib.videodisplay import VideoDisplay
from lib.audiodisplay import AudioDisplay
import lib.connection as Connection
from lib.config import Config
from vocto.port import Port
class VideoPreviewsController(object):
"""Displays Video-Previews and selection Buttons for them"""
    def __init__(self, video_box, audio_box, win, uibuilder):
        """Set up preview bookkeeping and compute the per-preview size.

        video_box/audio_box : Gtk containers the previews are packed into.
        win : main window, used to attach keyboard accelerators.
        uibuilder : UI loader, used later by addPreview().
        """
        self.log = logging.getLogger('VideoPreviewsController')
        self.win = win
        self.preview_players = {}
        self.previews = {}
        self.volume_sliders = {}
        self.video_box = video_box
        self.audio_box = audio_box
        # Accelerators
        accelerators = Gtk.AccelGroup()
        win.add_accel_group(accelerators)
        # count number of previews
        num_previews = len(Config.getSources()) + len(Config.getLivePreviews())
        # get preview size — assumed (width, height); height is index 1.
        self.previewSize = Config.getPreviewSize()
        # recalculate preview size if in sum they are too large for screen
        screen = Gdk.Screen.get_default()
        if screen.get_height() < self.previewSize[1] * num_previews:
            height = screen.get_height() / num_previews
            self.previewSize = (Config.getVideoRatio() * height, height)
            self.log.warning(
                'Resizing previews so that they fit onto screen to WxH={}x{}'.format(*self.previewSize))
        # connect event-handler and request initial state
        Connection.send('get_video')
def addPreview(self, uibuilder, source, port, has_volume=True):
self.log.info('Initializing video preview %s at port %d', source, port)
video = uibuilder.load_check_widget('video',
os.path.dirname(uibuilder.uifile) +
"/widgetpreview.ui")
video.set_size_request(*self.previewSize)
self.video_box.pack_start(video, fill=False,
expand=False, padding=0)
mix_audio_display = AudioDisplay(self.audio_box, source, uibuilder, has_volume)
player = VideoDisplay(video, mix_audio_display, port=port,
width=self.previewSize[0],
height=self.previewSize[1],
name=source.upper()
)
| voc/voctomix | voctogui/lib/videopreviews.py | Python | mit | 2,466 |
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Graeme Gange <ggange@csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import unicode_literals
import re
from collections import defaultdict
from nltk.ccg.api import PrimitiveCategory, Direction, CCGVar, FunctionalCategory
from nltk.compat import python_2_unicode_compatible
#------------
# Regular expressions used for parsing components of the lexicon
#------------
# Parses a primitive category and subscripts
rePrim = re.compile(r'''([A-Za-z]+)(\[[A-Za-z,]+\])?''')
# Separates the next primitive category from the remainder of the
# string
reNextPrim = re.compile(r'''([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)''')
# Separates the next application operator from the remainder
reApp = re.compile(r'''([\\/])([.,]?)([.,]?)(.*)''')
# Parses the definition of the category of either a word or a family
reLex = re.compile(r'''([\w_]+)\s*(::|[-=]+>)\s*(.+)''', re.UNICODE)
# Strips comments from a line
reComm = re.compile('''([^#]*)(?:#.*)?''')
#----------
# Lexicons
#----------
@python_2_unicode_compatible
class CCGLexicon(object):
    '''
    Class representing a lexicon for CCG grammars.

    primitives - The list of primitive categories for the lexicon
    families - Families of categories
    entries - A mapping of words to possible categories
    '''
    def __init__(self, start, primitives, families, entries):
        self._start = PrimitiveCategory(start)
        self._primitives = primitives
        self._families = families
        self._entries = entries

    def categories(self, word):
        """Return all the possible categories for a word."""
        return self._entries[word]

    def start(self):
        """Return the target category for the parser."""
        return self._start

    def __str__(self):
        """Debugging representation: one 'word => cat | cat' line per entry."""
        text = ""
        need_newline = False
        for ident in self._entries:
            if need_newline:
                text = text + "\n"
            text = text + ident + " => "
            wrote_any = False
            for cat in self._entries[ident]:
                if wrote_any:
                    text = text + " | "
                else:
                    wrote_any = True
                text = text + "%s" % cat
            # A newline precedes the next entry only if this one emitted
            # at least one category (mirrors the original shared flag).
            need_newline = wrote_any
        return text
#-----------
# Parsing lexicons
#-----------
def matchBrackets(string):
    """Separate the contents matching the first set of brackets from the
    rest of the input.

    Returns (bracketed, remainder) where `bracketed` includes the
    surrounding parentheses.  Raises AssertionError on an unmatched '('.
    """
    collected = "("
    remainder = string[1:]
    while remainder and not remainder.startswith(')'):
        if remainder[0] == '(':
            # Recurse to consume a nested bracketed group whole.
            (inner, remainder) = matchBrackets(remainder)
            collected = collected + inner
        else:
            collected = collected + remainder[0]
            remainder = remainder[1:]
    if remainder.startswith(')'):
        return (collected + ')', remainder[1:])
    raise AssertionError('Unmatched bracket in string \'' + string + '\'')
def nextCategory(string):
    """Split off the next category portion of `string`.

    Returns a (category, rest) pair; bracketed groups are consumed whole.
    """
    if not string.startswith('('):
        return reNextPrim.match(string).groups()
    return matchBrackets(string)
def parseApplication(app):
    """Build a Direction from a matched application operator.

    app[0] is the slash character; the remainder are its modifiers.
    """
    slash_char = app[0]
    modifiers = app[1:]
    return Direction(slash_char, modifiers)
def parseSubscripts(subscr):
    """Parse the subscript annotation of a primitive category.

    '[sg,pl]' -> ['sg', 'pl']; a false-y value (no subscripts) -> [].
    """
    if not subscr:
        return []
    return subscr[1:-1].split(',')
def parsePrimitiveCategory(chunks, primitives, families, var):
    """Parse a primitive category from regex `chunks` = (name, subscripts).

    Returns a (category, var) pair; `var` tracks the CCG variable shared
    across the whole category expression being parsed.
    """
    name = chunks[0]
    subscript_str = chunks[1]

    # The special name 'var' (without subscripts) denotes the CCG variable.
    if name == "var" and subscript_str is None:
        if var is None:
            var = CCGVar()
        return (var, var)

    if name in families:
        (family_cat, family_var) = families[name]
        if var is None:
            var = family_var
        else:
            # Re-use the caller's variable inside the family's category.
            family_cat = family_cat.substitute([(family_var, var)])
        return (family_cat, var)

    if name in primitives:
        return (PrimitiveCategory(name, parseSubscripts(subscript_str)), var)

    raise AssertionError('String \'' + name + '\' is neither a family nor primitive category.')
def parseCategory(line, primitives, families):
    """Parse a category string, discarding the CCG variable."""
    (cat, _var) = augParseCategory(line, primitives, families)
    return cat
def augParseCategory(line, primitives, families, var=None):
    """Parse a string representing a category.

    Returns a (category, var) tuple where `var` is (possibly) the CCG
    variable shared by all 'var' occurrences within the category.

    Locals renamed from the original: `str` and `dir` shadowed the
    builtins of the same names.
    """
    (cat_string, rest) = nextCategory(line)

    if cat_string.startswith('('):
        (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
    else:
        (res, var) = parsePrimitiveCategory(rePrim.match(cat_string).groups(),
                                            primitives, families, var)

    # Repeatedly consume "<slash><category>" suffixes, folding them into
    # nested FunctionalCategory applications (left-associative).
    while rest != "":
        app = reApp.match(rest).groups()
        # app[0:3] is the slash plus its modifiers; app[3] is the tail.
        direction = parseApplication(app[0:3])
        rest = app[3]

        (cat_string, rest) = nextCategory(rest)
        if cat_string.startswith('('):
            (arg, var) = augParseCategory(cat_string[1:-1], primitives,
                                          families, var)
        else:
            (arg, var) = parsePrimitiveCategory(rePrim.match(cat_string).groups(),
                                                primitives, families, var)
        res = FunctionalCategory(res, arg, direction)

    return (res, var)
def parseLexicon(lex_str):
    """Convert a lexicon description string into a CCGLexicon."""
    primitives = []
    families = {}
    entries = defaultdict(list)
    for line in lex_str.splitlines():
        # Strip comments and leading/trailing whitespace.
        line = reComm.match(line).groups()[0].strip()
        if not line:
            continue

        if line.startswith(':-'):
            # A line of primitive categories, the first being the
            # parser's target category, e.g.  :- S, N, NP, VP
            declared = line[2:].strip().split(',')
            primitives.extend(prim.strip() for prim in declared)
        else:
            # Either a family definition (Det :: NP/N) or a word
            # definition (which => (N\N)/(S/NP)).
            (ident, sep, catstr) = reLex.match(line).groups()
            (cat, var) = augParseCategory(catstr, primitives, families)
            if sep == '::':
                families[ident] = (cat, var)
            else:
                entries[ident].append(cat)
    return CCGLexicon(primitives[0], primitives, families, entries)
# Demonstration lexicon, parsed once at import time.  The '#' lines
# inside the string below are comments in the *lexicon* format (stripped
# by reComm during parsing), not Python comments.
openccg_tinytiny = parseLexicon('''
# Rather minimal lexicon based on the openccg `tinytiny' grammar.
# Only incorporates a subset of the morphological subcategories, however.
:- S,NP,N # Primitive categories
Det :: NP/N # Determiners
Pro :: NP
IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular)
IntransVpl :: S\\NP[pl] # Plural
TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular)
TransVpl :: S\\NP[pl]/NP # Plural
the => NP[sg]/N[sg]
the => NP[pl]/N[pl]
I => Pro
me => Pro
we => Pro
us => Pro
book => N[sg]
books => N[pl]
peach => N[sg]
peaches => N[pl]
policeman => N[sg]
policemen => N[pl]
boy => N[sg]
boys => N[pl]
sleep => IntransVsg
sleep => IntransVpl
eat => IntransVpl
eat => TransVpl
eats => IntransVsg
eats => TransVsg
see => TransVpl
sees => TransVsg
''')
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/ccg/lexicon.py | Python | gpl-2.0 | 7,509 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
import json
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.ironware.ironware import ironware_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
    """Action plugin for Brocade Ironware modules.

    Translates the legacy 'provider' argument into a persistent
    network_cli connection on the controller, normalizes the CLI
    context, then delegates to the normal action module.
    """

    def run(self, tmp=None, task_vars=None):
        # Provider-style modules must run with connection=local; the
        # plugin opens its own persistent CLI connection below.
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        provider = load_provider(ironware_provider_spec, self._task.args)

        # Build a play context describing the network_cli connection,
        # falling back to the play's own values where the provider does
        # not specify one.
        pc = copy.deepcopy(self._play_context)
        pc.connection = 'network_cli'
        pc.network_os = 'ironware'
        pc.remote_addr = provider['host'] or self._play_context.remote_addr
        pc.port = int(provider['port'] or self._play_context.port or 22)
        pc.remote_user = provider['username'] or self._play_context.connection_user
        pc.password = provider['password'] or self._play_context.password
        pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
        pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
        pc.become = provider['authorize'] or False
        if pc.become:
            pc.become_method = 'enable'
            pc.become_pass = provider['auth_pass']

        display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
        # 'persistent' wraps the network_cli connection in a long-lived
        # background process reachable through a unix socket.
        connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                    'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        # make sure we are in the right cli context which should be
        # enable mode and not config module
        conn = Connection(socket_path)
        out = conn.get_prompt()
        if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
            display.vvvv('wrong context, sending end to device', self._play_context.remote_addr)
            conn.send_command('end')

        task_vars['ansible_socket'] = socket_path

        # Privilege escalation was already applied to the CLI session;
        # don't let the module layer try to 'enable' a second time.
        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
| le9i0nx/ansible | lib/ansible/plugins/action/ironware.py | Python | gpl-3.0 | 3,707 |
from scipysim.actors import SisoTestHelper, Channel, Event, LastEvent
from scipysim.actors.math import Abs
import numpy
import unittest
class AbsTests( unittest.TestCase ):
    '''Test the absolute actor.

    Fixes over the original: the actor instance is named `abs_actor`
    instead of `abs`, which shadowed the builtin abs(); the side-effect
    list comprehension feeding the input channel is now a plain loop.
    '''

    def setUp( self ):
        '''
        Unit test setup code: fresh input/output channels per test.
        '''
        self.q_in = Channel()
        self.q_out = Channel()

    def test_positive_integers( self ):
        '''Test a simple positive integer signal.

        abs() of a non-negative signal must be the identity, with tags
        preserved, terminated by a LastEvent marker.
        '''
        inp = [Event(value=i, tag=i) for i in xrange( 0, 100, 1 )]
        expected_outputs = inp[:]

        abs_actor = Abs( self.q_in, self.q_out )
        abs_actor.start()
        for val in inp:
            self.q_in.put( val )
        self.q_in.put( LastEvent() )
        abs_actor.join()

        for expected_output in expected_outputs:
            out = self.q_out.get()
            self.assertEquals( out.value, expected_output.value )
            self.assertEquals( out.tag, expected_output.tag )
        self.assertTrue( self.q_out.get().last )
# Importing these TestCase subclasses here lets unittest.main() below
# discover and run the whole math-actor suite from this single module.
from scipysim.actors.math.ct_integrator import CTIntegratorTests
from scipysim.actors.math.ct_integrator_qs1 import CTintegratorQSTests
from scipysim.actors.math.dt_integrator import DTIntegratorTests
from scipysim.actors.math.derivative import BundleDerivativeTests
from scipysim.actors.math.proportional import ProportionalTests
from scipysim.actors.math.summer import SummerTests

if __name__ == "__main__":
    # import sys
    # sys.argv = ['', 'Test.testName']
    unittest.main()
| mm318/scipysim-nogui | scipysim_tests/test_actors_math.py | Python | gpl-3.0 | 1,477 |
from rctk.widgets.control import Control
from rctk.layouts import VBox, LayoutException
from rctk.task import Task
import warnings
class Container(Control):
    """ A container can contain controls. It uses a layout manager
    to position them. The default layoutmanager is a gridlayout
    (vertical).

    A container has a default LayoutManager which can be changed
    as long as no controls have been added
    """
    default_layout = VBox

    def __init__(self, tk, **properties):
        super(Container, self).__init__(tk, **properties)
        self._layout = self.default_layout()
        self._controls = []       # child controls, in append order
        self._controls_args = {}  ## for restoration

    ##
    ## Can't really be created but we don't want to hide the default
    ## control behaviour from derived classes
    def _add_append_task(self, control, args):
        # Queue a client-side task that appends `control` to this container.
        self.tk.queue(Task("Append %d to %d" % (control.id, self.id), args))

    def append(self, control, **args):
        """ adds a control to the window.
            args can provide layout instructions for the
            layoutmanager
        """
        if not control.containable:
            warnings.warn("Widget %s does not support appending to parent container" % control)
            return
        if self.id == control.id:
            warnings.warn("Refusing to add widget %s to itself" % control)
            return
        if control.parent:
            # Re-parenting: detach from the previous container first.
            control.parent.remove(control)
        t = {'id':self.id, 'child':control.id, 'action':'append'}
        t.update(args)
        self._controls.append(control)
        control.parent = self
        control._append_args = t
        self._add_append_task(control, t)
        ## restoration
        self._controls_args[control] = t
        self._layout.append(control, **args)

    def remove(self, control):
        """ Removes a control from the window and moves it back into
            the factory.
        """
        if control in self._controls and self.id != control.id:
            t = {'id':self.id, 'child':control.id, 'action':'remove'}
            control.parent = None
            control._append_args = None
            self.tk.queue(Task('Remove %d from %d' % (control.id, self.id), t))
            del self._controls_args[control]
            self._controls.remove(control)

    def destroy(self):
        # Destroy children first, then this container itself.
        # NOTE(review): if a child's destroy() ends up calling
        # parent.remove(), this iterates a list being mutated -- confirm.
        for c in self._controls:
            c.destroy()
        super(Container, self).destroy()

    def restore(self):
        # Re-create this container client-side, then replay its layout
        # and re-append every child (using the saved append args).
        self.create()
        self._add_layout_task(self._layout)
        ## Not root and not a toplevel
        #if self.id > 0 and self.parent is not None:
        #    print str(self._append_args)
        #    self.parent.append(self, **self._append_args)
        for c in self._controls:
            c.restore()
            self._add_append_task(c, self._controls_args.get(c, {}))

    def _add_layout_task(self, layout):
        # Tell the client which layout manager (and config) to use.
        self.tk.queue(Task("Set layout %s on id %d" % (layout.type, self.id),
            {'id':self.id, 'action':'layout', 'type':self._layout.type, 'config':self._layout.config()}))

    def setLayout(self, layout):
        # The layout can only change while the container is still empty.
        if self._controls_added():
            raise LayoutException("Can't change layout once controls have been added")
        self._layout = layout
        self._add_layout_task(layout)

    def layout(self):
        ## recursively call layout on children, bottom to top
        for c in self._controls:
            if isinstance(c, Container):
                c.layout()
        self.tk.queue(Task("Laying out id %d" % (self.id,),
            {'id':self.id, 'action':'relayout', 'config':self._layout.config()}))

    def _controls_added(self):
        # True once at least one child has been appended.
        return len(self._controls) > 0
| rctk/rctk | rctk/widgets/container.py | Python | bsd-2-clause | 3,705 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from cloudinit.config import cc_rh_subscription
from cloudinit import util
from .helpers import TestCase, mock
class GoodTests(TestCase):
    """Happy-path tests for the cc_rh_subscription config module.

    _sub_man_cli is mocked throughout; its side_effect sequences model
    the exact order of subscription-manager invocations the handler is
    expected to make.
    """

    def setUp(self):
        super(GoodTests, self).setUp()
        self.name = "cc_rh_subscription"
        self.cloud_init = None
        self.log = logging.getLogger("good_tests")
        self.args = []
        self.handle = cc_rh_subscription.handle
        self.SM = cc_rh_subscription.SubscriptionManager

        # Minimal config: username/password registration only.
        self.config = {'rh_subscription':
                       {'username': 'scooby@do.com',
                        'password': 'scooby-snacks'
                        }}
        # Full config: auto-attach, service level, pools and repo toggles.
        self.config_full = {'rh_subscription':
                            {'username': 'scooby@do.com',
                             'password': 'scooby-snacks',
                             'auto-attach': True,
                             'service-level': 'self-support',
                             'add-pool': ['pool1', 'pool2', 'pool3'],
                             'enable-repo': ['repo1', 'repo2', 'repo3'],
                             'disable-repo': ['repo4', 'repo5']
                             }}

    def test_already_registered(self):
        '''
        Emulates a system that is already registered. Ensure it gets
        a non-ProcessExecution error from is_registered()
        '''
        with mock.patch.object(cc_rh_subscription.SubscriptionManager,
                               '_sub_man_cli') as mockobj:
            self.SM.log_success = mock.MagicMock()
            self.handle(self.name, self.config, self.cloud_init,
                        self.log, self.args)
            # Only the 'identity' probe runs; no registration needed.
            self.assertEqual(self.SM.log_success.call_count, 1)
            self.assertEqual(mockobj.call_count, 1)

    def test_simple_registration(self):
        '''
        Simple registration with username and password
        '''
        self.SM.log_success = mock.MagicMock()
        reg = "The system has been registered with ID:" \
              " 12345678-abde-abcde-1234-1234567890abc"
        # First call ('identity') fails -> not registered; second is the
        # successful 'register' invocation.
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (reg, 'bar')])
        self.handle(self.name, self.config, self.cloud_init,
                    self.log, self.args)
        self.assertIn(mock.call(['identity']),
                      self.SM._sub_man_cli.call_args_list)
        self.assertIn(mock.call(['register', '--username=scooby@do.com',
                                 '--password=scooby-snacks'],
                                logstring_val=True),
                      self.SM._sub_man_cli.call_args_list)
        self.assertEqual(self.SM.log_success.call_count, 1)
        self.assertEqual(self.SM._sub_man_cli.call_count, 2)

    def test_full_registration(self):
        '''
        Registration with auto-attach, service-level, adding pools,
        and enabling and disabling yum repos
        '''
        call_lists = []
        call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
        call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
                           '--enable=repo3'])
        call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
        self.SM.log_success = mock.MagicMock()
        reg = "The system has been registered with ID:" \
              " 12345678-abde-abcde-1234-1234567890abc"
        # Nine calls in sequence: identity (fails), register, service
        # level, pool listings, attach, repo listings, repo toggles.
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (reg, 'bar'),
                         ('Service level set to: self-support', ''),
                         ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
                         ('Repo ID: repo1\nRepo ID: repo5\n', ''),
                         ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: '
                          'repo4', ''),
                         ('', '')])
        self.handle(self.name, self.config_full, self.cloud_init,
                    self.log, self.args)
        for call in call_lists:
            self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list)
        self.assertEqual(self.SM.log_success.call_count, 1)
        self.assertEqual(self.SM._sub_man_cli.call_count, 9)
class TestBadInput(TestCase):
    """Error-path tests: each config variant omits or mangles one piece
    of required rh_subscription input and asserts how far the handler
    gets (call counts on the mocked _sub_man_cli) and how many warnings
    it logs."""
    name = "cc_rh_subscription"
    cloud_init = None
    log = logging.getLogger("bad_tests")
    args = []
    SM = cc_rh_subscription.SubscriptionManager
    reg = "The system has been registered with ID:" \
          " 12345678-abde-abcde-1234-1234567890abc"

    config_no_password = {'rh_subscription':
                          {'username': 'scooby@do.com'
                           }}

    config_no_key = {'rh_subscription':
                     {'activation-key': '1234abcde',
                      }}

    config_service = {'rh_subscription':
                      {'username': 'scooby@do.com',
                       'password': 'scooby-snacks',
                       'service-level': 'self-support'
                       }}

    config_badpool = {'rh_subscription':
                      {'username': 'scooby@do.com',
                       'password': 'scooby-snacks',
                       'add-pool': 'not_a_list'
                       }}
    config_badrepo = {'rh_subscription':
                      {'username': 'scooby@do.com',
                       'password': 'scooby-snacks',
                       'enable-repo': 'not_a_list'
                       }}
    config_badkey = {'rh_subscription':
                     {'activation-key': 'abcdef1234',
                      'fookey': 'bar',
                      'org': '123',
                      }}

    def setUp(self):
        super(TestBadInput, self).setUp()
        self.handle = cc_rh_subscription.handle

    def test_no_password(self):
        '''
        Attempt to register without the password key/value
        '''
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
        self.handle(self.name, self.config_no_password, self.cloud_init,
                    self.log, self.args)
        # Validation fails before any subscription-manager call is made.
        self.assertEqual(self.SM._sub_man_cli.call_count, 0)

    def test_no_org(self):
        '''
        Attempt to register without the org key/value
        '''
        self.input_is_missing_data(self.config_no_key)

    def test_service_level_without_auto(self):
        '''
        Attempt to register using service-level without the auto-attach key
        '''
        self.SM.log_warn = mock.MagicMock()
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
        self.handle(self.name, self.config_service, self.cloud_init,
                    self.log, self.args)
        # Only the 'identity' probe runs; the misconfiguration is warned.
        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
        self.assertEqual(self.SM.log_warn.call_count, 2)

    def test_pool_not_a_list(self):
        '''
        Register with pools that are not in the format of a list
        '''
        self.SM.log_warn = mock.MagicMock()
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
        self.handle(self.name, self.config_badpool, self.cloud_init,
                    self.log, self.args)
        # Registration succeeds (identity + register), then the bad
        # add-pool value is rejected with warnings.
        self.assertEqual(self.SM._sub_man_cli.call_count, 2)
        self.assertEqual(self.SM.log_warn.call_count, 2)

    def test_repo_not_a_list(self):
        '''
        Register with repos that are not in the format of a list
        '''
        self.SM.log_warn = mock.MagicMock()
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
        self.handle(self.name, self.config_badrepo, self.cloud_init,
                    self.log, self.args)
        self.assertEqual(self.SM.log_warn.call_count, 3)
        self.assertEqual(self.SM._sub_man_cli.call_count, 2)

    def test_bad_key_value(self):
        '''
        Attempt to register with a key that we don't know
        '''
        self.SM.log_warn = mock.MagicMock()
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
        self.handle(self.name, self.config_badkey, self.cloud_init,
                    self.log, self.args)
        self.assertEqual(self.SM.log_warn.call_count, 2)
        self.assertEqual(self.SM._sub_man_cli.call_count, 1)

    def input_is_missing_data(self, config):
        '''
        Helper def for tests that having missing information
        '''
        self.SM.log_warn = mock.MagicMock()
        self.SM._sub_man_cli = mock.MagicMock(
            side_effect=[util.ProcessExecutionError])
        self.handle(self.name, config, self.cloud_init,
                    self.log, self.args)
        self.SM._sub_man_cli.assert_called_with(['identity'])
        self.assertEqual(self.SM.log_warn.call_count, 4)
        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
| clovertrail/cloudinit-bis | tests/unittests/test_rh_subscription.py | Python | gpl-3.0 | 9,497 |
# -*- coding: utf-8 -*-
# © 2017 Houssine BAKKALI, Coop IT Easy
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import product
| houssine78/addons | product_internal_ref/models/__init__.py | Python | agpl-3.0 | 156 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the nullable 'subtype' column to the
    stock_stocktransaction table (and drop it on rollback)."""

    def forwards(self, orm):
        # Adding field 'StockTransaction.subtype'
        db.add_column(u'stock_stocktransaction', 'subtype', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'StockTransaction.subtype'
        db.delete_column(u'stock_stocktransaction', 'subtype')

    # Frozen ORM snapshot used by South while this migration runs; must
    # not be edited by hand.
    models = {
        u'stock.stockreport': {
            'Meta': {'object_name': 'StockReport'},
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'form_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'stock.stocktransaction': {
            'Meta': {'object_name': 'StockTransaction'},
            'case_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '5'}),
            'report': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['stock.StockReport']"}),
            'section_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'stock_on_hand': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '5'}),
            'subtype': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        }
    }

    complete_apps = ['stock']
| puttarajubr/commcare-hq | corehq/ex-submodules/casexml/apps/stock/migrations/0004_auto__add_field_stocktransaction_subtype.py | Python | bsd-3-clause | 2,177 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Encapsulation of CLAnetwork that implements the ModelBase."""
import copy
import math
import sys
import os
import json
import random
import datetime
import itertools
import numpy
import logging
import traceback
from collections import defaultdict, namedtuple, deque
from datetime import timedelta
from ordereddict import OrderedDict
from operator import itemgetter
from model import Model
from nupic.algorithms.anomaly import computeAnomalyScore
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaSpecial, FieldMetaInfo
from nupic.data.filters import AutoResetFilter
from nupic.encoders import (MultiEncoder, DateEncoder, ScalarEncoder)
from nupic.engine import Network
from nupic.research import fdrutilities as fdrutils
from nupic.support import aggregationDivide
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
from opfutils import (InferenceType, InferenceElement, SensorInput,
PredictionElement, validateOpfJsonValue, initLogger)
from abc import ABCMeta, abstractmethod
# Defaults for CLAModel tuning knobs.  The first two are documented in
# CLAModel.__init__ (minimum likelihood kept in multistep inferences,
# and the cap on predictions returned per step).  The DEFAULT_ANOMALY_*
# values are presumably consumed by the anomaly-classifier setup further
# down this module -- TODO confirm their exact semantics there.
DEFAULT_LIKELIHOOD_THRESHOLD = 0.0001
DEFAULT_MAX_PREDICTIONS_PER_STEP = 8

DEFAULT_ANOMALY_TRAINRECORDS = 4000
DEFAULT_ANOMALY_THRESHOLD = 1.1
DEFAULT_ANOMALY_CACHESIZE = 10000
def requireAnomalyModel(func):
  """
  Decorator for CLAModel methods that require an active TemporalAnomaly
  model with an anomaly classifier.

  Raises RuntimeError at call time when the model does not qualify.

  Fixes over the original: functools.wraps preserves the wrapped
  method's name/docstring, and the two-part error string no longer
  concatenates to "mustbe".
  """
  import functools

  @functools.wraps(func)
  def _decorator(self, *args, **kwargs):
    if not (self.getInferenceType() == InferenceType.TemporalAnomaly):
      raise RuntimeError("Method required a TemporalAnomaly model.")
    if self._getAnomalyClassifier() is None:
      raise RuntimeError("Model does not support this command. Model must "
          "be an active anomalyDetector model.")
    return func(self, *args, **kwargs)
  return _decorator
###############################################################
class NetworkInfo(object):
  """Return-value type of CLAModel.__createCLANetwork().

  net             -- the CLA Network instance
  statsCollectors -- sequence of 0 or more CLAStatistic-based instances
  """

  def __init__(self, net, statsCollectors):
    self.net = net
    self.statsCollectors = statsCollectors

  def __repr__(self):
    return "NetworkInfo(net=%r, statsCollectors=%r)" % (
      self.net, self.statsCollectors)
class CLAModel(Model):
  """OPF Model implementation backed by a CLA network (sensor encoder,
  optional spatial/temporal poolers, classifier)."""

  # Inference kinds this implementation can build a network for; the
  # constructor rejects anything else.
  __supportedInferenceKindSet = set((InferenceType.TemporalNextStep,
                                     InferenceType.TemporalClassification,
                                     InferenceType.NontemporalClassification,
                                     InferenceType.NontemporalAnomaly,
                                     InferenceType.TemporalAnomaly,
                                     InferenceType.TemporalMultiStep,
                                     InferenceType.NontemporalMultiStep))

  # Used in log messages.
  __myClassName = "CLAModel"
  #############################################################################
  # NOTE(review): the {} defaults below are shared mutable defaults; safe
  # only as long as they are never mutated in place -- confirm before
  # changing this constructor.
  def __init__(self,
               inferenceType=InferenceType.TemporalNextStep,
               predictedField=None,
               sensorParams={},
               spEnable=True,
               spParams={},
               # TODO: We can't figure out what this is. Remove?
               trainSPNetOnlyIfRequested=False,
               tpEnable=True,
               tpParams={},
               clParams={},
               anomalyParams={},
               minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,
               maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP):
    """CLAModel constructor.

    Args:
      inferenceType: A value from the InferenceType enum class.
      predictedField: The field to predict for multistep prediction.
      sensorParams: A dictionary specifying the sensor parameters.
      spEnable: Whether or not to use a spatial pooler.
      spParams: A dictionary specifying the spatial pooler parameters. These
          are passed to the spatial pooler.
      trainSPNetOnlyIfRequested: If set, don't create an SP network unless the
          user requests SP metrics.
      tpEnable: Whether to use a temporal pooler.
      tpParams: A dictionary specifying the temporal pooler parameters. These
          are passed to the temporal pooler.
      clParams: A dictionary specifying the classifier parameters. These are
          are passed to the classifier.
      anomalyParams: Anomaly detection parameters
      minLikelihoodThreshold: The minimum likelihood value to include in
          inferences. Currently only applies to multistep inferences.
      maxPredictionsPerStep: Maximum number of predictions to include for
          each step in inferences. The predictions with highest likelihood are
          included.
    """
    if not inferenceType in self.__supportedInferenceKindSet:
      raise ValueError("{0} received incompatible inference type: {1}"\
                       .format(self.__class__, inferenceType))

    # Call super class constructor
    super(CLAModel, self).__init__(inferenceType)

    # self.__restoringFromState is set to True by our __setstate__ method
    # and back to False at completion of our _deSerializeExtraData() method.
    self.__restoringFromState = False
    self.__restoringFromV1 = False

    # Intitialize logging
    self.__logger = initLogger(self)
    self.__logger.debug("Instantiating %s." % self.__myClassName)

    # TODO: VERBOSITY should be deprecated since we now have logging with levels
    self.__VERBOSITY = 0

    self._minLikelihoodThreshold = minLikelihoodThreshold
    self._maxPredictionsPerStep = maxPredictionsPerStep

    # set up learning parameters (note: these may be replaced via
    # enable/disable//SP/TP//Learning methods)
    self.__spLearningEnabled = bool(spEnable)
    self.__tpLearningEnabled = bool(tpEnable)

    # Explicitly exclude the TP if this type of inference doesn't require it
    if not InferenceType.isTemporal(self.getInferenceType()) \
       or self.getInferenceType() == InferenceType.NontemporalMultiStep:
      tpEnable = False

    self._netInfo = None
    self._hasSP = spEnable
    self._hasTP = tpEnable

    self._classifierInputEncoder = None
    self._predictedFieldIdx = None
    self._predictedFieldName = None
    self._numFields = None

    # -----------------------------------------------------------------------
    # Create the network
    self._netInfo = self.__createCLANetwork(
        sensorParams, spEnable, spParams, tpEnable, tpParams, clParams,
        anomalyParams)

    # Initialize Spatial Anomaly detection parameters
    if self.getInferenceType() == InferenceType.NontemporalAnomaly:
      self._getSPRegion().setParameter('anomalyMode', True)

    # Initialize Temporal Anomaly detection parameters
    if self.getInferenceType() == InferenceType.TemporalAnomaly:
      self._getTPRegion().setParameter('anomalyMode', True)
      self._prevPredictedColumns = numpy.array([])

    # -----------------------------------------------------------------------
    # This flag, if present tells us not to train the SP network unless
    # the user specifically asks for the SP inference metric
    self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested

    self.__numRunCalls = 0

    # Tracks whether finishedLearning() has been called
    self.__finishedLearning = False

    self.__logger.info("Instantiated %s" % self.__class__.__name__)

    return
def getParameter(self, paramName):
if paramName == '__numRunCalls':
return self.__numRunCalls
else:
raise RuntimeError("'%s' parameter is not exposed by clamodel." % \
(paramName))
#############################################################################
def resetSequenceStates(self):
""" [virtual method override] Resets the model's sequence states. Normally
called to force the delineation of a sequence, such as between OPF tasks.
"""
if self._hasTP:
# Reset TP's sequence states
self._getTPRegion().executeCommand(['resetSequenceStates'])
self.__logger.debug("CLAModel.resetSequenceStates(): reset temporal "
"pooler's sequence states")
return
#############################################################################
def finishLearning(self):
""" [virtual method override] Places the model in a permanent "finished
learning" mode where it will not be able to learn from subsequent input
records.
NOTE: Upon completion of this command, learning may not be resumed on
the given instance of the model (e.g., the implementation may optimize
itself by pruning data structures that are necessary for learning)
"""
assert not self.__finishedLearning
if self._hasSP:
# Finish SP learning
self._getSPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished SP learning")
if self._hasTP:
# Finish temporal network's TP learning
self._getTPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished TP learning")
self.__spLearningEnabled = self.__tpLearningEnabled = False
self.__finishedLearning = True
return
def setFieldStatistics(self,fieldStats):
encoder = self._getEncoder()
# Set the stats for the encoders. The first argument to setFieldStats
# is the field name of the encoder. Since we are using a multiencoder
# we leave it blank, the multiencoder will propagate the field names to the
# underlying encoders
encoder.setFieldStats('',fieldStats)
  def enableLearning(self):
    """[override] Turn Learning on for the current model """
    super(CLAModel, self).enableLearning()
    # keep the sensor's encoder in sync with the model's learning state
    self.setEncoderLearning(True)
def disableLearning(self):
  """[override] Turn Learning off for the current model """
  # Base class flips the model-level flag; the encoder must follow suit.
  super(CLAModel, self).disableLearning()
  self.setEncoderLearning(False)
def setEncoderLearning(self, learningEnabled):
  """Enable or disable learning on the sensor region's encoder."""
  self._getEncoder().setLearning(learningEnabled)
# Anomaly Accessor Methods
@requireAnomalyModel
def setAnomalyParameter(self, param, value):
  """
  Set a parameter of the anomaly classifier within this model.
  """
  anomalyRegion = self._getAnomalyClassifier()
  anomalyRegion.setParameter(param, value)
@requireAnomalyModel
def getAnomalyParameter(self, param):
  """
  Get a parameter of the anomaly classifier within this model.
  """
  anomalyRegion = self._getAnomalyClassifier()
  return anomalyRegion.getParameter(param)
@requireAnomalyModel
def anomalyRemoveLabels(self, start, end, labelFilter):
  """
  Remove labels in the given record range from the anomaly classifier
  within this model.
  """
  anomalyRegion = self._getAnomalyClassifier().getSelf()
  anomalyRegion.removeLabels(start, end, labelFilter)
@requireAnomalyModel
def anomalyAddLabel(self, start, end, labelName):
  """
  Add a label over the given record range to the anomaly classifier
  within this model.
  """
  anomalyRegion = self._getAnomalyClassifier().getSelf()
  anomalyRegion.addLabel(start, end, labelName)
@requireAnomalyModel
def anomalyGetLabels(self, start, end):
  """
  Get labels in the given record range from the anomaly classifier
  within this model.
  """
  anomalyRegion = self._getAnomalyClassifier().getSelf()
  return anomalyRegion.getLabels(start, end)
def run(self, inputRecord):
  """ Run one iteration of this model.

  args:
    inputRecord: a record object formatted according to
        nupic.data.RecordStream.getNextRecordDict() result format.
  return:
    A ModelResult namedtuple (see opfutils.py). The contents of
    ModelResult.inferences depends on the the specific inference type
    of this model, which can be queried by getInferenceType().
  """
  # run() must not be called while the model is mid-deserialization.
  assert not self.__restoringFromState
  assert inputRecord
  results = super(CLAModel, self).run(inputRecord)
  self.__numRunCalls += 1
  self.__logger.debug("CLAModel.run() inputRecord=%s", (inputRecord))
  results.inferences = {}
  # -------------------------------------------------------------------------
  # Turn learning on or off, if the record carries an explicit '_learning'
  # control field.
  if '_learning' in inputRecord:
    if inputRecord['_learning']:
      self.enableLearning()
    else:
      self.disableLearning()

  ###########################################################################
  # Predictions and Learning
  ###########################################################################
  # NOTE(review): the next four locals are never used below -- candidates
  # for removal in a later cleanup.
  predictions = dict()
  inputRecordSensorMappings = dict()
  inferenceType = self.getInferenceType()
  inferenceArgs = self.getInferenceArgs()
  if inferenceArgs is None:
    inferenceArgs = {}

  # Bottom-up pass through sensor -> SP -> TP (the latter two are no-ops
  # when the network lacks those regions).
  self._sensorCompute(inputRecord)
  self._spCompute()
  self._tpCompute()

  results.sensorInput = self._getSensorInputRecord(inputRecord)

  # Track whether the TP top-down pass already ran so _anomalyCompute
  # doesn't repeat it.
  tpTopDownComputed = False
  inferences = {}
  # TODO: Reconstruction and temporal classification not used. Remove
  if self._isReconstructionModel():
    inferences = self._reconstructionCompute()
    tpTopDownComputed = True
  elif self._isMultiStepModel():
    inferences = self._multiStepCompute(rawInput=inputRecord)
  # For temporal classification. Not used, and might not work anymore
  elif self._isClassificationModel():
    inferences = self._classifcationCompute()

  results.inferences.update(inferences)

  # Anomaly inference runs in addition to whichever branch ran above.
  inferences = self._anomalyCompute(computeTPTopDown=(not tpTopDownComputed))
  results.inferences.update(inferences)

  # -----------------------------------------------------------------------
  # Store the index and name of the predictedField
  results.predictedFieldIdx = self._predictedFieldIdx
  results.predictedFieldName = self._predictedFieldName

  # =========================================================================
  # output
  assert (not self.isInferenceEnabled() or results.inferences is not None), \
      "unexpected inferences: %r" % results.inferences

  #self.__logger.setLevel(logging.DEBUG)
  if self.__logger.isEnabledFor(logging.DEBUG):
    self.__logger.debug("inputRecord: %r, results: %r" % (inputRecord,
                                                          results))
  return results
def _getSensorInputRecord(self, inputRecord):
  """Build the 'parsed' representation of one input record.

  inputRecord: dict containing the input to the sensor.
  Returns a SensorInput object populated from the sensor region's current
  outputs.
  """
  sensorRegion = self._getSensorRegion()
  sensorSelf = sensorRegion.getSelf()
  # Deep-copy values read out of the region so that subsequent network
  # activity cannot mutate what we hand back to the caller.
  rowValues = copy.deepcopy(sensorSelf.getOutputValues('sourceOut'))
  rawDict = copy.deepcopy(inputRecord)
  encodings = sensorSelf.getOutputValues('sourceEncodings')
  category = int(sensorRegion.getOutputData('categoryOut')[0])
  reset = sensorRegion.getOutputData('resetOut')[0]
  return SensorInput(dataRow=rowValues,
                     dataDict=rawDict,
                     dataEncodings=encodings,
                     sequenceReset=reset,
                     category=category)
def _sensorCompute(self, inputRecord):
  """Push inputRecord into the data source and run the sensor region's
  bottom-up compute pass."""
  sensorRegion = self._getSensorRegion()
  self._getDataSource().push(inputRecord)
  sensorRegion.setParameter('topDownMode', False)
  sensorRegion.prepareInputs()
  try:
    sensorRegion.compute()
  except StopIteration as e:
    # The DataBuffer should always contain the record we just pushed;
    # surface an unexpected exhaustion with the real traceback attached.
    raise Exception("Unexpected StopIteration", e,
                    "ACTUAL TRACEBACK: %s" % traceback.format_exc())
def _spCompute(self):
  """Run one bottom-up compute pass through the SP region (no-op when the
  network has no SP)."""
  spRegion = self._getSPRegion()
  if spRegion is None:
    return
  # Refresh the mode flags from the model state before every compute so
  # enable/disableLearning() take effect immediately.
  spRegion.setParameter('topDownMode', False)
  spRegion.setParameter('inferenceMode', self.isInferenceEnabled())
  spRegion.setParameter('learningMode', self.isLearningEnabled())
  spRegion.prepareInputs()
  spRegion.compute()
def _tpCompute(self):
  """Run one bottom-up compute pass through the TP region (no-op when the
  network has no TP).

  Inference and learning modes are refreshed from the model's current
  flags before every compute so enable/disableLearning() take effect
  immediately.
  """
  tp = self._getTPRegion()
  if tp is None:
    return
  # Fix: the original fetched the TP region a second time right here;
  # a single lookup is sufficient.
  tp.setParameter('topDownMode', False)
  tp.setParameter('inferenceMode', self.isInferenceEnabled())
  tp.setParameter('learningMode', self.isLearningEnabled())
  tp.prepareInputs()
  tp.compute()
def _tpTopDownCompute(self):
  """Run a top-down compute pass through the TP region (no-op when the
  network has no TP)."""
  tpRegion = self._getTPRegion()
  if tpRegion is None:
    return
  tpRegion.setParameter('topDownMode', True)
  tpRegion.prepareInputs()
  tpRegion.compute()
def _isReconstructionModel(self):
  """True when this model performs reconstruction: TemporalNextStep
  inference, or an explicit 'useReconstruction' inference argument."""
  inferenceType = self.getInferenceType()
  inferenceArgs = self.getInferenceArgs()
  if inferenceType == InferenceType.TemporalNextStep:
    return True
  if not inferenceArgs:
    return False
  return inferenceArgs.get('useReconstruction', False)
def _isMultiStepModel(self):
  """True for the inference types that produce multi-step predictions."""
  multiStepTypes = (InferenceType.NontemporalMultiStep,
                    InferenceType.NontemporalClassification,
                    InferenceType.TemporalMultiStep,
                    InferenceType.TemporalAnomaly)
  return self.getInferenceType() in multiStepTypes
def _isClassificationModel(self):
  """True when this model performs (temporal) classification inference.

  Fix: the original wrote ``in (InferenceType.TemporalClassification)``,
  which is NOT a one-element tuple -- the missing trailing comma turned
  the membership test into containment on the bare value (a substring
  test if the inference types are strings). The trailing comma restores
  the intended tuple-membership semantics.
  """
  return self.getInferenceType() in (InferenceType.TemporalClassification,)
def _multiStepCompute(self, rawInput):
  """Compute multi-step inferences for the current record.

  Chooses the classifier's input pattern (patternNZ) from the highest
  region present -- TP, else SP, else sensor -- then delegates to
  _handleCLAClassifierMultiStep().

  rawInput: the raw input record dict (needed for the predicted field's
      actual value and the '_timestampRecordIdx' control field).
  Returns: the inferences dict produced by _handleCLAClassifierMultiStep.
  Raises: RuntimeError if the network has no TP, SP, or sensor region.
  """
  patternNZ = None
  if self._getTPRegion() is not None:
    tp = self._getTPRegion()
    # Use the TP's active-cell state at time 't' as the classifier input.
    tpOutput = tp.getSelf()._tfdr.infActiveState['t']
    patternNZ = tpOutput.reshape(-1).nonzero()[0]
  elif self._getSPRegion() is not None:
    sp = self._getSPRegion()
    spOutput = sp.getOutputData('bottomUpOut')
    patternNZ = spOutput.nonzero()[0]
  elif self._getSensorRegion() is not None:
    sensor = self._getSensorRegion()
    sensorOutput = sensor.getOutputData('dataOut')
    patternNZ = sensorOutput.nonzero()[0]
  else:
    raise RuntimeError("Attempted to make multistep prediction without"
                       "TP, SP, or Sensor regions")
  inputTSRecordIdx = rawInput.get('_timestampRecordIdx')
  return self._handleCLAClassifierMultiStep(
                                      patternNZ=patternNZ,
                                      inputTSRecordIdx=inputTSRecordIdx,
                                      rawInput=rawInput)
def _classifcationCompute(self):
  """Compute temporal-classification inference.

  NOTE: the method name is misspelled ('classifcation') but is kept as-is
  because callers (e.g. run()) reference it by this name.
  Returns: dict with InferenceElement.classification (winning category
  index) and InferenceElement.classConfidences (per-category likelihoods).
  """
  inference = {}
  classifier = self._getClassifierRegion()
  classifier.setParameter('inferenceMode', True)
  classifier.setParameter('learningMode', self.isLearningEnabled())
  classifier.prepareInputs()
  classifier.compute()

  # What we get out is the score for each category. The argmax is
  # then the index of the winning category
  classificationDist = classifier.getOutputData('categoriesOut')
  classification = classificationDist.argmax()
  probabilities = classifier.getOutputData('categoryProbabilitiesOut')
  numCategories = classifier.getParameter('activeOutputCount')
  classConfidences = dict(zip(xrange(numCategories), probabilities))

  inference[InferenceElement.classification] = classification
  inference[InferenceElement.classConfidences] = {0: classConfidences}
  return inference
def _reconstructionCompute(self):
  """Compute reconstruction (next-step prediction) inference.

  Runs the full top-down flow -- TP, then SP, then sensor -- and reads the
  decoded prediction back out of the sensor region.
  Returns: dict with InferenceElement.prediction (decoded values per
  field) and InferenceElement.encodings (their encodings); empty dict when
  inference is disabled.
  """
  if not self.isInferenceEnabled():
    return {}

  tp = self._getTPRegion()
  sp = self._getSPRegion()
  sensor = self._getSensorRegion()

  # TP Top-down flow
  self._tpTopDownCompute()

  #--------------------------------------------------
  # SP Top-down flow
  sp.setParameter('topDownMode', True)
  sp.prepareInputs()
  sp.compute()

  #--------------------------------------------------
  # Sensor Top-down flow
  sensor.setParameter('topDownMode', True)
  sensor.prepareInputs()
  sensor.compute()

  # Need to call getOutputValues() instead of going through getOutputData()
  # because the return values may contain strings, which cannot be passed
  # through the Region.cpp code.

  # predictionRow is a list of values, one for each field. The value is
  # in the same type as the original input to the encoder and may be a
  # string for category fields for example.
  predictionRow = copy.copy(sensor.getSelf().getOutputValues('temporalTopDownOut'))
  predictionFieldEncodings = sensor.getSelf().getOutputValues('temporalTopDownEncodings')

  inferences = {}
  inferences[InferenceElement.prediction] = tuple(predictionRow)
  inferences[InferenceElement.encodings] = tuple(predictionFieldEncodings)
  return inferences
def _anomalyCompute(self, computeTPTopDown):
  """
  Compute the anomaly score, if required by this model's inference type.

  computeTPTopDown: If True, first perform a TP top-down compute pass
      (run() sets this False when the reconstruction branch already ran
      one this iteration).
  Returns: dict possibly containing InferenceElement.anomalyScore and,
  for TemporalAnomaly models with an SP, InferenceElement.anomalyLabel.
  Side effect: updates self._prevPredictedColumns for the next timestep.
  """
  inferenceType = self.getInferenceType()

  inferences = {}
  if inferenceType == InferenceType.NontemporalAnomaly:
    # SP region exposes its anomaly score directly.
    sp = self._getSPRegion()
    score = sp.getOutputData("anomalyScore")[0]
    inferences[InferenceElement.anomalyScore] = score

  # -----------------------------------------------------------------------
  # Temporal Anomaly Score
  if inferenceType == InferenceType.TemporalAnomaly:
    sp = self._getSPRegion()
    tp = self._getTPRegion()
    sensor = self._getSensorRegion()

    if computeTPTopDown:
      self._tpTopDownCompute()

    if sp is not None:
      activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
    else:
      activeColumns = sensor.getOutputData('dataOut').nonzero()[0]

    # Calculate the anomaly score using the active columns
    # and previous predicted columns
    inferences[InferenceElement.anomalyScore] = (
        computeAnomalyScore(activeColumns, self._prevPredictedColumns))

    # Store the predicted columns for the next timestep
    predictedColumns = tp.getOutputData("topDownOut").nonzero()[0]
    self._prevPredictedColumns = copy.deepcopy(predictedColumns)

    # Calculate the classifier's output and use the result as the anomaly
    # label. Stores as string of results.
    # TODO: make labels work with non-SP models
    if sp is not None:
      self._getAnomalyClassifier().setParameter(
          "activeColumnCount", len(activeColumns))
      self._getAnomalyClassifier().prepareInputs()
      self._getAnomalyClassifier().compute()
      labels = self._getAnomalyClassifier().getSelf().getLabelResults()
      inferences[InferenceElement.anomalyLabel] = "%s" % labels

  return inferences
def _handleCLAClassifierMultiStep(self, patternNZ,
                                  inputTSRecordIdx,
                                  rawInput):
  """ Handle the CLA Classifier compute logic when implementing multi-step
  prediction. This is where the patternNZ is associated with one of the
  other fields from the dataset 0 to N steps in the future. This method is
  used by each type of network (encoder only, SP only, SP +TP) to handle the
  compute logic through the CLA Classifier. It fills in the inference dict
  with the results of the compute.

  Parameters:
  -------------------------------------------------------------------
  patternNZ: The input to the CLA Classifier as a list of active input
             indices
  inputTSRecordIdx: The index of the record as computed from the timestamp
               and aggregation interval. This normally increments by 1
               each time unless there are missing records. If there is no
               aggregation interval or timestamp in the data, this will be
               None.
  rawInput: The raw input to the sensor, as a dict.
  Returns: dict with InferenceElement.multiStepPredictions and
               InferenceElement.multiStepBestPredictions, keyed by step
               count.
  """
  sensor = self._getSensorRegion()
  classifier = self._getClassifierRegion()
  minLikelihoodThreshold = self._minLikelihoodThreshold
  maxPredictionsPerStep = self._maxPredictionsPerStep
  inferenceArgs = self.getInferenceArgs()
  needLearning = self.isLearningEnabled()
  inferences = {}

  predictedFieldName = inferenceArgs.get('predictedField', None)

  # Get the classifier input encoder, if we don't have it already
  # (lazy one-time setup; cached on self for subsequent calls).
  if self._classifierInputEncoder is None:
    if predictedFieldName is None:
      raise RuntimeError("This experiment description is missing "
              "the 'predictedField' in its config, which is required "
              "for multi-step prediction inference.")

    # This is getting index of predicted field if being fed to CLA.
    self._predictedFieldName = predictedFieldName
    encoderList = sensor.getSelf().encoder.getEncoderList()
    self._numFields = len(encoderList)
    fieldNames = sensor.getSelf().encoder.getScalarNames()
    if predictedFieldName in fieldNames:
      self._predictedFieldIdx = fieldNames.index(predictedFieldName)
    else:
      # Predicted field was not fed into the network, only to the classifier
      self._predictedFieldIdx = None

    # In a multi-step model, the classifier input encoder is separate from
    # the other encoders and always disabled from going into the bottom of
    # the network.
    if sensor.getSelf().disabledEncoder is not None:
      encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
    else:
      encoderList = []
    if len(encoderList) >= 1:
      fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
      self._classifierInputEncoder = encoderList[fieldNames.index(
                                                      predictedFieldName)]
    else:
      # Legacy multi-step networks don't have a separate encoder for the
      # classifier, so use the one that goes into the bottom of the network
      encoderList = sensor.getSelf().encoder.getEncoderList()
      self._classifierInputEncoder = encoderList[self._predictedFieldIdx]

  # Get the actual value and the bucket index for this sample. The
  # predicted field may not be enabled for input to the network, so we
  # explicitly encode it outside of the sensor
  # TODO: All this logic could be simpler if in the encoder itself
  absoluteValue = rawInput[predictedFieldName]
  bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]

  # Convert the absolute values to deltas if necessary
  # The bucket index should be handled correctly by the underlying delta
  # encoder
  if self._classifierInputEncoder.isDelta():
    # Make the delta before any values have been seen 0 so that we do not
    # mess up the range for the adaptive scalar encoder.
    if not hasattr(self,"_ms_prevVal"):
      self._ms_prevVal = absoluteValue
    prevValue = self._ms_prevVal
    self._ms_prevVal = absoluteValue
    actualValue = absoluteValue - prevValue
  else:
    actualValue = absoluteValue

  # Missing data is replaced by a sentinel so the classifier still runs.
  if isinstance(actualValue, float) and math.isnan(actualValue):
    actualValue = SENTINEL_VALUE_FOR_MISSING_DATA

  # Pass this information to the classifier's custom compute method
  # so that it can assign the current classification to possibly
  # multiple patterns from the past and current, and also provide
  # the expected classification for some time step(s) in the future.
  classifier.setParameter('inferenceMode', True)
  classifier.setParameter('learningMode', needLearning)
  classificationIn = {'bucketIdx': bucketIdx,
                      'actValue': actualValue}

  # Handle missing records: fall back to the run-call counter when no
  # timestamp-derived record index is available.
  if inputTSRecordIdx is not None:
    recordNum = inputTSRecordIdx
  else:
    recordNum = self.__numRunCalls
  clResults = classifier.getSelf().customCompute(recordNum=recordNum,
                                         patternNZ=patternNZ,
                                         classification=classificationIn)

  # ---------------------------------------------------------------
  # Get the prediction for every step ahead learned by the classifier
  predictionSteps = classifier.getParameter('steps')
  predictionSteps = [int(x) for x in predictionSteps.split(',')]

  # We will return the results in this dict. The top level keys
  # are the step number, the values are the relative likelihoods for
  # each classification value in that time step, represented as
  # another dict where the keys are the classification values and
  # the values are the relative likelihoods.
  inferences[InferenceElement.multiStepPredictions] = dict()
  inferences[InferenceElement.multiStepBestPredictions] = dict()

  # ======================================================================
  # Plug in the predictions for each requested time step.
  for steps in predictionSteps:
    # From the clResults, compute the predicted actual value. The
    # CLAClassifier classifies the bucket index and returns a list of
    # relative likelihoods for each bucket. Let's find the max one
    # and then look up the actual value from that bucket index
    likelihoodsVec = clResults[steps]
    bucketValues = clResults['actualValues']

    # Create a dict of value:likelihood pairs. We can't simply use
    # dict(zip(bucketValues, likelihoodsVec)) because there might be
    # duplicate bucketValues (this happens early on in the model when
    # it doesn't have actual values for each bucket so it returns
    # multiple buckets with the same default actual value).
    likelihoodsDict = dict()
    bestActValue = None
    bestProb = None
    for (actValue, prob) in zip(bucketValues, likelihoodsVec):
      if actValue in likelihoodsDict:
        likelihoodsDict[actValue] += prob
      else:
        likelihoodsDict[actValue] = prob
      # Keep track of best
      if bestProb is None or likelihoodsDict[actValue] > bestProb:
        bestProb = likelihoodsDict[actValue]
        bestActValue = actValue

    # Remove entries with 0 likelihood or likelihood less than
    # minLikelihoodThreshold, but don't leave an empty dict.
    likelihoodsDict = CLAModel._removeUnlikelyPredictions(
        likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)

    # ---------------------------------------------------------------------
    # If we have a delta encoder, we have to shift our predicted output
    # value by the sum of the deltas
    if self._classifierInputEncoder.isDelta():
      # Get the prediction history for this number of timesteps.
      # The prediction history is a store of the previous best predicted
      # values. This is used to get the final shift from the current
      # absolute value.
      if not hasattr(self, '_ms_predHistories'):
        self._ms_predHistories = dict()
      predHistories = self._ms_predHistories
      if not steps in predHistories:
        predHistories[steps] = deque()
      predHistory = predHistories[steps]

      # Find the sum of the deltas for the steps and use this to generate
      # an offset from the current absolute value
      sumDelta = sum(predHistory)
      offsetDict = dict()
      for (k, v) in likelihoodsDict.iteritems():
        if k is not None:
          # Reconstruct the absolute value based on the current actual
          # value, the best predicted values from the previous iterations,
          # and the current predicted delta
          offsetDict[absoluteValue+float(k)+sumDelta] = v

      # Push the current best delta to the history buffer for
      # reconstructing the final delta
      if bestActValue is not None:
        predHistory.append(bestActValue)
      # If we don't need any more values in the predictionHistory, pop off
      # the earliest one.
      if len(predHistory) >= steps:
        predHistory.popleft()

      # Provide the offsetDict as the return value
      if len(offsetDict)>0:
        inferences[InferenceElement.multiStepPredictions][steps] = \
                                                      offsetDict
      else:
        inferences[InferenceElement.multiStepPredictions][steps] = \
                                                      likelihoodsDict

      if bestActValue is None:
        inferences[InferenceElement.multiStepBestPredictions][steps] = \
                                                      None
      else:
        inferences[InferenceElement.multiStepBestPredictions][steps] = \
              absoluteValue + sumDelta + bestActValue

    # ---------------------------------------------------------------------
    # Normal case, no delta encoder. Just plug in all our multi-step
    # predictions with likelihoods as well as our best prediction
    else:
      # The multiStepPredictions element holds the probabilities for each
      # bucket
      inferences[InferenceElement.multiStepPredictions][steps] = \
                                                    likelihoodsDict
      inferences[InferenceElement.multiStepBestPredictions][steps] = \
                                                    bestActValue

  return inferences
#############################################################################
@classmethod
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict
def getRuntimeStats(self):
  """ [virtual method override] Get runtime statistics specific to this
  model, i.e. activeCellOverlapAvg.

  return:
    a dict where keys are statistic names and values are the stats;
    always includes "numRunCalls", plus the temporal network's collected
    stats when a TP is present.
  """
  ret = {"numRunCalls" : self.__numRunCalls}

  #--------------------------------------------------
  # Query temporal network stats
  temporalStats = dict()
  if self._hasTP:
    for stat in self._netInfo.statsCollectors:
      sdict = stat.getStats()
      temporalStats.update(sdict)

    ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats

  return ret
def getFieldInfo(self, includeClassifierOnlyField=False):
  """ [virtual method override]
  Returns the sequence of FieldMetaInfo objects specifying this
  Model's output; note that this may be different than the list of
  FieldMetaInfo objects supplied at initialization (e.g., due to the
  transcoding of some input fields into meta-fields, such as datetime
  -> dayOfWeek, timeOfDay, etc.)

  includeClassifierOnlyField: when True, also append the fields of the
      classifier-only (disabled) encoder, if one exists.
  Returns: tuple of FieldMetaInfo objects (see description above)
  """
  encoder = self._getEncoder()

  fieldNames = encoder.getScalarNames()
  fieldTypes = encoder.getDecoderOutputFieldTypes()
  assert len(fieldNames) == len(fieldTypes)

  # Also include the classifierOnly field?
  encoder = self._getClassifierOnlyEncoder()
  if includeClassifierOnlyField and encoder is not None:
    addFieldNames = encoder.getScalarNames()
    addFieldTypes = encoder.getDecoderOutputFieldTypes()
    assert len(addFieldNames) == len(addFieldTypes)
    fieldNames = list(fieldNames) + addFieldNames
    fieldTypes = list(fieldTypes) + addFieldTypes

  # All output fields are marked as 'none' special -- meta-fields carry no
  # reset/sequence/timestamp role.
  fieldMetaList = map(FieldMetaInfo._make,
                      zip(fieldNames,
                          fieldTypes,
                          itertools.repeat(FieldMetaSpecial.none)))

  return tuple(fieldMetaList)
def _getLogger(self):
  """ [protected] Return this object's logger, created by the subclass and
  accessed by the Model base class. Never None. """
  return self.__logger
def _getSPRegion(self):
  """Return the network's SP region, or None when the network has none."""
  regions = self._netInfo.net.regions
  return regions.get('SP', None)
def _getTPRegion(self):
  """Return the network's TP region, or None when the network has none."""
  regions = self._netInfo.net.regions
  return regions.get('TP', None)
def _getSensorRegion(self):
  """Return the network's sensor region (always present; KeyError if the
  network was somehow built without one)."""
  regions = self._netInfo.net.regions
  return regions['sensor']
def _getClassifierRegion(self):
  """
  Returns reference to the network's Classifier region, or None when the
  network was built without one.
  """
  # Consistency fix: use .get() like the sibling accessors
  # (_getSPRegion/_getTPRegion/_getAnomalyClassifier) instead of the
  # 'in'-test plus index, which performed two lookups.
  return self._netInfo.net.regions.get('Classifier', None)
def _getAnomalyClassifier(self):
  """Return the network's anomaly-classifier region, or None if absent."""
  regions = self._netInfo.net.regions
  return regions.get("AnomalyClassifier", None)
def _getEncoder(self):
  """
  Returns: the sensor region's (Multi)encoder for the given network.
  """
  sensorSelf = self._getSensorRegion().getSelf()
  return sensorSelf.encoder
def _getClassifierOnlyEncoder(self):
  """
  Returns: the sensor region's encoder that is sent only to the
  classifier, not to the bottom of the network.
  """
  sensorSelf = self._getSensorRegion().getSelf()
  return sensorSelf.disabledEncoder
def _getDataSource(self):
  """
  Returns: the data source that we installed in the sensor region.
  """
  sensorSelf = self._getSensorRegion().getSelf()
  return sensorSelf.dataSource
#############################################################################
def __createCLANetwork(self, sensorParams, spEnable, spParams, tpEnable,
                       tpParams, clParams, anomalyParams):
  """ Create a CLA network and return it.

  Builds sensor -> [SP] -> [TP] -> [Classifier] with the appropriate
  bottom-up and top-down links, plus an optional anomaly classifier
  region for TemporalAnomaly models.

  description: CLA Model description dictionary (TODO: define schema)
  Returns: NetworkInfo instance;
  """
  isTemporal = self._hasTP

  #--------------------------------------------------
  # Create the network
  n = Network()

  #--------------------------------------------------
  # Add the Sensor
  n.addRegion("sensor", "py.RecordSensor", json.dumps(dict(verbosity=sensorParams['verbosity'])))
  sensor = n.regions['sensor'].getSelf()

  # Enabled encoders feed the bottom of the network; drop the
  # classifier-only ones. (NOTE: mutating while iterating .items() relies
  # on Python 2's list-returning .items().)
  enabledEncoders = copy.deepcopy(sensorParams['encoders'])
  for name, params in enabledEncoders.items():
    if params is not None:
      classifierOnly = params.pop('classifierOnly', False)
      if classifierOnly:
        enabledEncoders.pop(name)

  # Disabled encoders are encoders that are fed to CLAClassifierRegion but
  # not SP or TP Regions. This is to handle the case where the predicted
  # field is not fed through the SP/TP. We typically just have one of
  # these now.
  disabledEncoders = copy.deepcopy(sensorParams['encoders'])
  for name, params in disabledEncoders.items():
    if params is None:
      disabledEncoders.pop(name)
    else:
      classifierOnly = params.pop('classifierOnly', False)
      if not classifierOnly:
        disabledEncoders.pop(name)

  encoder = MultiEncoder(enabledEncoders)

  sensor.encoder = encoder
  sensor.disabledEncoder = MultiEncoder(disabledEncoders)
  sensor.dataSource = DataBuffer()

  # This is old functionality that would automatically reset the TP state
  # at a regular interval, such as every week for daily data, every day
  # for hourly data, etc.
  # TODO: remove, not being used anymore
  if sensorParams['sensorAutoReset']:
    sensorAutoResetDict = sensorParams['sensorAutoReset']

    supportedUnits = set(('days', 'hours', 'minutes', 'seconds',
                          'milliseconds', 'microseconds', 'weeks'))
    units = set(sensorAutoResetDict.keys())
    assert units.issubset(supportedUnits), \
      "Unexpected units: %s" % (units - supportedUnits)

    dd = defaultdict(lambda: 0, sensorAutoResetDict)
    # class timedelta([days[, seconds[, microseconds[, milliseconds[,
    # minutes[, hours[, weeks]]]]]]])
    if not (0 == dd['days'] == dd['hours'] == dd['minutes'] == dd['seconds'] \
            == dd['milliseconds'] == dd['microseconds'] == dd['weeks']):
      interval = timedelta(days=dd['days'],
                           hours=dd['hours'],
                           minutes=dd['minutes'],
                           seconds=dd['seconds'],
                           milliseconds=dd['milliseconds'],
                           microseconds=dd['microseconds'],
                           weeks=dd['weeks'])
      self.__logger.debug(
        "Adding AutoResetFilter; sensorAutoResetDict: %r, timeDelta: %r" % (
          sensorAutoResetDict, interval))

      # see if sensor already has an autoreset filter; reuse it if so
      # (for-else: the else arm runs only when no existing filter broke
      # out of the loop).
      for filter_ in sensor.preEncodingFilters:
        if isinstance(filter_, AutoResetFilter):
          break
      else:
        filter_ = AutoResetFilter()
        sensor.preEncodingFilters.append(filter_)
      filter_.setInterval(interval)

  prevRegion = "sensor"
  prevRegionWidth = encoder.getWidth()

  # SP is not enabled for spatial classification network
  if spEnable:
    spParams = spParams.copy()
    spParams['inputWidth'] = prevRegionWidth
    self.__logger.debug("Adding SPRegion; spParams: %r" % spParams)
    n.addRegion("SP", "py.SPRegion", json.dumps(spParams))

    # Link SP region
    n.link("sensor", "SP", "UniformLink", "")
    n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut",
           destInput="resetIn")
    n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut",
           destInput="spatialTopDownIn")
    n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut",
           destInput="temporalTopDownIn")

    prevRegion = "SP"
    prevRegionWidth = spParams['columnCount']

  if tpEnable:
    tpParams = tpParams.copy()
    if prevRegion == 'sensor':
      # TP sits directly on the sensor: its column count follows the
      # encoder width.
      tpParams['inputWidth'] = tpParams['columnCount'] = prevRegionWidth
    else:
      assert tpParams['columnCount'] == prevRegionWidth
      tpParams['inputWidth'] = tpParams['columnCount']
    self.__logger.debug("Adding TPRegion; tpParams: %r" % tpParams)
    n.addRegion("TP", "py.TPRegion", json.dumps(tpParams))

    # Link TP region
    n.link(prevRegion, "TP", "UniformLink", "")
    if prevRegion != "sensor":
      n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
             destInput="topDownIn")
    else:
      n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
             destInput="temporalTopDownIn")
    n.link("sensor", "TP", "UniformLink", "", srcOutput="resetOut",
           destInput="resetIn")

    prevRegion = "TP"
    prevRegionWidth = tpParams['inputWidth']

  if clParams is not None:
    clParams = clParams.copy()
    clRegionName = clParams.pop('regionName')
    self.__logger.debug("Adding %s; clParams: %r" % (clRegionName,
                                                     clParams))
    n.addRegion("Classifier", "py.%s" % str(clRegionName), json.dumps(clParams))

    n.link("sensor", "Classifier", "UniformLink", "", srcOutput="categoryOut",
           destInput="categoryIn")

    n.link(prevRegion, "Classifier", "UniformLink", "")

  if self.getInferenceType() == InferenceType.TemporalAnomaly:
    anomalyClParams = dict(
        trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
        anomalyThreshold=anomalyParams.get('autoDetectThreshold', None),
        cacheSize=anomalyParams.get('anomalyCacheRecords', None)
    )
    self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tpEnable)

  #--------------------------------------------------
  # NuPIC doesn't initialize the network until you try to run it
  # but users may want to access components in a setup callback
  n.initialize()

  # Stats collector is used to collect statistics about the various regions
  # as it goes along. The concept is very useful for debugging but not used
  # anymore.
  # TODO: remove, including NetworkInfo, DutyCycleStatistic, CLAStatistic
  #--------------------------------------------------
  # Create stats collectors for this network
  #
  # TODO: need to extract stats requests from description
  stats = []
  # Suppressing DutyCycleStatistic as there is no need for it at this time.
  #stats.append(DutyCycleStatistic())

  ## Why do we need a separate tiny class for NetworkInfo??
  result = NetworkInfo(net=n, statsCollectors=stats)
  return result
#############################################################################
#
# CLAModel Methods to support serialization
#
#############################################################################
def __getstate__(self):
  """
  Return serializable state. This function will return a version of the
  __dict__ with data that shouldn't be pickled stripped out. In particular,
  the CLA Network is stripped out because it has its own serialization
  mechanism (see _serializeExtraData()).

  See also: _serializeExtraData()
  """
  # Remove ephemeral member variables from state
  state = self.__dict__.copy()
  # Keep the stats collectors but drop the Network object itself.
  state["_netInfo"] = NetworkInfo(net=None,
                                  statsCollectors=self._netInfo.statsCollectors)
  # The mangled attribute names must be computed since we are inside a
  # name-mangled context ourselves.
  for ephemeral in [self.__manglePrivateMemberName("__restoringFromState"),
                    self.__manglePrivateMemberName("__logger")]:
    state.pop(ephemeral)
  return state
def __setstate__(self, state):
  """
  Set the state of ourself from a serialized state.

  Also migrates pickles produced by older (v1/v2) versions of this class
  to the current attribute layout.

  See also: _deSerializeExtraData
  """
  self.__dict__.update(state)

  # Mark beginning of restoration.
  #
  # self.__restoringFromState will be reset to False upon completion of
  # object restoration in _deSerializeExtraData()
  self.__restoringFromState = True

  # set up logging
  self.__logger = initLogger(self)

  # =========================================================================
  # TODO: Temporary migration solution (migrate from v1 pickles, which
  # predate the Model base-class attributes)
  if not hasattr(self, "_Model__inferenceType"):
    self.__restoringFromV1 = True
    self._hasSP = True
    if self.__temporalNetInfo is not None:
      self._Model__inferenceType = InferenceType.TemporalNextStep
      self._netInfo = self.__temporalNetInfo
      self._hasTP = True
    else:
      raise RuntimeError("The Nontemporal inference type is not supported")

    self._Model__inferenceArgs = {}
    self._Model__learningEnabled = True
    self._Model__inferenceEnabled = True

    # Remove obsolete members
    self.__dict__.pop("_CLAModel__encoderNetInfo", None)
    self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
    self.__dict__.pop("_CLAModel__temporalNetInfo", None)

  # -----------------------------------------------------------------------
  # Migrate from v2
  if not hasattr(self, "_netInfo"):
    self._hasSP = False
    self._hasTP = False
    if self.__encoderNetInfo is not None:
      self._netInfo = self.__encoderNetInfo
    elif self.__nonTemporalNetInfo is not None:
      self._netInfo = self.__nonTemporalNetInfo
      self._hasSP = True
    else:
      self._netInfo = self.__temporalNetInfo
      self._hasSP = True
      self._hasTP = True

    # Remove obsolete members
    self.__dict__.pop("_CLAModel__encoderNetInfo", None)
    self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
    self.__dict__.pop("_CLAModel__temporalNetInfo", None)

  # This gets filled in during the first infer because it can only be
  # determined at run-time
  self._classifierInputEncoder = None

  # Older pickles may predate these tunables; restore defaults.
  if not hasattr(self, '_minLikelihoodThreshold'):
    self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD

  if not hasattr(self, '_maxPredictionsPerStep'):
    self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP

  self.__logger.info("Restoring %s from state..." % self.__class__.__name__)
def _serializeExtraData(self, extraDataDir):
    """ [virtual method override] Called during serialization with an
    external directory path; saves the large binary network state there
    instead of going through pickle.

    extraDataDir:
                  Model's extra data directory path
    """
    # Make sure the destination exists before asking the network to save.
    makeDirectoryFromAbsolutePath(extraDataDir)

    # The network is written to a per-inference-type subdirectory.
    networkDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)

    logger = self.__logger
    logger.debug("Serializing network...")
    self._netInfo.net.save(networkDir)
    logger.info("Finished serializing network")
def _deSerializeExtraData(self, extraDataDir):
    """ [virtual method override] This method is called during deserialization
    (after __setstate__) with an external directory path that can be used to
    bypass pickle for loading large binary states.

    extraDataDir:
                  Model's extra data directory path
    """
    assert self.__restoringFromState

    # Check to make sure that our Network member wasn't restored from
    # serialized data
    assert (self._netInfo.net is None), "Network was already unpickled"

    # Restore the network from the state saved by _serializeExtraData()
    stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
    self.__logger.debug(
        "(%s) De-serializing network...", self)
    self._netInfo.net = Network(stateDir)
    self.__logger.info(
        "(%s) Finished de-serializing network", self)

    # NuPIC doesn't initialize the network until you try to run it
    # but users may want to access components in a setup callback
    self._netInfo.net.initialize()

    # Used for backwards compatibility for anomaly classification models.
    # Previous versions used the CLAModelClassifierHelper class for utilizing
    # the KNN classifier. Current version uses KNNAnomalyClassifierRegion to
    # encapsulate all the classifier functionality.
    if self.getInferenceType() == InferenceType.TemporalAnomaly:
        classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
        # BUG FIX: the original used `is` to compare strings, which tests
        # object identity and only worked by accident of CPython string
        # interning; string values must be compared with `==`.
        if classifierType == 'KNNClassifierRegion':
            anomalyClParams = dict(
                trainRecords=self._classifier_helper._autoDetectWaitRecords,
                anomalyThreshold=None,
                cacheSize=self._classifier_helper._history_length,
            )
            # Older helpers stored the threshold as an attribute; newer ones
            # expose it through an accessor.
            if '_classificationThreshold' in self._classifier_helper.__dict__:
                anomalyClParams['anomalyThreshold'] = (
                    self._classifier_helper._classificationThreshold)
            else:
                anomalyClParams['anomalyThreshold'] = (
                    self._classifier_helper.getAutoDetectThreshold())
            spEnable = (self._getSPRegion() is not None)
            tpEnable = True

            # Store original KNN region
            knnRegion = self._getAnomalyClassifier().getSelf()

            # Add new KNNAnomalyClassifierRegion
            self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
                                             spEnable, tpEnable)

            # Restore state
            self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
            self._getAnomalyClassifier().getSelf()._recordsCache = (
                self._classifier_helper.saved_states)
            self._getAnomalyClassifier().getSelf().saved_categories = (
                self._classifier_helper.saved_categories)
            self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion

            # Set TP to output necessary information
            self._getTPRegion().setParameter('anomalyMode', True)

            # Remove old classifier_helper
            del self._classifier_helper

            self._netInfo.net.initialize()

    # Mark end of restoration from state
    self.__restoringFromState = False
    self.__logger.info("(%s) Finished restoring from state", self)
def _addAnomalyClassifierRegion(self, network, params, spEnable, tpEnable):
    """
    Attaches an 'AnomalyClassifier' region to the network. Will remove current
    'AnomalyClassifier' region if it exists.

    Parameters
    -----------
    network - network to add the AnomalyClassifier region
    params - parameters to pass to the region
    spEnable - True if network has an SP region
    tpEnable - True if network has a TP region; Currently requires True
    """
    # Deep-copy so the caller's params dict is not mutated by the updates
    # and default-filling below.
    allParams = copy.deepcopy(params)
    # Fixed KNN configuration for anomaly classification (overrides any
    # same-named keys the caller supplied).
    knnParams = dict(k=1,
                     distanceMethod='rawOverlap',
                     distanceNorm=1,
                     doBinarization=1,
                     replaceDuplicates=0,
                     maxStoredPatterns=1000)
    allParams.update(knnParams)

    # Set defaults if not set
    if allParams['trainRecords'] is None:
        allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS
    if allParams['anomalyThreshold'] is None:
        allParams['anomalyThreshold'] = DEFAULT_ANOMALY_THRESHOLD
    if allParams['cacheSize'] is None:
        allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE

    # Remove current instance if already created (used for deserializing)
    # NOTE(review): removal targets self._netInfo.net while the new region is
    # added to the `network` argument; callers appear to always pass
    # self._netInfo.net, but confirm before reusing with a different network.
    if self._netInfo is not None and self._netInfo.net is not None \
            and self._getAnomalyClassifier() is not None:
        self._netInfo.net.removeRegion('AnomalyClassifier')

    network.addRegion("AnomalyClassifier",
                      "py.KNNAnomalyClassifierRegion",
                      json.dumps(allParams))

    # Attach link to SP
    if spEnable:
        network.link("SP", "AnomalyClassifier", "UniformLink", "",
                     srcOutput="bottomUpOut", destInput="spBottomUpOut")
    else:
        # No SP: feed the raw sensor output into the same input.
        network.link("sensor", "AnomalyClassifier", "UniformLink", "",
                     srcOutput="dataOut", destInput="spBottomUpOut")

    # Attach link to TP
    if tpEnable:
        network.link("TP", "AnomalyClassifier", "UniformLink", "",
                     srcOutput="topDownOut", destInput="tpTopDownOut")
        network.link("TP", "AnomalyClassifier", "UniformLink", "",
                     srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT")
    else:
        raise RuntimeError("TemporalAnomaly models require a TP region.")
def __getNetworkStateDirectory(self, extraDataDir):
    """
    Compute the directory used to save/load the CLA Network state.

    extraDataDir:
                  Model's extra data directory path

    Returns: Absolute directory path for saving CLA Network
    """
    if self.__restoringFromV1:
        # v1 checkpoints predate InferenceType labels in the leaf name.
        if self.getInferenceType() == InferenceType.TemporalNextStep:
            prefix = 'temporal'
        else:
            prefix = 'nonTemporal'
    else:
        prefix = InferenceType.getLabel(self.getInferenceType())
    return os.path.abspath(os.path.join(extraDataDir, prefix + "-network.nta"))
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
    """ Translate a private (double-underscore) member name into its
    class-mangled form.

    privateMemberName:
                  The private member name (e.g., "__logger")

    skipCheck:    Pass True to skip test for presence of the demangled member
                  in our instance.

    Returns:      The demangled member name (e.g., "_CLAModel__logger")
    """
    # The name must look like a Python private member: at least two leading
    # underscores, at most one trailing underscore.
    assert privateMemberName.startswith("__"), \
        "%r doesn't start with __" % privateMemberName
    assert not privateMemberName.startswith("___"), \
        "%r starts with ___" % privateMemberName
    assert not privateMemberName.endswith("__"), \
        "%r ends with more than one underscore" % privateMemberName

    realName = "_%s%s" % (self.__myClassName.lstrip("_"), privateMemberName)

    if not skipCheck:
        # getattr raises AttributeError if the demangled member is missing,
        # which is exactly the check we want; the value itself is unused.
        getattr(self, realName)

    return realName
###############################################################################
class DataBuffer(object):
    """
    A simple FIFO stack. Add data when it's available, and implement
    getNextRecordDict() so DataBuffer can be used as a DataSource in a
    CLA Network.

    Currently at most one record may be buffered at a time; the assertions
    below enforce that invariant (it could be lifted by removing them).
    """

    def __init__(self):
        self.stack = []

    def push(self, data):
        """Buffer a single record, storing a defensive copy of it."""
        assert not self.stack
        # Copy via the record's own constructor: the sensor's pre-encoding
        # filters (e.g., AutoResetFilter) may mutate what we hand out, and
        # our caller relies on the input record remaining unmodified.
        self.stack.append(data.__class__(data))

    def getNextRecordDict(self):
        """Return (and consume) the buffered record."""
        assert self.stack
        return self.stack.pop()
###############################################################################
class CLAStatistic(object):
    """
    Abstract interface for a statistic computed over a running CLA network:
    accumulate state via compute() and report results via getStats().
    """
    # Python 2-style abstract base class declaration; under Python 3 this
    # attribute is ignored (the class would need `metaclass=ABCMeta`).
    __metaclass__ = ABCMeta

    @abstractmethod
    def compute(self, net):
        """
        Compute the statistic represented by this object

        args:
          net: the CLA network that we wish to compute statistics for

        return:
          nothing
        """

    @abstractmethod
    def getStats(self):
        """
        return:
          a dict of key/value pairs of the form {<stat_name> : <stat_value>, ...}
        """
###############################################################################
class DutyCycleStatistic(CLAStatistic):
    """
    Accumulates per-cell activation counts of the SP output across samples
    and reports average/min/max cell duty cycles.
    """

    def __init__(self):
        # Number of compute() calls seen so far.
        self.numSamples = 0
        # Per-cell activation counts; lazily sized on first compute().
        self.coincActiveCount = None

    def compute(self, net):
        """Accumulate one sample of SP output activity from *net*."""
        self.numSamples += 1
        sensorRegion = net.regions['sensor']

        # Lazily size the counter array from the sensor encoder's width.
        if self.coincActiveCount is None:
            width = sensorRegion.getSelf().encoder.getWidth()
            self.coincActiveCount = numpy.zeros(width)

        # NOTE(review): upstream TODO flagged this output name as possibly
        # wrong; verify 'topDownOut' of the SP region is the intended signal.
        activeIndices = numpy.nonzero(net.regions['SP'].getOutputData('topDownOut'))
        self.coincActiveCount[activeIndices] += 1

    def getStats(self):
        """Return average/min/max cell duty cycles over the samples seen."""
        samples = self.numSamples
        return {
            'cellDutyCycleAvg': self.coincActiveCount.mean() / samples,
            'cellDutyCycleMin': self.coincActiveCount.min() / samples,
            'cellDutyCycleMax': self.coincActiveCount.max() / samples,
        }
| 0x0all/nupic | py/nupic/frameworks/opf/clamodel.py | Python | gpl-3.0 | 58,886 |
#importing integrate_html.py file
from integrate_html import*
import csv
import codecs
import re
import os
def parseCSV_getData(file1):  # total.csv
    """
    Parse the aggregated election-results CSV (e.g. total_updated.csv).

    file1: path to a CSV whose first two lines are headers and whose data
           rows look like:
           <state full name>,<state abbrev>,<D 2000>,<R 2000>,...,<D 2016>,<R 2016>

    Returns: dict mapping state abbreviation to
             [fullname, [2000, D, R], [2004, D, R], ..., [2016, D, R]],
             with the D/R vote totals kept as strings.
    """
    state_map = {}
    # Use a context manager so the handle is always closed (the original
    # opened the file and never closed it).
    with open(file1, "r") as data:
        data.readline()  # skip header line 1
        data.readline()  # skip header line 2
        for line in data:
            split = line.split(',')
            state_fullname = split[0]
            state = split[1]
            state_data = [state_fullname]
            year = 2000
            # Columns come in (D, R) pairs for the five elections 2000-2016.
            for i in range(2, 12, 2):
                D = split[i]
                R = split[i + 1]
                if i == 10:
                    # Last column: chop the trailing line terminator.
                    # NOTE(review): this assumes exactly two trailing chars;
                    # preserved as-is for compatibility with the data file --
                    # verify the input's line endings.
                    R = R[:-2]
                state_data.append([year, D, R])
                year += 4
            state_map[state] = state_data
    return state_map
#testing
#map1 = parseCSV_getData("total_updated.csv")
#k = map1['AL']
#print k
# output: ['Alabama', [2000, '692611', '941173'], [2004, '693933', '1176394'], [2008, '813479', '1266546'], [2012, '795696', '1255925'], [2016, '718084', '1306925']]
def make_file(first, end, state_abb):
    """
    Build the state-specific HTML fragment (heading, SVG county map, and the
    per-year D/R vote totals) for the state with abbreviation *state_abb*.

    NOTE(review): the `first` and `end` parameters are accepted but unused
    (kept for signature compatibility with split_file); the CSV is re-parsed
    on every call, which is wasteful when generating all 50 states.

    Relies on `space`, `space4`, `space6` and `addComma` from the
    `integrate_html` star import.
    """
    state_map = parseCSV_getData("total_updated.csv")
    data = state_map[state_abb]
    state_fullname = data[0]
    # Heading and map section.
    l1 = '<font color ="black"><h2>Data for ' + state_fullname + ' </h2></font>'
    l2 = '</br></br>'
    l3 = '<p class="alignright">2016 ' + state_fullname + ' poll results by county</p>'
    l4 = '</br>'
    # Inline <img> tag referencing the state's SVG map (e.g. "al.svg").
    l5 = '<img\n'; l6 = 'src="' + state_abb.lower() + '.svg"\n' ; l7 = 'height="600px"'
    l8 = 'width="600px"' ; l9 = '/>\n'
    l10 = '</br></br></br></br>\n'
    # Column header row for the Demo/Repub totals table.
    l10 += '<p class="alignleft">' + space4+ space6 +space4+ 'Demo ' + space4 + 'Repub</p>\n' + '</br></br>\n'
    data_size = len(data)
    data_lines = ''
    # One line per election year: "<year>: <D total> <R total>".
    for i in range(1, data_size):
        each = data[i]
        year = str(each[0]); D = addComma(each[1]); R = addComma(each[2])
        t1 = '<p class="alignleft">' + year + ':' + space + space
        t1 += D+ space + space + R + '</p>\n'
        t2 = '</br></br>\n'
        data_lines += t1 + t2
    all_lines = l1 + l2 + l3 + l4 + l5+ l6 + 'align = "right"\n' + l7 + l8 + l9 + l10 + data_lines
    return all_lines
def split_file(html_file, csv_file):
    """
    Generate one HTML page per state.

    html_file: template HTML containing '<!--For Parsing-->' /
               '<!--Parsing Ends-->' markers and a
               '<title>speech analysis</title>' placeholder.
    csv_file:  aggregated results CSV (two header lines, then one row per
               state).

    For each state row, the fragment from make_file() is spliced between the
    template's head and tail and written to states/<ABBR>.html via make_html().
    """
    # Context managers so neither handle leaks (the original never closed
    # either file).
    with codecs.open(html_file, 'r') as f:
        html_data = f.read()

    parsing_line = '<!--For Parsing-->'
    parsing_line2 = '<!--Parsing Ends-->'
    first = html_data.split(parsing_line)[0]
    end = html_data.split(parsing_line2)[1]

    with open(csv_file, "r") as data:
        data.readline()  # skip header line 1
        data.readline()  # skip header line 2
        for line in data:
            split = line.split(',')
            state_abb = split[1]
            state_fullname = split[0]
            lines = make_file(first, end, state_abb)
            whole_lines = first + lines + end
            # Replace the placeholder title with a state-specific one.
            title = '<title>speech analysis</title>'
            before = whole_lines.split(title)[0]
            after = whole_lines.split(title)[1]
            new_title = '<title>' + state_fullname + ' (' + state_abb + ') ' + '</title>\n'
            # NOTE(review): the new <title> is appended AFTER the document
            # body rather than replacing the placeholder in the <head>;
            # preserved as-is, but 'before + new_title + after' was likely
            # intended.
            make_html(before + after + new_title, state_abb)
def make_html(html_data, state_abb):
    """
    Write *html_data* to states/<state_abb>.html (e.g. "states/WY.html"),
    relative to the current working directory.
    """
    # Build the path directly instead of os.chdir()-ing into 'states':
    # chdir is not exception-safe (a failed open/write would leave the
    # process stranded in 'states/') and not thread-safe. A context manager
    # guarantees the file is closed.
    path = os.path.join('states', state_abb + ".html")
    with open(path, "w") as html_file:
        html_file.write(html_data)
# Script entry point: regenerate the per-state HTML pages from the template
# and the aggregated results CSV.
# NOTE(review): this call also runs at import time; consider guarding it
# with `if __name__ == "__main__":`.
split_file("state_temp.html", "total_updated.csv")
#
| ub-cse442/election-and-data-sci | make_html.py | Python | apache-2.0 | 3,302 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for assertions provided by C{SynchronousTestCase} and C{TestCase},
provided by L{twisted.trial.unittest}.
L{TestFailureTests} demonstrates that L{SynchronousTestCase.fail} works, so that
is the only method on C{twisted.trial.unittest.SynchronousTestCase} that is
initially assumed to work. The test classes are arranged so that the methods
demonstrated to work earlier in the file are used by those later in the file
(even though the runner will probably not run the tests in this order).
"""
from __future__ import division, absolute_import
import warnings
from pprint import pformat
import unittest as pyunit
from twisted.python.util import FancyEqMixin
from twisted.python.reflect import prefixedMethods, accumulateMethods
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version, getVersionString
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.internet.defer import Deferred, fail, succeed
class MockEquality(FancyEqMixin, object):
    """
    Test double whose equality semantics (supplied by L{FancyEqMixin})
    depend only on the C{name} attribute.
    """
    compareAttributes = ("name",)

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "MockEquality({})".format(self.name)
class ComparisonError(object):
    """
    An object which raises exceptions from its comparison methods.
    """

    def _error(self, other):
        # Any comparison attempt must surface as a ValueError so tests can
        # verify that assertion helpers do not swallow comparison failures.
        raise ValueError("Comparison is broken")

    __eq__ = _error
    __ne__ = _error
class TestFailureTests(pyunit.TestCase):
    """
    Tests for the most basic functionality of L{SynchronousTestCase}, for
    failing tests.

    This class contains tests to demonstrate that L{SynchronousTestCase.fail}
    can be used to fail a test, and that that failure is reflected in the test
    result object. This should be sufficient functionality so that further
    tests can be built on L{SynchronousTestCase} instead of
    L{unittest.TestCase}. This depends on L{unittest.TestCase} working.
    """
    class FailingTest(unittest.SynchronousTestCase):
        # Minimal always-failing test case used as the fixture under
        # observation.
        def test_fails(self):
            self.fail("This test fails.")

    def setUp(self):
        """
        Load a suite of one test which can be used to exercise the failure
        handling behavior.
        """
        # The suite is resolved from a dotted-name string, so these
        # components must match the real module/class names (renaming the
        # nested class would break the lookup).
        components = [
            __name__, self.__class__.__name__, self.FailingTest.__name__]
        self.loader = pyunit.TestLoader()
        self.suite = self.loader.loadTestsFromName(".".join(components))
        self.test = list(self.suite)[0]

    def test_fail(self):
        """
        L{SynchronousTestCase.fail} raises
        L{SynchronousTestCase.failureException} with the given argument.
        """
        try:
            self.test.fail("failed")
        except self.test.failureException as result:
            self.assertEqual("failed", str(result))
        else:
            self.fail(
                "SynchronousTestCase.fail method did not raise "
                "SynchronousTestCase.failureException")

    def test_failingExceptionFails(self):
        """
        When a test method raises L{SynchronousTestCase.failureException}, the test is
        marked as having failed on the L{TestResult}.
        """
        result = pyunit.TestResult()
        self.suite.run(result)
        # The run must record exactly one failure (not an error) against
        # our fixture test.
        self.failIf(result.wasSuccessful())
        self.assertEqual(result.errors, [])
        self.assertEqual(len(result.failures), 1)
        self.assertEqual(result.failures[0][0], self.test)
class AssertFalseTests(unittest.SynchronousTestCase):
    """
    Tests for L{SynchronousTestCase}'s C{assertFalse} and C{failIf} assertion
    methods.

    This is pretty paranoid. Still, a certain paranoia is healthy if you
    are testing a unit testing framework.

    @note: As of 11.2, C{assertFalse} is preferred over C{failIf}.
    """
    def _assertFalseFalse(self, method):
        """
        Perform the positive case test for C{failIf} or C{assertFalse}.

        @param method: The test method to test.
        """
        # Every falsy value must pass the assertion and be returned
        # unchanged.
        for notTrue in [0, 0.0, False, None, (), []]:
            result = method(notTrue, "failed on %r" % (notTrue,))
            if result != notTrue:
                self.fail("Did not return argument %r" % (notTrue,))

    def _assertFalseTrue(self, method):
        """
        Perform the negative case test for C{failIf} or C{assertFalse}.

        @param method: The test method to test.
        """
        # Every truthy value must fail the assertion with exactly the
        # message we supplied.
        for true in [1, True, 'cat', [1,2], (3,4)]:
            try:
                method(true, "failed on %r" % (true,))
            except self.failureException as e:
                if str(e) != "failed on %r" % (true,):
                    self.fail("Raised incorrect exception on %r: %r" % (true, e))
            else:
                self.fail("Call to failIf(%r) didn't fail" % (true,))

    def test_failIfFalse(self):
        """
        L{SynchronousTestCase.failIf} returns its argument if its argument is
        not considered true.
        """
        self._assertFalseFalse(self.failIf)

    def test_assertFalseFalse(self):
        """
        L{SynchronousTestCase.assertFalse} returns its argument if its argument
        is not considered true.
        """
        self._assertFalseFalse(self.assertFalse)

    def test_failIfTrue(self):
        """
        L{SynchronousTestCase.failIf} raises
        L{SynchronousTestCase.failureException} if its argument is considered
        true.
        """
        self._assertFalseTrue(self.failIf)

    def test_assertFalseTrue(self):
        """
        L{SynchronousTestCase.assertFalse} raises
        L{SynchronousTestCase.failureException} if its argument is considered
        true.
        """
        self._assertFalseTrue(self.assertFalse)
class AssertTrueTests(unittest.SynchronousTestCase):
    """
    Tests for L{SynchronousTestCase}'s C{assertTrue} and C{failUnless} assertion
    methods.

    This is pretty paranoid. Still, a certain paranoia is healthy if you
    are testing a unit testing framework.

    @note: As of 11.2, C{assertTrue} is preferred over C{failUnless}.
    """
    def _assertTrueFalse(self, method):
        """
        Perform the negative case test for C{assertTrue} and C{failUnless}.

        @param method: The test method to test.
        """
        # Every falsy value must fail the assertion with exactly the
        # message we supplied.
        for notTrue in [0, 0.0, False, None, (), []]:
            try:
                method(notTrue, "failed on %r" % (notTrue,))
            except self.failureException as e:
                if str(e) != "failed on %r" % (notTrue,):
                    self.fail(
                        "Raised incorrect exception on %r: %r" % (notTrue, e))
            else:
                self.fail(
                    "Call to %s(%r) didn't fail" % (method.__name__, notTrue,))

    def _assertTrueTrue(self, method):
        """
        Perform the positive case test for C{assertTrue} and C{failUnless}.

        @param method: The test method to test.
        """
        # Every truthy value must pass the assertion and be returned
        # unchanged.
        for true in [1, True, 'cat', [1,2], (3,4)]:
            result = method(true, "failed on %r" % (true,))
            if result != true:
                self.fail("Did not return argument %r" % (true,))

    def test_assertTrueFalse(self):
        """
        L{SynchronousTestCase.assertTrue} raises
        L{SynchronousTestCase.failureException} if its argument is not
        considered true.
        """
        self._assertTrueFalse(self.assertTrue)

    def test_failUnlessFalse(self):
        """
        L{SynchronousTestCase.failUnless} raises
        L{SynchronousTestCase.failureException} if its argument is not
        considered true.
        """
        self._assertTrueFalse(self.failUnless)

    def test_assertTrueTrue(self):
        """
        L{SynchronousTestCase.assertTrue} returns its argument if its argument
        is considered true.
        """
        self._assertTrueTrue(self.assertTrue)

    def test_failUnlessTrue(self):
        """
        L{SynchronousTestCase.failUnless} returns its argument if its argument
        is considered true.
        """
        self._assertTrueTrue(self.failUnless)
class TestSynchronousAssertions(unittest.SynchronousTestCase):
    """
    Tests for L{SynchronousTestCase}'s assertion methods. That is, failUnless*,
    failIf*, assert* (not covered by other more specific test classes).

    Note: As of 11.2, assertEqual is preferred over the failUnlessEqual(s)
    variants. Tests have been modified to reflect this preference.

    This is pretty paranoid. Still, a certain paranoia is healthy if you are
    testing a unit testing framework.
    """
    def _testEqualPair(self, first, second):
        """
        Assert that C{assertEqual} passes for two equal values and returns
        its first argument.
        """
        x = self.assertEqual(first, second)
        if x != first:
            self.fail("assertEqual should return first parameter")

    def _testUnequalPair(self, first, second):
        """
        Assert that C{assertEqual} fails for two unequal values, with exactly
        the expected failure message.
        """
        try:
            self.assertEqual(first, second)
        except self.failureException as e:
            expected = 'not equal:\na = %s\nb = %s\n' % (
                pformat(first), pformat(second))
            if str(e) != expected:
                self.fail("Expected: %r; Got: %s" % (expected, str(e)))
        else:
            self.fail("Call to assertEqual(%r, %r) didn't fail"
                      % (first, second))

    def test_assertEqual_basic(self):
        """
        C{assertEqual} distinguishes equal from unequal builtin values.
        """
        self._testEqualPair('cat', 'cat')
        self._testUnequalPair('cat', 'dog')
        self._testEqualPair([1], [1])
        self._testUnequalPair([1], 'orange')

    def test_assertEqual_custom(self):
        """
        C{assertEqual} honors a user-defined equality implementation.
        """
        x = MockEquality('first')
        y = MockEquality('second')
        z = MockEquality('first')
        self._testEqualPair(x, x)
        self._testEqualPair(x, z)
        self._testUnequalPair(x, y)
        self._testUnequalPair(y, z)

    def test_assertEqualMessage(self):
        """
        When a message is passed to L{assertEqual}, it is included in the
        error message.
        """
        exception = self.assertRaises(
            self.failureException, self.assertEqual,
            'foo', 'bar', 'message')
        self.assertEqual(
            str(exception),
            "message\nnot equal:\na = 'foo'\nb = 'bar'\n")

    def test_assertEqualNoneMessage(self):
        """
        If a message is specified as C{None}, it is not included in the error
        message of L{assertEqual}.
        """
        exception = self.assertRaises(
            self.failureException, self.assertEqual, 'foo', 'bar', None)
        self.assertEqual(str(exception), "not equal:\na = 'foo'\nb = 'bar'\n")

    def test_assertEqual_incomparable(self):
        """
        An exception raised by the comparison itself propagates out of
        C{assertEqual} instead of being reported as a test failure.
        """
        apple = ComparisonError()
        orange = ["orange"]
        try:
            self.assertEqual(apple, orange)
        except self.failureException:
            self.fail("Fail raised when ValueError ought to have been raised.")
        except ValueError:
            # good. error not swallowed
            pass
        else:
            self.fail("Comparing %r and %r should have raised an exception"
                      % (apple, orange))

    def _raiseError(self, error):
        """Helper that raises the given exception type."""
        raise error

    def test_failUnlessRaises_expected(self):
        """
        C{failUnlessRaises} returns the instance of the expected exception.
        """
        x = self.failUnlessRaises(ValueError, self._raiseError, ValueError)
        self.failUnless(isinstance(x, ValueError),
                        "Expect failUnlessRaises to return instance of raised "
                        "exception.")

    def test_failUnlessRaises_unexpected(self):
        """
        An unexpected exception type surfaces as a test failure rather than
        being re-raised by C{failUnlessRaises}.
        """
        try:
            self.failUnlessRaises(ValueError, self._raiseError, TypeError)
        except TypeError:
            self.fail("failUnlessRaises shouldn't re-raise unexpected "
                      "exceptions")
        except self.failureException:
            # what we expect
            pass
        else:
            self.fail("Expected exception wasn't raised. Should have failed")

    def test_failUnlessRaises_noException(self):
        """
        C{failUnlessRaises} fails, with an informative message, when the
        callable raises nothing.
        """
        try:
            self.failUnlessRaises(ValueError, lambda : None)
        except self.failureException as e:
            self.assertEqual(str(e),
                             'ValueError not raised (None returned)')
        else:
            self.fail("Exception not raised. Should have failed")

    def test_failUnlessRaises_failureException(self):
        """
        C{failUnlessRaises} can expect C{failureException} itself, and still
        fails if a different exception is raised instead.
        """
        x = self.failUnlessRaises(self.failureException, self._raiseError,
                                  self.failureException)
        self.failUnless(isinstance(x, self.failureException),
                        "Expected %r instance to be returned"
                        % (self.failureException,))
        try:
            x = self.failUnlessRaises(self.failureException, self._raiseError,
                                      ValueError)
        except self.failureException:
            # what we expect
            pass
        else:
            self.fail("Should have raised exception")

    def test_failIfEqual_basic(self):
        """
        C{failIfEqual} returns its first argument for unequal values and
        fails for equal or identical ones.
        """
        x, y, z = [1], [2], [1]
        ret = self.failIfEqual(x, y)
        self.assertEqual(ret, x,
                         "failIfEqual should return first parameter")
        self.failUnlessRaises(self.failureException,
                              self.failIfEqual, x, x)
        self.failUnlessRaises(self.failureException,
                              self.failIfEqual, x, z)

    def test_failIfEqual_customEq(self):
        """
        C{failIfEqual} honors a user-defined equality implementation.
        """
        x = MockEquality('first')
        y = MockEquality('second')
        z = MockEquality('fecund')
        ret = self.failIfEqual(x, y)
        self.assertEqual(ret, x,
                         "failIfEqual should return first parameter")
        self.failUnlessRaises(self.failureException,
                              self.failIfEqual, x, x)
        self.failIfEqual(x, z, "__ne__ should make these not equal")

    def test_failIfIdenticalPositive(self):
        """
        C{failIfIdentical} returns its first argument if its first and second
        arguments are not the same object.
        """
        x = object()
        y = object()
        result = self.failIfIdentical(x, y)
        self.assertEqual(x, result)

    def test_failIfIdenticalNegative(self):
        """
        C{failIfIdentical} raises C{failureException} if its first and second
        arguments are the same object.
        """
        x = object()
        self.failUnlessRaises(self.failureException,
                              self.failIfIdentical, x, x)

    def test_failUnlessIdentical(self):
        """
        C{failUnlessIdentical} passes only for the very same object; equal
        but distinct objects fail.
        """
        x, y, z = [1], [1], [2]
        ret = self.failUnlessIdentical(x, x)
        self.assertEqual(ret, x,
                         'failUnlessIdentical should return first '
                         'parameter')
        self.failUnlessRaises(self.failureException,
                              self.failUnlessIdentical, x, y)
        self.failUnlessRaises(self.failureException,
                              self.failUnlessIdentical, x, z)

    def test_failUnlessApproximates(self):
        """
        C{failUnlessApproximates} compares within the given tolerance and
        returns its first argument on success.
        """
        x, y, z = 1.0, 1.1, 1.2
        self.failUnlessApproximates(x, x, 0.2)
        ret = self.failUnlessApproximates(x, y, 0.2)
        self.assertEqual(ret, x, "failUnlessApproximates should return "
                         "first parameter")
        self.failUnlessRaises(self.failureException,
                              self.failUnlessApproximates, x, z, 0.1)
        self.failUnlessRaises(self.failureException,
                              self.failUnlessApproximates, x, y, 0.1)

    def test_failUnlessAlmostEqual(self):
        """
        C{failUnlessAlmostEqual} compares to the given number of decimal
        places and returns its first argument on success.
        """
        precision = 5
        x = 8.000001
        y = 8.00001
        z = 8.000002
        self.failUnlessAlmostEqual(x, x, precision)
        ret = self.failUnlessAlmostEqual(x, z, precision)
        self.assertEqual(ret, x, "failUnlessAlmostEqual should return "
                         "first parameter (%r, %r)" % (ret, x))
        self.failUnlessRaises(self.failureException,
                              self.failUnlessAlmostEqual, x, y, precision)

    def test_failIfAlmostEqual(self):
        """
        C{failIfAlmostEqual} is the negation of C{failUnlessAlmostEqual}:
        it fails when values agree to the given precision.
        """
        precision = 5
        x = 8.000001
        y = 8.00001
        z = 8.000002
        ret = self.failIfAlmostEqual(x, y, precision)
        self.assertEqual(ret, x, "failIfAlmostEqual should return "
                         "first parameter (%r, %r)" % (ret, x))
        self.failUnlessRaises(self.failureException,
                              self.failIfAlmostEqual, x, x, precision)
        self.failUnlessRaises(self.failureException,
                              self.failIfAlmostEqual, x, z, precision)

    def test_failUnlessSubstring(self):
        """
        C{failUnlessSubstring} passes when the first string occurs within the
        second, and returns the first string.
        """
        x = "cat"
        y = "the dog sat"
        z = "the cat sat"
        self.failUnlessSubstring(x, x)
        ret = self.failUnlessSubstring(x, z)
        self.assertEqual(ret, x, 'should return first parameter')
        self.failUnlessRaises(self.failureException,
                              self.failUnlessSubstring, x, y)
        self.failUnlessRaises(self.failureException,
                              self.failUnlessSubstring, z, x)

    def test_failIfSubstring(self):
        """
        C{failIfSubstring} passes when the first string does not occur within
        the second, and returns the first string.
        """
        x = "cat"
        y = "the dog sat"
        z = "the cat sat"
        self.failIfSubstring(z, x)
        ret = self.failIfSubstring(x, y)
        self.assertEqual(ret, x, 'should return first parameter')
        self.failUnlessRaises(self.failureException,
                              self.failIfSubstring, x, x)
        self.failUnlessRaises(self.failureException,
                              self.failIfSubstring, x, z)

    def test_assertIs(self):
        """
        L{assertIs} passes if two objects are identical.
        """
        a = MockEquality("first")
        self.assertIs(a, a)

    def test_assertIsError(self):
        """
        L{assertIs} fails if two objects are not identical.
        """
        a, b = MockEquality("first"), MockEquality("first")
        # Equal by value, but distinct objects -- identity must fail.
        self.assertEqual(a, b)
        self.assertRaises(self.failureException, self.assertIs, a, b)

    def test_assertIsNot(self):
        """
        L{assertIsNot} passes if two objects are not identical.
        """
        a, b = MockEquality("first"), MockEquality("first")
        self.assertEqual(a, b)
        self.assertIsNot(a, b)

    def test_assertIsNotError(self):
        """
        L{assertIsNot} fails if two objects are identical.
        """
        a = MockEquality("first")
        self.assertRaises(self.failureException, self.assertIsNot, a, a)

    def test_assertIsInstance(self):
        """
        Test a true condition of assertIsInstance.
        """
        A = type('A', (object,), {})
        a = A()
        self.assertIsInstance(a, A)

    def test_assertIsInstanceMultipleClasses(self):
        """
        Test a true condition of assertIsInstance with multiple classes.
        """
        A = type('A', (object,), {})
        B = type('B', (object,), {})
        a = A()
        self.assertIsInstance(a, (A, B))

    def test_assertIsInstanceError(self):
        """
        Test an error with assertIsInstance.
        """
        A = type('A', (object,), {})
        B = type('B', (object,), {})
        a = A()
        self.assertRaises(self.failureException, self.assertIsInstance, a, B)

    def test_assertIsInstanceErrorMultipleClasses(self):
        """
        Test an error with assertIsInstance and multiple classes.
        """
        A = type('A', (object,), {})
        B = type('B', (object,), {})
        C = type('C', (object,), {})
        a = A()
        self.assertRaises(self.failureException, self.assertIsInstance, a, (B, C))

    def test_assertIsInstanceCustomMessage(self):
        """
        If L{TestCase.assertIsInstance} is passed a custom message as its 3rd
        argument, the message is included in the failure exception raised when
        the assertion fails.
        """
        exc = self.assertRaises(
            self.failureException,
            self.assertIsInstance, 3, str, "Silly assertion")
        self.assertIn("Silly assertion", str(exc))

    def test_assertNotIsInstance(self):
        """
        Test a true condition of assertNotIsInstance.
        """
        A = type('A', (object,), {})
        B = type('B', (object,), {})
        a = A()
        self.assertNotIsInstance(a, B)

    def test_assertNotIsInstanceMultipleClasses(self):
        """
        Test a true condition of assertNotIsInstance and multiple classes.
        """
        A = type('A', (object,), {})
        B = type('B', (object,), {})
        C = type('C', (object,), {})
        a = A()
        self.assertNotIsInstance(a, (B, C))

    def test_assertNotIsInstanceError(self):
        """
        Test an error with assertNotIsInstance.
        """
        A = type('A', (object,), {})
        a = A()
        error = self.assertRaises(self.failureException,
                                  self.assertNotIsInstance, a, A)
        self.assertEqual(str(error), "%r is an instance of %s" % (a, A))

    def test_assertNotIsInstanceErrorMultipleClasses(self):
        """
        Test an error with assertNotIsInstance and multiple classes.
        """
        A = type('A', (object,), {})
        B = type('B', (object,), {})
        a = A()
        self.assertRaises(self.failureException, self.assertNotIsInstance, a, (A, B))

    def test_assertDictEqual(self):
        """
        L{twisted.trial.unittest.TestCase} supports the C{assertDictEqual}
        method inherited from the standard library in Python 2.7.
        """
        self.assertDictEqual({'a': 1}, {'a': 1})
    # Skip the test on Python versions whose unittest lacks assertDictEqual.
    if getattr(unittest.SynchronousTestCase, 'assertDictEqual', None) is None:
        test_assertDictEqual.skip = (
            "assertDictEqual is not available on this version of Python")
class WarningAssertionTests(unittest.SynchronousTestCase):
def test_assertWarns(self):
"""
Test basic assertWarns report.
"""
def deprecated(a):
warnings.warn("Woo deprecated", category=DeprecationWarning)
return a
r = self.assertWarns(DeprecationWarning, "Woo deprecated", __file__,
deprecated, 123)
self.assertEqual(r, 123)
def test_assertWarnsRegistryClean(self):
"""
Test that assertWarns cleans the warning registry, so the warning is
not swallowed the second time.
"""
def deprecated(a):
warnings.warn("Woo deprecated", category=DeprecationWarning)
return a
r1 = self.assertWarns(DeprecationWarning, "Woo deprecated", __file__,
deprecated, 123)
self.assertEqual(r1, 123)
# The warning should be raised again
r2 = self.assertWarns(DeprecationWarning, "Woo deprecated", __file__,
deprecated, 321)
self.assertEqual(r2, 321)
def test_assertWarnsError(self):
"""
Test assertWarns failure when no warning is generated.
"""
def normal(a):
return a
self.assertRaises(self.failureException,
self.assertWarns, DeprecationWarning, "Woo deprecated", __file__,
normal, 123)
def test_assertWarnsWrongCategory(self):
"""
Test assertWarns failure when the category is wrong.
"""
def deprecated(a):
warnings.warn("Foo deprecated", category=DeprecationWarning)
return a
self.assertRaises(self.failureException,
self.assertWarns, UserWarning, "Foo deprecated", __file__,
deprecated, 123)
def test_assertWarnsWrongMessage(self):
"""
Test assertWarns failure when the message is wrong.
"""
def deprecated(a):
warnings.warn("Foo deprecated", category=DeprecationWarning)
return a
self.assertRaises(self.failureException,
self.assertWarns, DeprecationWarning, "Bar deprecated", __file__,
deprecated, 123)
def test_assertWarnsWrongFile(self):
"""
If the warning emitted by a function refers to a different file than is
passed to C{assertWarns}, C{failureException} is raised.
"""
def deprecated(a):
# stacklevel=2 points at the direct caller of the function. The
# way assertRaises is invoked below, the direct caller will be
# something somewhere in trial, not something in this file. In
# Python 2.5 and earlier, stacklevel of 0 resulted in a warning
# pointing to the warnings module itself. Starting in Python 2.6,
# stacklevel of 0 and 1 both result in a warning pointing to *this*
# file, presumably due to the fact that the warn function is
# implemented in C and has no convenient Python
# filename/linenumber.
warnings.warn(
"Foo deprecated", category=DeprecationWarning, stacklevel=2)
self.assertRaises(
self.failureException,
# Since the direct caller isn't in this file, try to assert that
# the warning *does* point to this file, so that assertWarns raises
# an exception.
self.assertWarns, DeprecationWarning, "Foo deprecated", __file__,
deprecated, 123)
def test_assertWarnsOnClass(self):
"""
Test assertWarns works when creating a class instance.
"""
class Warn:
def __init__(self):
warnings.warn("Do not call me", category=RuntimeWarning)
r = self.assertWarns(RuntimeWarning, "Do not call me", __file__,
Warn)
self.assertTrue(isinstance(r, Warn))
r = self.assertWarns(RuntimeWarning, "Do not call me", __file__,
Warn)
self.assertTrue(isinstance(r, Warn))
def test_assertWarnsOnMethod(self):
    """
    C{assertWarns} works when the callable is a bound instance method and
    forwards the method's return value.
    """
    class Warn:
        def deprecated(self, a):
            warnings.warn("Bar deprecated", category=DeprecationWarning)
            return a

    instance = Warn()
    # Assert twice to verify repeated use keeps working.
    for _ in range(2):
        result = self.assertWarns(
            DeprecationWarning, "Bar deprecated", __file__,
            instance.deprecated, 321)
        self.assertEqual(result, 321)
def test_assertWarnsOnCall(self):
    """
    C{assertWarns} accepts a callable instance (one defining C{__call__})
    and forwards its return value.
    """
    class Warn:
        def __call__(self, a):
            warnings.warn("Egg deprecated", category=DeprecationWarning)
            return a

    callableInstance = Warn()
    # Assert twice to verify repeated use keeps working.
    for _ in range(2):
        result = self.assertWarns(
            DeprecationWarning, "Egg deprecated", __file__,
            callableInstance, 321)
        self.assertEqual(result, 321)
def test_assertWarnsFilter(self):
    """
    Test assertWarns on a warning filtered by default.
    """
    def deprecated(a):
        # PendingDeprecationWarning is suppressed by the default warning
        # filters; assertWarns must still observe it.
        warnings.warn("Woo deprecated", category=PendingDeprecationWarning)
        return a
    r = self.assertWarns(PendingDeprecationWarning, "Woo deprecated",
                         __file__, deprecated, 123)
    self.assertEqual(r, 123)
def test_assertWarnsMultipleWarnings(self):
    """
    C{assertWarns} does not raise an exception if the function it is passed
    triggers the same warning more than once.
    """
    def emitWarning():
        warnings.warn("Woo deprecated", category=PendingDeprecationWarning)

    def emitTwice():
        # The very same warning, emitted twice in a single call.
        for _ in range(2):
            emitWarning()

    self.assertWarns(
        PendingDeprecationWarning, "Woo deprecated", __file__, emitTwice)
def test_assertWarnsDifferentWarnings(self):
    """
    For now, assertWarns is unable to handle multiple different warnings,
    so it should raise an exception if it's the case.
    """
    def deprecated(a):
        # Two warnings of *different* categories in one call.
        warnings.warn("Woo deprecated", category=DeprecationWarning)
        warnings.warn("Another one", category=PendingDeprecationWarning)
    e = self.assertRaises(self.failureException,
        self.assertWarns, DeprecationWarning, "Woo deprecated",
        __file__, deprecated, 123)
    self.assertEqual(str(e), "Can't handle different warnings")
def test_assertWarnsAfterUnassertedWarning(self):
    """
    Warnings emitted before L{TestCase.assertWarns} is called do not get
    flushed and do not alter the behavior of L{TestCase.assertWarns}.
    """
    class TheWarning(Warning):
        pass

    def f(message):
        warnings.warn(message, category=TheWarning)

    # Emit an unrelated warning first; assertWarns must ignore it.
    f("foo")
    self.assertWarns(TheWarning, "bar", __file__, f, "bar")
    # The earlier, unasserted warning is still pending and can be
    # flushed now.
    [warning] = self.flushWarnings([f])
    self.assertEqual(warning['message'], "foo")
class TestResultOfAssertions(unittest.SynchronousTestCase):
    """
    Tests for L{SynchronousTestCase.successResultOf},
    L{SynchronousTestCase.failureResultOf}, and
    L{SynchronousTestCase.assertNoResult}.
    """
    # Shared fixtures: an arbitrary success result and a fixed Failure.
    result = object()
    failure = Failure(Exception("Bad times"))

    def test_withoutSuccessResult(self):
        """
        L{SynchronousTestCase.successResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with no current result.
        """
        self.assertRaises(
            self.failureException, self.successResultOf, Deferred())

    def test_successResultOfWithFailure(self):
        """
        L{SynchronousTestCase.successResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with a failure result.
        """
        self.assertRaises(
            self.failureException, self.successResultOf, fail(self.failure))

    def test_successResultOfWithFailureHasTraceback(self):
        """
        L{SynchronousTestCase.successResultOf} raises a
        L{SynchronousTestCase.failureException} that has the original failure
        traceback when called with a L{Deferred} with a failure result.
        """
        try:
            self.successResultOf(fail(self.failure))
        except self.failureException as e:
            self.assertIn(self.failure.getTraceback(), str(e))
        else:
            # Guard against a vacuous pass: successResultOf must raise.
            self.fail("successResultOf did not raise failureException")

    def test_withoutFailureResult(self):
        """
        L{SynchronousTestCase.failureResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with no current result.
        """
        self.assertRaises(
            self.failureException, self.failureResultOf, Deferred())

    def test_failureResultOfWithSuccess(self):
        """
        L{SynchronousTestCase.failureResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with a success result.
        """
        self.assertRaises(
            self.failureException, self.failureResultOf, succeed(self.result))

    def test_failureResultOfWithWrongFailure(self):
        """
        L{SynchronousTestCase.failureResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with a failure type that was not expected.
        """
        self.assertRaises(
            self.failureException, self.failureResultOf, fail(self.failure),
            KeyError)

    def test_failureResultOfWithWrongFailureOneExpectedFailure(self):
        """
        L{SynchronousTestCase.failureResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with a failure type that was not expected, and the
        L{SynchronousTestCase.failureException} message contains the original
        failure traceback as well as the expected failure type
        """
        try:
            self.failureResultOf(fail(self.failure), KeyError)
        except self.failureException as e:
            self.assertIn(self.failure.getTraceback(), str(e))
            self.assertIn(
                "Failure of type ({0}.{1}) expected on".format(
                    KeyError.__module__, KeyError.__name__),
                str(e))
        else:
            # Guard against a vacuous pass: failureResultOf must raise.
            self.fail("failureResultOf did not raise failureException")

    def test_failureResultOfWithWrongFailureMultiExpectedFailure(self):
        """
        L{SynchronousTestCase.failureResultOf} raises
        L{SynchronousTestCase.failureException} when called with a L{Deferred}
        with a failure type that was not expected, and the
        L{SynchronousTestCase.failureException} message contains the original
        failure traceback as well as the expected failure types in the error
        message
        """
        try:
            self.failureResultOf(fail(self.failure), KeyError, IOError)
        except self.failureException as e:
            self.assertIn(self.failure.getTraceback(), str(e))
            self.assertIn(
                "Failure of type ({0}.{1} or {2}.{3}) expected on".format(
                    KeyError.__module__, KeyError.__name__,
                    IOError.__module__, IOError.__name__),
                str(e))
        else:
            # Guard against a vacuous pass: failureResultOf must raise.
            self.fail("failureResultOf did not raise failureException")

    def test_withSuccessResult(self):
        """
        When passed a L{Deferred} which currently has a result (ie,
        L{Deferred.addCallback} would cause the added callback to be called
        before C{addCallback} returns), L{SynchronousTestCase.successResultOf}
        returns that result.
        """
        self.assertIdentical(
            self.result, self.successResultOf(succeed(self.result)))

    def test_withExpectedFailureResult(self):
        """
        When passed a L{Deferred} which currently has a L{Failure} result (ie,
        L{Deferred.addErrback} would cause the added errback to be called
        before C{addErrback} returns), L{SynchronousTestCase.failureResultOf}
        returns that L{Failure} if that L{Failure}'s type is expected.
        """
        self.assertIdentical(
            self.failure,
            self.failureResultOf(fail(self.failure), self.failure.type,
                                 KeyError))

    def test_withFailureResult(self):
        """
        When passed a L{Deferred} which currently has a L{Failure} result
        (ie, L{Deferred.addErrback} would cause the added errback to be called
        before C{addErrback} returns), L{SynchronousTestCase.failureResultOf}
        returns that L{Failure}.
        """
        self.assertIdentical(
            self.failure, self.failureResultOf(fail(self.failure)))

    def test_assertNoResultSuccess(self):
        """
        When passed a L{Deferred} which currently has a success result (see
        L{test_withSuccessResult}), L{SynchronousTestCase.assertNoResult} raises
        L{SynchronousTestCase.failureException}.
        """
        self.assertRaises(
            self.failureException, self.assertNoResult, succeed(self.result))

    def test_assertNoResultFailure(self):
        """
        When passed a L{Deferred} which currently has a failure result (see
        L{test_withFailureResult}), L{SynchronousTestCase.assertNoResult} raises
        L{SynchronousTestCase.failureException}.
        """
        self.assertRaises(
            self.failureException, self.assertNoResult, fail(self.failure))

    def test_assertNoResult(self):
        """
        When passed a L{Deferred} with no current result,
        L{SynchronousTestCase.assertNoResult} does not raise.
        """
        self.assertNoResult(Deferred())

    def test_assertNoResultPropagatesSuccess(self):
        """
        When passed a L{Deferred} with no current result, which is then
        fired with a success result, L{SynchronousTestCase.assertNoResult}
        doesn't modify the result of the L{Deferred}.
        """
        d = Deferred()
        self.assertNoResult(d)
        d.callback(self.result)
        self.assertEqual(self.result, self.successResultOf(d))

    def test_assertNoResultPropagatesLaterFailure(self):
        """
        When passed a L{Deferred} with no current result, which is then
        fired with a L{Failure} result, L{SynchronousTestCase.assertNoResult}
        doesn't modify the result of the L{Deferred}.
        """
        d = Deferred()
        self.assertNoResult(d)
        d.errback(self.failure)
        self.assertEqual(self.failure, self.failureResultOf(d))

    def test_assertNoResultSwallowsImmediateFailure(self):
        """
        When passed a L{Deferred} which currently has a L{Failure} result,
        L{SynchronousTestCase.assertNoResult} changes the result of the
        L{Deferred} to a success.
        """
        d = fail(self.failure)
        try:
            self.assertNoResult(d)
        except self.failureException:
            pass
        else:
            # Guard against a vacuous pass: a Deferred with a result must
            # make assertNoResult raise.
            self.fail("assertNoResult did not raise failureException")
        self.assertEqual(None, self.successResultOf(d))
class TestAssertionNames(unittest.SynchronousTestCase):
    """
    Tests for consistency of naming within TestCase assertion methods
    """
    def _getAsserts(self):
        # Gather every assert* method; accumulateMethods strips the
        # 'assert' prefix, so the keys are suffixes ('Equal', 'In', ...).
        dct = {}
        accumulateMethods(self, dct, 'assert')
        # Skip the assertNot* family and the bare 'assert_' alias.
        return [ dct[k] for k in dct if not k.startswith('Not') and k != '_' ]

    def _name(self, x):
        # Sort key helper: a method's function name.
        return x.__name__

    def test_failUnlessMatchesAssert(self):
        """
        The C{failUnless*} test methods are a subset of the C{assert*} test
        methods. This is intended to ensure that methods using the
        I{failUnless} naming scheme are not added without corresponding methods
        using the I{assert} naming scheme. The I{assert} naming scheme is
        preferred, and new I{assert}-prefixed methods may be added without
        corresponding I{failUnless}-prefixed methods.
        """
        asserts = set(self._getAsserts())
        failUnlesses = set(prefixedMethods(self, 'failUnless'))
        self.assertEqual(
            failUnlesses, asserts.intersection(failUnlesses))

    def test_failIf_matches_assertNot(self):
        # Every assertNot* method must have a matching failIf* alias.
        asserts = prefixedMethods(unittest.SynchronousTestCase, 'assertNot')
        failIfs = prefixedMethods(unittest.SynchronousTestCase, 'failIf')
        self.assertEqual(sorted(asserts, key=self._name),
                         sorted(failIfs, key=self._name))

    def test_equalSpelling(self):
        # Each *Equal method must have an *Equals alias (and vice versa),
        # and the two names must compare equal.
        for name, value in vars(self).items():
            if not callable(value):
                continue
            if name.endswith('Equal'):
                self.failUnless(hasattr(self, name+'s'),
                                "%s but no %ss" % (name, name))
                self.assertEqual(value, getattr(self, name+'s'))
            if name.endswith('Equals'):
                self.failUnless(hasattr(self, name[:-1]),
                                "%s but no %s" % (name, name[:-1]))
                self.assertEqual(value, getattr(self, name[:-1]))
class TestCallDeprecated(unittest.SynchronousTestCase):
    """
    Test use of the L{SynchronousTestCase.callDeprecated} method with version objects.
    """
    # Version at which the module-level fixtures below are deprecated.
    version = Version('Twisted', 8, 0, 0)

    def test_callDeprecatedSuppressesWarning(self):
        """
        callDeprecated calls a deprecated callable, suppressing the
        deprecation warning.
        """
        self.callDeprecated(self.version, oldMethod, 'foo')
        self.assertEqual(
            self.flushWarnings(), [], "No warnings should be shown")

    def test_callDeprecatedCallsFunction(self):
        """
        L{callDeprecated} actually calls the callable passed to it, and
        forwards the result.
        """
        result = self.callDeprecated(self.version, oldMethod, 'foo')
        self.assertEqual('foo', result)

    def test_failsWithoutDeprecation(self):
        """
        L{callDeprecated} raises a test failure if the callable is not
        deprecated.
        """
        def notDeprecated():
            pass
        exception = self.assertRaises(
            self.failureException,
            self.callDeprecated, self.version, notDeprecated)
        self.assertEqual(
            "%r is not deprecated." % notDeprecated, str(exception))

    def test_failsWithIncorrectDeprecation(self):
        """
        callDeprecated raises a test failure if the callable was deprecated
        at a different version to the one expected.
        """
        differentVersion = Version('Foo', 1, 2, 3)
        exception = self.assertRaises(
            self.failureException,
            self.callDeprecated,
            differentVersion, oldMethod, 'foo')
        # Both the expected and the actual version appear in the message.
        self.assertIn(getVersionString(self.version), str(exception))
        self.assertIn(getVersionString(differentVersion), str(exception))

    def test_nestedDeprecation(self):
        """
        L{callDeprecated} ignores all deprecations apart from the first.
        Multiple warnings are generated when a deprecated function calls
        another deprecated function. The first warning is the one generated by
        the explicitly called function. That's the warning that we care about.
        """
        differentVersion = Version('Foo', 1, 2, 3)

        def nestedDeprecation(*args):
            return oldMethod(*args)
        nestedDeprecation = deprecated(differentVersion)(nestedDeprecation)

        self.callDeprecated(differentVersion, nestedDeprecation, 24)

        # The oldMethod deprecation should have been emitted too, not captured
        # by callDeprecated. Flush it now to make sure it did happen and to
        # prevent it from showing up on stdout.
        warningsShown = self.flushWarnings()
        self.assertEqual(len(warningsShown), 1)

    def test_callDeprecationWithMessage(self):
        """
        L{callDeprecated} can take a message argument used to check the warning
        emitted.
        """
        self.callDeprecated((self.version, "newMethod"),
                            oldMethodReplaced, 1)

    def test_callDeprecationWithWrongMessage(self):
        """
        If the message passed to L{callDeprecated} doesn't match,
        L{callDeprecated} raises a test failure.
        """
        exception = self.assertRaises(
            self.failureException,
            self.callDeprecated,
            (self.version, "something.wrong"),
            oldMethodReplaced, 1)
        self.assertIn(getVersionString(self.version), str(exception))
        self.assertIn("please use newMethod instead", str(exception))
# Module-level fixture used by TestCallDeprecated: deprecated at
# TestCallDeprecated.version with no replacement.
@deprecated(TestCallDeprecated.version)
def oldMethod(x):
    """
    Deprecated method for testing.
    """
    return x
# Module-level fixture used by TestCallDeprecated: deprecated with an
# explicit replacement name, so the emitted warning mentions "newMethod".
@deprecated(TestCallDeprecated.version, replacement="newMethod")
def oldMethodReplaced(x):
    """
    Another deprecated method, which has been deprecated in favor of the
    mythical 'newMethod'.
    """
    return 2 * x
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/trial/test/test_assertions.py | Python | gpl-2.0 | 42,547 |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.random_forest import H2ORandomForestEstimator
def fiftycatRF():
    """Grid-search H2O random forests on the 50-category test data and
    check that every grid member is a random-forest estimator."""
    # Training set has only 45 categories cat1 through cat45
    frame = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/50_cattest_train.csv"))
    frame["y"] = frame["y"].asfactor()

    # Train H2O DRF Grid over a small hyper-parameter space.
    grid_space = {'ntrees': [10, 50], 'max_depth': [20, 10]}
    grid = H2OGridSearch(H2ORandomForestEstimator, hyper_params=grid_space)
    print(grid)
    grid.train(x=["x1", "x2"], y="y", training_frame=frame)
    grid.show()
    grid.summary()

    # Every model produced by the grid search must be a DRF estimator.
    for estimator in grid:
        assert isinstance(estimator, H2ORandomForestEstimator)
if __name__ == "__main__":
    # Executed directly: run through the pyunit standalone harness.
    pyunit_utils.standalone_test(fiftycatRF)
else:
    # Imported by the test runner: execute the test immediately.
    fiftycatRF()
| YzPaul3/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_fiftycatRF_grid.py | Python | apache-2.0 | 852 |
import ConfigParser
import os.path
import time
import getpass
import subprocess
import argparse
import shlex
import re
import urlparse
import tempfile
import log
class textColours:
    """ANSI terminal escape sequences for coloured/styled console output."""
    HEADER = '\033[95m'      # bright magenta
    OKBLUE = '\033[94m'      # blue
    OKGREEN = '\033[92m'     # green
    WARNING = '\033[93m'     # yellow
    FAIL = '\033[91m'        # red
    ENDC = '\033[0m'         # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    HIGHLIGHT = '\033[96m'   # cyan
def append_to_single_dimension_dict(dictionary, name_of_component, version):
    """Append ``version`` to the list stored under ``name_of_component``.

    Creates a new single-element list when the key is not present yet.
    Mutates ``dictionary`` in place; returns None.
    """
    # setdefault inserts an empty list on first sight of the key, so the
    # append works uniformly for both new and existing components.
    dictionary.setdefault(name_of_component, []).append(version)
def append_to_dual_dimension_dict(dictionary, name_of_component, environment_name, version):
    """Record ``version`` for ``environment_name`` under ``name_of_component``.

    Creates the inner per-environment dict when the component is not present
    yet; otherwise overwrites any previous version stored for that
    environment (same as the original branching code). Mutates ``dictionary``
    in place; returns None.
    """
    # setdefault inserts an empty inner dict on first sight of the key, so
    # the assignment works uniformly for new and existing components.
    dictionary.setdefault(name_of_component, {})[environment_name] = version
| stratus-ss/python_scripts | openshift_scripts/pipeline_related/common.py | Python | lgpl-3.0 | 913 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""最適化用のクラスをまとめたモジュール"""
from __future__ import print_function, unicode_literals
import logging
try:
from llvm import passes
except:
pass
from tinyc.code import (
Code, Comment, Data, Extern, Global, Label, Memory, Register, Registers)
class Optimizer(object):
    """Base class for assembler optimization passes.

    Subclasses override :meth:`optimize`; the base implementation is the
    identity pass. ``optimized`` counts how many rewrites a pass applied.
    """

    def __init__(self):
        # No rewrites applied yet; all passes share the root logger.
        self.optimized = 0
        self.logger = logging.getLogger()

    def optimize(self, code):
        """Return ``code`` unchanged (identity pass)."""
        return code
class LabelOptimizer(Optimizer):
    """Optimization that removes unnecessary labels."""
    def __init__(self):
        super(LabelOptimizer, self).__init__()
        # Maps a redundant Label to the surviving Label it is replaced by.
        self.replace_table = {}

    def _optimize_duplication(self, code):
        """Remove labels that are declared back-to-back (duplicates)."""
        label = None
        # First pass: find consecutive labels and build the replace table.
        for line in code:
            if isinstance(line, Label):
                if label is None:
                    label = line
                elif line.glob:
                    # Labels used for GLOBAL are never replaced.
                    continue
                else:
                    self.replace_table[line] = label
            elif isinstance(line, Comment):
                # Comments do not break a run of consecutive labels.
                continue
            else:
                label = None
        # Second pass: drop duplicate declarations and rewrite references.
        new_code = []
        for line in code:
            if isinstance(line, Label):
                if line in self.replace_table:
                    # Duplicate label declaration: drop it from the code.
                    self.logger.info(
                        'Remove: label dupulication: ' + line.label)
                    self.optimized += 1
                    continue
            elif isinstance(line, Code):
                new_args = []
                for arg in line.args:
                    if isinstance(arg, Label) and arg in self.replace_table:
                        # Argument refers to a replaced label: substitute it.
                        self.optimized += 1
                        new_args.append(self.replace_table[arg])
                    else:
                        new_args.append(arg)
                line.args = new_args
            new_code.append(line)
        return new_code

    def _optimize_unused(self, code):
        """Remove labels that are never referenced by any instruction."""
        unused = {}   # Label -> line index of its declaration
        used = {}     # Labels seen as instruction arguments
        # Find labels that are never used.
        for i, line in enumerate(code):
            if isinstance(line, Label):
                if line not in used and not line.glob:
                    # Not referenced so far, and not GLOBAL: candidate for
                    # removal.
                    unused[line] = i
            elif isinstance(line, Code):
                # Any label appearing as an argument is in use.
                # NOTE(review): len()/map() on filter objects requires
                # Python 2 semantics (filter returning a list) — confirm
                # this module is Python-2 only.
                labels = filter(lambda a: isinstance(a, Label), line.args)
                if len(labels) == 0:
                    continue
                for label in labels:
                    if label in unused:
                        del unused[label]
                    if label not in used:
                        used[label] = True
        self.optimized += len(unused)
        map(lambda l: self.logger.info('Remove: unused label: ' + l.label), unused)
        return [i for j, i in enumerate(code) if j not in unused.values()]

    def optimize(self, code):
        """Run duplicate-label removal, then unused-label removal."""
        code = self._optimize_duplication(code)
        code = self._optimize_unused(code)
        return code
class GlobalExternOptimizer(Optimizer):
    """Deduplicate EXTERN directives and hoist them, with GLOBAL, to the top.

    The original implementation collected EXTERNs in a plain dict and then
    iterated it, so the emitted order of EXTERN directives was arbitrary on
    Python 2 (non-reproducible assembly output). This version keeps the
    unique EXTERNs in first-seen order, which is deterministic; everything
    else behaves the same.
    """

    def optimize(self, code):
        """Return ``code`` reordered as GLOBALs, unique EXTERNs, the rest."""
        _globals = []
        externs = []          # unique EXTERNs, in first-seen order
        seen_externs = set()  # string forms already emitted
        others = []
        for line in code:
            if isinstance(line, Global):
                _globals.append(line)
            elif isinstance(line, Extern):
                # Keep only the first occurrence of each EXTERN.
                key = str(line)
                if key in seen_externs:
                    self.logger.info(
                        'Remove: extern dupulication: ' + line.label.label)
                    self.optimized += 1
                else:
                    seen_externs.add(key)
                    externs.append(line)
            else:
                others.append(line)
        # GLOBAL directives first, then the unique EXTERNs, then the code.
        return _globals + externs + others
class JumpOptimizer(Optimizer):
    """Optimizations around jump instructions."""

    def _optimize_jump_1(self, code):
        """Remove unreachable instructions after an unconditional jump."""
        new_code = []
        jump = False
        for line in code:
            if isinstance(line, Label):
                # A label is a possible jump target: the dead region ends.
                jump = False
            elif jump:
                # Skip unreachable code following the unconditional jump.
                self.optimized += 1
                self.logger.info('Remove: instruction after jmp')
                continue
            elif isinstance(line, Code):
                if line.op == 'jmp':
                    # Unconditional jump found -> start of dead region.
                    jump = True
            new_code.append(line)
        return new_code

    def _optimize_jump_2(self, code):
        """Remove jumps whose target is the label immediately after them."""
        jump = None
        jump_line = -1
        delete_lines = []
        for i, line in enumerate(code):
            if isinstance(line, Label):
                if jump is not None and line == jump.args[0]:
                    # A jump straight to the following label is a no-op
                    # (true for conditional jumps as well), so drop it.
                    self.optimized += 1
                    delete_lines += [jump_line]
                    self.logger.info('Remove: unnecessary ' + jump.op)
                jump = None
            elif isinstance(line, Code):
                if line.op in ('je', 'jmp', 'jz',):
                    # Remember the most recent jump instruction.
                    jump = line
                    jump_line = i
                else:
                    jump = None
        return [i for j, i in enumerate(code) if j not in delete_lines]

    def optimize(self, code):
        """Run dead-code-after-jmp removal, then no-op jump removal."""
        code = self._optimize_jump_1(code)
        code = self._optimize_jump_2(code)
        return code
class UnnecessaryCodeOptimizer(Optimizer):
    """Remove unnecessary code."""

    def _check_single_deletable(self, code):
        """Decide from a single line whether it is a removable no-op."""
        if code.op in ('add', 'sub', 'imul',):
            if not isinstance(code.args[1], (int,)):
                # Only literal immediates are considered.
                return False
            elif code.op == 'add' and code.args[1] == 0:
                self.logger.info('Remove: add R, 0')
                return True
            elif code.op == 'sub' and code.args[1] == 0:
                self.logger.info('Remove: sub R, 0')
                return True
            elif code.op == 'imul' and code.args[1] == 1:
                self.logger.info('Remove: imul R, 1')
                return True
        return False

    def _optimize_single(self, code):
        """Optimizations that look at one line at a time."""
        # Drop lines identified as no-ops.
        new_code = []
        for line in code:
            if isinstance(line, Code) and self._check_single_deletable(line):
                self.optimized += 1
                continue
            new_code.append(line)
        return new_code

    def _optimize_save_and_load(self, code):
        """Remove a load that immediately follows a store of eax to the
        same memory location.

        NOTE(review): both the store and the load are deleted, leaving the
        value only in eax; this assumes the stored slot is not read again
        later — verify against how this pass is scheduled.
        """
        store = None      # memory operand of the pending store
        store_line = -1   # index of the pending store instruction
        delete_lines = []
        for i, line in enumerate(code):
            if isinstance(line, Comment):
                # Comments do not interrupt a store/load pair.
                continue
            elif isinstance(line, Code):
                if line.op == 'mov':
                    if (isinstance(line.args[0], (Data, Memory,))
                            and isinstance(line.args[1], Register)
                            and line.args[1] == Registers.eax):
                        # Store: mov [mem], eax
                        store = line.args[0]
                        store_line = i
                    elif (isinstance(line.args[1], (Data, Memory,))
                            and store is not None
                            and isinstance(line.args[0], Register)
                            and line.args[0] == Registers.eax
                            and store == line.args[1]):
                        # Load from the location just stored: drop the pair.
                        self.optimized += 1
                        delete_lines += [store_line, i]
                        store = None
                        self.logger.info(
                            'Remove: mov (load) after mov (store)')
                    else:
                        store = None
                else:
                    store = None
        return [i for j, i in enumerate(code) if j not in delete_lines]

    def _is_register_read(self, code, register=Registers.eax):
        """Return True if the instruction reads ``register``.

        NOTE(review): the stripped source's indentation is ambiguous here;
        as reconstructed, the generic elif branches only run when
        ``register`` is not eax, which for the default argument means only
        the eax special cases apply — confirm against upstream intent.
        """
        op = code.op.replace(' dword', '')
        # Special cases for eax.
        if register == Registers.eax:
            if op == 'movzx':
                if (isinstance(code.args[1], Registers)
                        and code.args[1] == Registers.al):
                    return True
            if op in ('cdq', 'idiv', 'ret',):
                return True
        elif op in ('add', 'and', 'cmp', 'dec', 'imul', 'inc', 'neg', 'or',
                    'sub', 'test', 'xor',):
            # These ops read any of their register operands.
            for arg in code.args:
                if isinstance(arg, Registers) and arg == register:
                    return True
        elif op in ('mov', 'movzx',):
            # mov/movzx read their source (second) operand.
            if (isinstance(code.args[1], Registers)
                    and code.args[1] == register):
                return True
        return False

    def _is_register_write(self, code, register=Registers.eax):
        """Return True if the instruction writes ``register``."""
        op = code.op.replace(' dword', '')
        if register == Registers.eax:
            # cdq and idiv write their result into eax implicitly.
            if op in ('cdq', 'idiv',):
                return True
            elif op in ('sete', 'setg', 'setge', 'setl', 'setle', 'setne',):
                # set* writes al, the low byte of eax.
                if (isinstance(code.args[0], Registers)
                        and code.args[0] == Registers.al):
                    return True
        if op in ('add', 'and', 'call', 'dec', 'imul', 'inc', 'neg', 'mov',
                  'movzx', 'or', 'pop', 'sub', 'xor',):
            # These ops write their destination (first) operand.
            if (isinstance(code.args[0], Registers)
                    and code.args[0] == register):
                return True
        return False

    def _optimize_unused_code(self, code):
        """Delete register writes whose value is overwritten before being
        read."""
        used = False    # NOTE(review): never referenced below.
        start = -1      # index of the last unconsumed write, -1 if none
        delete_lines = []
        # Detect writes (to eax, the default register) that are never read
        # before the next write, and remove them.
        for i, line in enumerate(code):
            if isinstance(line, Label):
                # A label is a possible entry point: reset tracking.
                start = -1
            elif isinstance(line, Code):
                if self._is_register_read(line):
                    start = -1
                elif self._is_register_write(line):
                    if start > 0:
                        # Previous write was never read: delete it.
                        self.optimized += 1
                        delete_lines.append(start)
                        start = -1
                        self.logger.info('Remove: unused mov (store)')
                    start = i
        return [i for j, i in enumerate(code) if j not in delete_lines]

    def optimize(self, code):
        """Run all three removal passes in sequence."""
        code = self._optimize_single(code)
        code = self._optimize_save_and_load(code)
        code = self._optimize_unused_code(code)
        return code
class ReplaceCodeOptimizer(Optimizer):
    """Optimization by rewriting instructions into more efficient forms."""

    def optimize(self, code):
        for i, line in enumerate(code[:]):
            if isinstance(line, Code):
                if (line.op == 'mov'
                        and isinstance(line.args[0], Registers)
                        and isinstance(line.args[1], (int, str,))
                        and int(line.args[1]) == 0):
                    # Replace "mov R, 0" with the shorter "xor R, R".
                    code[i].op = 'xor'
                    code[i].args[1] = code[i].args[0]
                    code[i].comment += ' (Optimized mov -> xor)'
                    self.optimized += 1
                    self.logger.info('Replace: mov R, 0 -> xor R, R')
                elif (line.op == 'imul'
                        and isinstance(line.args[1], (int, str,))
                        and int(line.args[1]) == 0):
                    # Multiplication by zero is just "mov R, 0".
                    code[i].op = 'mov'
                    code[i].args[1] = 0
                    code[i].comment += ' (Optimized imul -> mov)'
                    self.optimized += 1
                    self.logger.info('Replace: imul R, 0 -> mov R, 0')
                elif line.op == 'inc':
                    # Rewrite inc as add R, 1.
                    code[i].op = 'add'
                    code[i].args.append(1)
                    code[i].comment += ' (Optimized inc -> add)'
                    self.optimized += 1
                    self.logger.info('Replace: inc R -> add R, 1')
                elif line.op == 'dec':
                    # Rewrite dec as sub R, 1.
                    code[i].op = 'sub'
                    code[i].args.append(1)
                    code[i].comment += ' (Optimized dec -> sub)'
                    self.optimized += 1
                    self.logger.info('Replace: dec R -> sub R, 1')
        return code
class StackPointerOptimzier(Optimizer):
    """Rewrite ebp-relative frame accesses as esp-relative ones, allowing
    the function prologue/epilogue (push ebp / mov ebp, esp / pop ebp)
    to be removed.

    NOTE(review): class name is misspelled ("Optimzier") but kept, since
    renaming it would break external references.
    """

    def optimize(self, code):
        flag = False     # inside a candidate function
        functions = []   # (start index, frame size, end index) tuples
        start = -1
        end = -1
        offset = 0
        for i, line in enumerate(code):
            # Find the start of a function prologue: push ebp.
            if not isinstance(line, Code):
                continue
            elif (line.op == 'push'
                    and isinstance(line.args[0], Registers)
                    and line.args[0] == Registers.ebp):
                flag = True
                start = i
            elif start == i - 1:
                # Second prologue line must be: mov ebp, esp.
                if not (line.op == 'mov'
                        and line.args[0] == Registers.ebp
                        and line.args[1] == Registers.esp):
                    flag = False
            elif start == i - 2:
                # Optional third line reserves the frame: sub esp, N.
                if line.op == 'sub' and line.args[0] == Registers.esp:
                    offset = int(line.args[1])
                else:
                    offset = 0
            # Any push inside the body invalidates the pattern (esp moves).
            elif flag and line.op == 'push':
                flag = False
            # Find the end of the function epilogue: pop ebp.
            elif flag and line.op == 'pop' and line.args[0] == Registers.ebp:
                end = i
                functions.append((start, offset, end,))
        for function in functions:
            for i, line in enumerate(code[function[0]:function[2] + 1]):
                if not isinstance(line, Code):
                    continue
                for j, memory in enumerate(line.args):
                    if (isinstance(memory, Memory)
                            and memory.register == Registers.ebp):
                        # Rewrite ebp-relative access as esp-relative.
                        code[function[0] + i].args[j].register = Registers.esp
                        offset = function[1] + memory.offset - 4
                        code[function[0] + i].args[j].offset = offset
                        self.logger.info('Replace: [ebp+n] -> [esp+n]')
        delete_lines = []
        for function in functions:
            self.optimized += 1
            self.logger.info('Remove: unnecessary calling conventions')
            # Drop the now-unneeded prologue lines and the epilogue pop.
            delete_lines += [function[0], function[0] + 1, function[2] - 1]
            # Replace the last line with code restoring esp.
            # (A zero offset is fine: "add esp, 0" is optimized away later.)
            code[function[2]] = Code('add', 'esp', function[1],
                                     comment='Optimized ebp -> esp')
        return [i for j, i in enumerate(code) if j not in delete_lines]
class LLVMPasses(object):
    """Run a fixed pipeline of LLVM optimization passes over a module."""

    def __init__(self):
        self.logger = logging.getLogger()
        # Pass pipeline as bare pass names. The source string begins with
        # ' -', so split(' -') yields one empty leading entry, which is
        # filtered out in optimize().
        self.passes = (
            ' -targetlibinfo -no-aa -tbaa -basicaa -notti -preverify -domtree'
            ' -verify -globalopt -ipsccp -deadargelim -instcombine -simplifycfg'
            ' -basiccg -prune-eh -inline-cost -inline -functionattrs'
            ' -argpromotion -sroa -domtree -early-cse -simplify-libcalls'
            ' -lazy-value-info -jump-threading -correlated-propagation'
            ' -simplifycfg -instcombine -tailcallelim -simplifycfg -reassociate'
            ' -domtree -loops -loop-simplify -lcssa -loop-rotate -licm -lcssa'
            ' -loop-unswitch -instcombine -scalar-evolution -loop-simplify'
            ' -lcssa -indvars -loop-idiom -loop-deletion -loop-unroll -memdep'
            ' -gvn -memdep -memcpyopt -sccp -instcombine -lazy-value-info'
            ' -jump-threading -correlated-propagation -domtree -memdep -dse'
            ' -adce -simplifycfg -instcombine -strip-dead-prototypes -globaldce'
            ' -constmerge -preverify -domtree -verify').split(' -')

    def optimize(self, module):
        """Apply every configured pass to ``module`` and return it."""
        manager = passes.PassManager.new()
        for ps in self.passes:
            # BUG FIX: the original tested ``ps.split != ''``, which compares
            # the bound str.split method to a string and is therefore always
            # true, letting the empty first entry reach manager.add('').
            if ps != '':
                manager.add(ps)
        manager.run(module)
        return module
| ymyzk/tinyc | tinyc/optimizer.py | Python | mit | 17,856 |
#!/usr/bin/env python3
# Copyright (c) 2014, Jesse Elwell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of python-backup nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## \file create_backup.py
#
# A script that uses `backup` to create and remove backups
from backup.BackupManager import backup_manager
from backup.BackupPrinter import backup_printer
import argparse
import configparser
import os
import sys
# Parses the command line into a dictionary. Does not include anything with a
# value of None
## Parses a command-line (or similar list) for use constructing `backup` objects
# \param args list of command-line arguments
#
# Parses a list, usually the command-line, into a dictionary that is suitable
# for use in constructing `backup` objects. This includes getting rid of any
# Nones (i.e. unspecififed settings without default values)
def parse_command_line(l):
    """Parse a command-line style argument list into a settings dict.

    Options the user did not supply (value None) are omitted from the
    returned dictionary so they cannot override config-file settings.
    """
    cli = argparse.ArgumentParser(description='Creates and rotates remote backups')
    cli.add_argument('-v', '--verbose', action='count', default=0,
                     help='Verbose output')
    cli.add_argument('-n', '--dry-run', action='store_true',
                     help='Do not actually create backup')
    cli.add_argument('-c', '--config-file', type=str, metavar='FILE',
                     help='Configuration file to use')
    cli.add_argument('-b', '--num-backups', type=int, metavar='N',
                     help='Number of backups to keep')
    cli.add_argument('-s', '--source-dir', type=str, dest='src',
                     metavar='DIR', help='Source directory (local)')
    cli.add_argument('-d', '--dest-dir', type=str, metavar='DIR',
                     dest='dest', help='Destination directory (remote)')
    cli.add_argument('-m', '--remote-machine', type=str, metavar='MACHINE',
                     dest='host', help='Destination host')
    cli.add_argument('-u', '--user', type=str,
                     help='Username on destination machine')
    cli.add_argument('-k', '--key', type=str, dest='ssh_key',
                     help='SSH key to use')
    cli.add_argument('-e', '--exclude-file', type=str, metavar='FILE',
                     help="File to use as rsync's exclude file")
    cli.add_argument('-l', '--log-excludes', action='store_true',
                     help='Store a log of the excluded files')
    cli.add_argument('-p', '--prefix', type=str, dest='prefix',
                     help='String to use as prefix for backup')
    parsed = vars(cli.parse_args(l))
    # Drop unspecified options (None values) so later merges stay clean.
    return dict((k, v) for k, v in parsed.items() if v is not None)
## Parses a list of configuration files
# \param list of config files to parse
# \param `backup_printer` to use for output
#
# Parses a list of configuration files (in order). Each configuration file
# overrides settings from previously read files, so they can cascade.
def parse_config_files(files, out):
    """Parse configuration files in order; later files override earlier ones.

    :param files: iterable of config-file paths (missing files are skipped
        silently by ConfigParser.read)
    :param out: backup_printer-style object; its error() is used to report
        invalid integer values
    :returns: tuple of (settings dict with None values removed, list of the
        files that were actually read)
    """
    # BUG FIX: SafeConfigParser was deprecated in Python 3.2 and removed in
    # 3.12; ConfigParser is the long-standing equivalent.
    config = configparser.ConfigParser()
    config_files = config.read(files)
    settings = dict()
    for s in config.sections():
        for o in config.options(s):
            if o == 'num_backups':
                # num_backups must be an int; fall back to 1 on bad input.
                try:
                    settings[o] = config.getint(s, o)
                except ValueError:
                    out.error('Invalid int value specified in configuration'
                        ' file: {0} using 1 instead\n'.format(config.get(s, o)))
                    settings[o] = 1
            else:
                settings[o] = config.get(s, o)
    # Remove anything with a value of None before returning
    return {k: v for k, v in settings.items() if v is not None}, config_files
## Create and rotate a backup according to settings
#
# Creates a single backup and removes oldest backups according to the settings
# specified via configuration files and command-line options. A breif outline of
# the funciton is:
# 1. Parse command-line argument(s)
# 2. Create `backup_printer` according to command-line
# 3. Read configuration file(s)
# 4. Create `backup` object according to all settings
# 5. Create backup and remove backup(s) using this object
def main():
    """Create one backup and rotate old backups.

    Outline (see the doc comment above this function):
      1. Parse command-line argument(s)
      2. Build a `backup_printer` from the -v verbosity level
      3. Read configuration file(s), cascading, CLI config file last
      4. Merge settings (command line wins over config files)
      5. Drive a `backup_manager`: check host/dest, create, prune
    """
    # Read command-line arguments ----------------------------------------------
    # NOTE(review): assumes parse_command_line always supplies a 'verbose'
    # key (e.g. via an argparse default) -- confirm, since it is read
    # unconditionally below.
    cl_settings = parse_command_line(sys.argv[1:])
    # Set up object to display output based on the verbose level we got --------
    s = {}
    # Always show warnings on stdout
    s['warn'] = sys.stdout
    # If we got no -v show nothing
    if cl_settings['verbose'] < 1:
        s['info'] = s['debug'] = None
    # if we got -v only show info messages on stdout
    elif cl_settings['verbose'] < 2:
        s['info'] = sys.stdout
        s['debug'] = None
    # if we got -vv show everything on stdout
    else:
        s['info'] = s['debug'] = sys.stdout
    # Always show errors and fatal messages on stderr
    s['error'] = sys.stderr
    s['fatal'] = sys.stderr
    # Add printer to cl_settings so it gets picked up by backup object
    cl_settings['printer'] = backup_printer(**s)
    # Remove the verbose level from the dictionary
    # (it is not a backup_manager setting)
    del cl_settings['verbose']
    # Or we could log the output somewhere with something like this:
    #with open('./backup.log', 'w') as log:
    #    s = {}
    #    s['warn'] = s['info'] = s['debug'] = s['error'] = s['fatal'] = log
    #    cl_settings['printer'] = backup.backup_printer(**s)
    # Read config files --------------------------------------------------------
    # Default paths to read
    config_files = ['/etc/backup.conf', os.path.expanduser('~/.backup.conf')]
    # If a config file is specified on the command line add it to the list and
    # remove it from settings
    if 'config_file' in cl_settings:
        config_files.append(cl_settings.pop('config_file'))
    settings, cf_read = parse_config_files(config_files, cl_settings['printer'])
    # Merge and output the settings and the files read to get them -------------
    # Merge the command line settings with the configuration file settings
    # (command-line overrides configuration values if both specified)
    settings.update(cl_settings)
    # List any configuration files used before checking settings so if there is
    # an error the user has some recourse to find it
    settings['printer'].info('Configuration file(s) read: {0}\n'.format(' '.join(cf_read)))
    # Output all settings for debugging (sorted for sanity)
    settings['printer'].debug('SETTINGS DUMP:\n{0}\n'.format(
        '\n'.join(sorted(['{0}={1}'.format(x, settings[x]) for x in settings]))
        )
    )
    # Do work ------------------------------------------------------------------
    # Create a backup object to work with
    bck = backup_manager(**settings)
    if bck.dry_run:
        settings['printer'].info('Performing a dry run...\n')
    # Make sure we can get to host
    bck.check_host()
    # Check that the destination directory exists and if this isn't a dry run,
    # have it created
    bck.check_dest()
    # Create the new backup
    bck.create_backup()
    # Get rid of old backups
    bck.remove_backups()

if __name__ == '__main__':
    main()
| jesseelwell/python-backup | create_backup.py | Python | bsd-3-clause | 8,442 |
#
# (C) Copyright 2015-2018, 2020 by Rocky Bernstein
# (C) Copyright 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
byte-code verification
"""
from __future__ import print_function
import operator, sys
import xdis.std as dis
from subprocess import call
import uncompyle6
from uncompyle6.scanner import Token as ScannerToken, get_scanner
from uncompyle6 import PYTHON3
from xdis import iscode, load_file, load_module, pretty_code_flags, PYTHON_MAGIC_INT
# FIXME: DRY
# Python 2/3 compatibility shims used by BIN_OP_FUNCS and the Cmp* errors.
if PYTHON3:
    # Python 3: '/' is true division and reduce() lives in functools.
    truediv = operator.truediv
    from functools import reduce
else:
    # Python 2: operator.div implements classic '/' division.
    truediv = operator.div
def code_equal(a, b):
    """Return True when the two code objects carry identical bytecode."""
    bytecode_a = a.co_code
    bytecode_b = b.co_code
    return bytecode_a == bytecode_b
# Map binary-opcode token names to the operator function they perform, so
# that constant-folded results in one bytecode stream can be checked against
# the unfolded LOAD_CONST pair in the other (see cmp_code_objects).
BIN_OP_FUNCS = {
    "BINARY_POWER": operator.pow,
    "BINARY_MULTIPLY": operator.mul,
    "BINARY_DIVIDE": truediv,
    "BINARY_FLOOR_DIVIDE": operator.floordiv,
    "BINARY_TRUE_DIVIDE": operator.truediv,
    "BINARY_MODULO": operator.mod,
    "BINARY_ADD": operator.add,
    # FIX: was misspelled "BINARY_SUBRACT", which is not a real opcode name,
    # so subtraction folding was never detected.
    "BINARY_SUBTRACT": operator.sub,
    "BINARY_LSHIFT": operator.lshift,
    "BINARY_RSHIFT": operator.rshift,
    "BINARY_AND": operator.and_,
    "BINARY_XOR": operator.xor,
    "BINARY_OR": operator.or_,
}

# Per-scanner list of jump opcodes; populated in cmp_code_objects().
JUMP_OPS = None
# --- exceptions ---
class VerifyCmpError(Exception):
    """Base class for every bytecode-comparison mismatch error."""
class CmpErrorConsts(VerifyCmpError):
    """Raised when corresponding co_consts entries differ."""

    def __init__(self, name, index):
        self.name = name
        self.index = index

    def __str__(self):
        return "Compare Error within Consts of {0!r} at index {1:d}".format(
            self.name, self.index
        )
class CmpErrorConstsType(VerifyCmpError):
    """Raised when corresponding co_consts entries have differing types."""

    def __init__(self, name, index):
        self.name = name
        self.index = index

    def __str__(self):
        return "Consts type differ in {0!r} at index {1:d}".format(
            self.name, self.index
        )
class CmpErrorConstsLen(VerifyCmpError):
    """Raised when the two co_consts tuples have different lengths."""

    def __init__(self, name, consts1, consts2):
        self.name = name
        self.consts = (consts1, consts2)

    def __str__(self):
        left, right = self.consts
        return (
            "Consts length differs in {0!r}:\n\n{1:d}:\t{2!r}\n\n"
            "{3:d}:\t{4!r}\n\n".format(self.name, len(left), left,
                                       len(right), right)
        )
class CmpErrorCode(VerifyCmpError):
    """Exception to be raised when code differs."""

    def __init__(self, name, index, token1, token2, tokens1, tokens2):
        # name: dotted code-object name; index: bytecode offset of the
        # mismatch; token1/token2: the differing tokens; tokens1/tokens2:
        # the full token streams (kept for the side-by-side dump below).
        self.name = name
        self.index = index
        self.token1 = token1
        self.token2 = token2
        self.tokens = [tokens1, tokens2]

    def __str__(self):
        # Build a side-by-side dump of both token streams.  ``reduce``
        # comes from functools under Python 3 (imported at module level).
        # NOTE: under Python 2, map() pads the shorter stream with None
        # while under Python 3 it truncates at the shorter one, so the
        # dump length differs between interpreters for unequal streams.
        s = reduce(
            lambda s, t: "%s%-37s\t%-37s\n" % (s, t[0], t[1]),
            list(map(lambda a, b: (a, b), self.tokens[0], self.tokens[1])),
            "Code differs in %s\n" % str(self.name),
        )
        return (
            "Code differs in %s at offset %s [%s] != [%s]\n\n"
            % (repr(self.name), self.index, repr(self.token1), repr(self.token2))
        ) + s
class CmpErrorCodeLen(VerifyCmpError):
    """Exception to be raised when code length differs."""

    def __init__(self, name, tokens1, tokens2):
        # name: dotted code-object name; tokens1/tokens2: full token streams.
        self.name = name
        self.tokens = [tokens1, tokens2]

    def __str__(self):
        # Side-by-side dump of both token streams (same mechanism as
        # CmpErrorCode.__str__; see the Python 2/3 map() caveat there).
        return reduce(
            lambda s, t: "%s%-37s\t%-37s\n" % (s, t[0], t[1]),
            list(map(lambda a, b: (a, b), self.tokens[0], self.tokens[1])),
            "Code len differs in %s\n" % str(self.name),
        )
class CmpErrorMember(VerifyCmpError):
    """Raised when any other compared code-object attribute differs."""

    def __init__(self, name, member, data1, data2):
        self.name = name
        self.member = member
        self.data = (data1, data2)

    def __str__(self):
        return "Member {0!r} differs in {1!r}:\n\t{2!r}\n\t{3!r}\n".format(
            self.member, self.name, self.data[0], self.data[1]
        )
# --- compare ---
# these members are ignored
__IGNORE_CODE_MEMBERS__ = [
    # Code-object attributes never compared by cmp_code_objects(): they
    # legitimately differ between the original and the recompiled module.
    "co_filename",
    "co_firstlineno",
    "co_lnotab",
    "co_stacksize",
    "co_names",
]
def cmp_code_objects(version, is_pypy, code_obj1, code_obj2, verify, name=""):
    """
    Compare two code-objects.

    This is the main part of this module.

    Raises a VerifyCmpError subclass on the first mismatch found:
    CmpErrorCode/CmpErrorCodeLen for bytecode differences, CmpErrorMember
    for any other differing code-object attribute.  Nested code objects in
    co_consts are compared recursively.

    NOTE(review): members are only compared when ``verify == "verify"``,
    while the co_code branch additionally requires ``verify == "strong"``;
    confirm the intended interaction of these two flag values.
    """
    # print code_obj1, type(code_obj2)
    assert iscode(
        code_obj1
    ), "cmp_code_object first object type is %s, not code" % type(code_obj1)
    assert iscode(
        code_obj2
    ), "cmp_code_object second object type is %s, not code" % type(code_obj2)
    # print dir(code_obj1)
    if isinstance(code_obj1, object):
        # new style classes (Python 2.2)
        # assume _both_ code objects to be new stle classes
        assert dir(code_obj1) == dir(code_obj2)
    else:
        # old style classes
        assert dir(code_obj1) == code_obj1.__members__
        assert dir(code_obj2) == code_obj2.__members__
        assert code_obj1.__members__ == code_obj2.__members__

    # Build a dotted name for error reporting.
    if name == "__main__":
        name = code_obj1.co_name
    else:
        name = "%s.%s" % (name, code_obj1.co_name)
    if name == ".?":
        name = "__main__"

    if isinstance(code_obj1, object) and code_equal(code_obj1, code_obj2):
        # use the new style code-classes' __cmp__ method, which
        # should be faster and more sophisticated
        # if this compare fails, we use the old routine to
        # find out, what exactly is nor equal
        # if this compare succeds, simply return
        # return
        pass

    if isinstance(code_obj1, object):
        members = [x for x in dir(code_obj1) if x.startswith("co_")]
    else:
        members = dir(code_obj1)
    members.sort()  # ; members.reverse()

    tokens1 = None
    for member in members:
        if member in __IGNORE_CODE_MEMBERS__ or verify != "verify":
            pass
        elif member == "co_code":
            if verify != "strong":
                continue
            scanner = get_scanner(version, is_pypy, show_asm=False)

            global JUMP_OPS
            # FIX: was ``scan.JUMP_OPS`` -- ``scan`` is not defined anywhere
            # in this function; the scanner instance is named ``scanner``.
            JUMP_OPS = list(scanner.JUMP_OPS) + ["JUMP_BACK"]

            # use changed Token class
            # We (re)set this here to save exception handling,
            # which would get confusing.
            scanner.setTokenClass(Token)
            try:
                # ingest both code-objects
                tokens1, customize = scanner.ingest(code_obj1)
                del customize  # save memory
                tokens2, customize = scanner.ingest(code_obj2)
                del customize  # save memory
            finally:
                scanner.resetTokenClass()  # restore Token class

            targets1 = dis.findlabels(code_obj1.co_code)
            tokens1 = [t for t in tokens1 if t.kind != "COME_FROM"]
            tokens2 = [t for t in tokens2 if t.kind != "COME_FROM"]

            i1 = 0
            i2 = 0
            offset_map = {}
            check_jumps = {}
            while i1 < len(tokens1):
                if i2 >= len(tokens2):
                    # Allow a trailing implicit "return None" epilogue in
                    # tokens1 that the recompiled code does not carry.
                    if (
                        len(tokens1) == len(tokens2) + 2
                        and tokens1[-1].kind == "RETURN_VALUE"
                        and tokens1[-2].kind == "LOAD_CONST"
                        and tokens1[-2].pattr is None
                        and tokens1[-3].kind == "RETURN_VALUE"
                    ):
                        break
                    else:
                        raise CmpErrorCodeLen(name, tokens1, tokens2)

                offset_map[tokens1[i1].offset] = tokens2[i2].offset

                # Verify any forward jumps previously recorded as landing here.
                for idx1, idx2, offset2 in check_jumps.get(tokens1[i1].offset, []):
                    if offset2 != tokens2[i2].offset:
                        raise CmpErrorCode(
                            name,
                            tokens1[idx1].offset,
                            tokens1[idx1],
                            tokens2[idx2],
                            tokens1,
                            tokens2,
                        )

                if tokens1[i1].kind != tokens2[i2].kind:
                    # NOTE(review): this branch can only be entered when the
                    # kinds differ, so the equality test below looks
                    # unreachable -- preserved as-is pending confirmation.
                    if tokens1[i1].kind == "LOAD_CONST" == tokens2[i2].kind:
                        i = 1
                        while tokens1[i1 + i].kind == "LOAD_CONST":
                            i += 1
                        # Constant folding: N LOAD_CONSTs + BUILD_TUPLE/LIST_N
                        # on one side vs. a single folded constant on the other.
                        if tokens1[i1 + i].kind.startswith(
                            ("BUILD_TUPLE", "BUILD_LIST")
                        ) and i == int(tokens1[i1 + i].kind.split("_")[-1]):
                            t = tuple([elem.pattr for elem in tokens1[i1 : i1 + i]])
                            if t != tokens2[i2].pattr:
                                raise CmpErrorCode(
                                    name,
                                    tokens1[i1].offset,
                                    tokens1[i1],
                                    tokens2[i2],
                                    tokens1,
                                    tokens2,
                                )
                            i1 += i + 1
                            i2 += 1
                            continue
                        elif (
                            i == 2
                            and tokens1[i1 + i].kind == "ROT_TWO"
                            and tokens2[i2 + 1].kind == "UNPACK_SEQUENCE_2"
                        ):
                            i1 += 3
                            i2 += 2
                            continue
                        elif i == 2 and tokens1[i1 + i].kind in BIN_OP_FUNCS:
                            # Two constants + binary op folded to one constant.
                            f = BIN_OP_FUNCS[tokens1[i1 + i].kind]
                            if (
                                f(tokens1[i1].pattr, tokens1[i1 + 1].pattr)
                                == tokens2[i2].pattr
                            ):
                                i1 += 3
                                i2 += 1
                                continue
                    elif tokens1[i1].kind == "UNARY_NOT":
                        # "not x" + jump-if-false is equivalent to
                        # jump-if-true on x (and vice versa).
                        if tokens2[i2].kind == "POP_JUMP_IF_TRUE":
                            if tokens1[i1 + 1].kind == "POP_JUMP_IF_FALSE":
                                i1 += 2
                                i2 += 1
                                continue
                        elif tokens2[i2].kind == "POP_JUMP_IF_FALSE":
                            if tokens1[i1 + 1].kind == "POP_JUMP_IF_TRUE":
                                i1 += 2
                                i2 += 1
                                continue
                    elif (
                        tokens1[i1].kind in ("JUMP_FORWARD", "JUMP_BACK")
                        and tokens1[i1 - 1].kind == "RETURN_VALUE"
                        and tokens2[i2 - 1].kind in ("RETURN_VALUE", "RETURN_END_IF")
                        and int(tokens1[i1].offset) not in targets1
                    ):
                        # Unreachable jump right after a return: skip it.
                        i1 += 1
                        continue
                    elif (
                        tokens1[i1].kind == "JUMP_BACK"
                        and tokens2[i2].kind == "CONTINUE"
                    ):
                        # FIXME: should make sure that offset is inside loop, not outside of it
                        i1 += 2
                        i2 += 2
                        continue
                    elif (
                        tokens1[i1].kind == "JUMP_FORWARD"
                        and tokens2[i2].kind == "JUMP_BACK"
                        and tokens1[i1 + 1].kind == "JUMP_BACK"
                        and tokens2[i2 + 1].kind == "JUMP_BACK"
                        and int(tokens1[i1].pattr) == int(tokens1[i1].offset) + 3
                    ):
                        if int(tokens1[i1].pattr) == int(tokens1[i1 + 1].offset):
                            i1 += 2
                            i2 += 2
                            continue
                    elif (
                        tokens1[i1].kind == "LOAD_NAME"
                        and tokens2[i2].kind == "LOAD_CONST"
                        and tokens1[i1].pattr == "None"
                        and tokens2[i2].pattr is None
                    ):
                        pass
                    elif (
                        tokens1[i1].kind == "LOAD_GLOBAL"
                        and tokens2[i2].kind == "LOAD_NAME"
                        and tokens1[i1].pattr == tokens2[i2].pattr
                    ):
                        pass
                    elif (
                        tokens1[i1].kind == "LOAD_ASSERT"
                        and tokens2[i2].kind == "LOAD_NAME"
                        and tokens1[i1].pattr == tokens2[i2].pattr
                    ):
                        pass
                    elif (
                        tokens1[i1].kind == "RETURN_VALUE"
                        and tokens2[i2].kind == "RETURN_END_IF"
                    ):
                        pass
                    elif (
                        tokens1[i1].kind == "BUILD_TUPLE_0" and tokens2[i2].pattr == ()
                    ):
                        pass
                    else:
                        raise CmpErrorCode(
                            name,
                            tokens1[i1].offset,
                            tokens1[i1],
                            tokens2[i2],
                            tokens1,
                            tokens2,
                        )
                elif (
                    tokens1[i1].kind in JUMP_OPS
                    and tokens1[i1].pattr != tokens2[i2].pattr
                ):
                    if tokens1[i1].kind == "JUMP_BACK":
                        # Backward jump: target already seen, check directly.
                        dest1 = int(tokens1[i1].pattr)
                        dest2 = int(tokens2[i2].pattr)
                        if offset_map[dest1] != dest2:
                            raise CmpErrorCode(
                                name,
                                tokens1[i1].offset,
                                tokens1[i1],
                                tokens2[i2],
                                tokens1,
                                tokens2,
                            )
                    else:
                        # Forward jump: record the expected landing offset and
                        # verify it when we reach dest1 (loop head above).
                        # import pdb; pdb.set_trace()
                        try:
                            dest1 = int(tokens1[i1].pattr)
                            # FIX: ``dest2`` was never assigned in this branch,
                            # so the append below always raised a NameError
                            # (silently swallowed) and forward jumps were
                            # never actually checked.
                            dest2 = int(tokens2[i2].pattr)
                            if dest1 in check_jumps:
                                check_jumps[dest1].append((i1, i2, dest2))
                            else:
                                check_jumps[dest1] = [(i1, i2, dest2)]
                        except:
                            # Non-integer jump targets are ignored on purpose.
                            pass

                i1 += 1
                i2 += 1
            del tokens1, tokens2  # save memory
        elif member == "co_consts":
            # partial optimization can make the co_consts look different,
            # so we'll just compare the code consts
            codes1 = (c for c in code_obj1.co_consts if hasattr(c, "co_consts"))
            codes2 = (c for c in code_obj2.co_consts if hasattr(c, "co_consts"))

            for c1, c2 in zip(codes1, codes2):
                cmp_code_objects(version, is_pypy, c1, c2, verify, name=name)
        elif member == "co_flags":
            flags1 = code_obj1.co_flags
            flags2 = code_obj2.co_flags
            if is_pypy:
                # For PYPY for now we don't care about PYPY_SOURCE_IS_UTF8:
                flags2 &= ~0x0100  # PYPY_SOURCE_IS_UTF8
            # We also don't care about COROUTINE or GENERATOR for now
            flags1 &= ~0x000000A0
            flags2 &= ~0x000000A0
            if flags1 != flags2:
                raise CmpErrorMember(
                    name,
                    "co_flags",
                    pretty_code_flags(flags1),
                    pretty_code_flags(flags2),
                )
        else:
            # all other members must be equal
            if getattr(code_obj1, member) != getattr(code_obj2, member):
                raise CmpErrorMember(
                    name, member, getattr(code_obj1, member), getattr(code_obj2, member)
                )
class Token(ScannerToken):
    """Token class with changed semantics for 'cmp()'.

    Installed on the scanner via setTokenClass() in cmp_code_objects so
    that several opcode variants compare as equivalent during verification.
    """

    def __cmp__(self, o):
        # NOTE: __cmp__ is only honoured implicitly by Python 2; under
        # Python 3 this method is never called by comparison operators.
        t = self.kind  # shortcut
        # The following opcode pairs are treated as equal (return 0):
        if t == "BUILD_TUPLE_0" and o.kind == "LOAD_CONST" and o.pattr == ():
            return 0
        if t == "COME_FROM" == o.kind:
            return 0
        if t == "PRINT_ITEM_CONT" and o.kind == "PRINT_ITEM":
            return 0
        if t == "RETURN_VALUE" and o.kind == "RETURN_END_IF":
            return 0
        if t == "JUMP_IF_FALSE_OR_POP" and o.kind == "POP_JUMP_IF_FALSE":
            return 0
        if JUMP_OPS and t in JUMP_OPS:
            # ignore offset
            # NOTE(review): this returns a bool where the __cmp__ protocol
            # expects -1/0/1 (True would read as "greater than") -- confirm
            # callers only use the result for truthiness.
            return t == o.kind
        return (t == o.kind) or self.pattr == o.pattr

    def __repr__(self):
        # Debug form: kind, attr and repr of pattr.
        return "%s %s (%s)" % (str(self.kind), str(self.attr), repr(self.pattr))

    def __str__(self):
        # Listing form: offset, kind, pattr (used in mismatch dumps).
        return "%s\t%-17s %r" % (self.offset, self.kind, self.pattr)
def compare_code_with_srcfile(pyc_filename, src_filename, verify):
    """Compare a .pyc with a source code file. If everything is okay, None
    is returned. Otherwise a string message describing the mismatch is returned.

    Raises a VerifyCmpError subclass (from cmp_code_objects) on a bytecode
    mismatch; returns a message string for environment-level problems
    (magic-number mismatch, source SyntaxError, failed verify-run).
    """
    (
        version,
        timestamp,
        magic_int,
        code_obj1,
        is_pypy,
        source_size,
        sip_hash,
    ) = load_module(pyc_filename)
    if magic_int != PYTHON_MAGIC_INT:
        # We can only recompile the source with the interpreter we are
        # running under, so bytecode for another version can't be compared.
        msg = (
            "Can't compare code - Python is running with magic %s, but code is magic %s "
            % (PYTHON_MAGIC_INT, magic_int)
        )
        return msg
    try:
        # Compile the (decompiled) source back into a code object.
        code_obj2 = load_file(src_filename)
    except SyntaxError as e:
        # src_filename can be the first of a group sometimes
        return str(e).replace(src_filename, pyc_filename)
    cmp_code_objects(version, is_pypy, code_obj1, code_obj2, verify)
    if verify == "verify-run":
        # Additionally execute the decompiled source and report failure.
        try:
            retcode = call("%s %s" % (sys.executable, src_filename), shell=True)
            if retcode != 0:
                # NOTE(review): a positive retcode from call() is an exit
                # status, not a signal number -- message may be misleading.
                return "Child was terminated by signal %d" % retcode
            pass
        except OSError as e:
            return "Execution failed: %s" % e
        pass
    return None
def compare_files(pyc_filename1, pyc_filename2, verify):
    """Compare two .pyc files.

    Loads both modules and delegates to cmp_code_objects, which raises a
    VerifyCmpError subclass on mismatch.
    """
    (
        version1,
        timestamp,
        magic_int1,
        code_obj1,
        is_pypy,
        source_size,
        sip_hash,
    ) = uncompyle6.load_module(pyc_filename1)
    (
        version2,
        timestamp,
        magic_int2,
        code_obj2,
        is_pypy,
        source_size,
        sip_hash,
    ) = uncompyle6.load_module(pyc_filename2)
    if (magic_int1 != magic_int2) and verify == "verify":
        # Bytecode from different interpreter versions can't match exactly,
        # so fall back to a weaker comparison mode.
        verify = "weak_verify"
    cmp_code_objects(version1, is_pypy, code_obj1, code_obj2, verify)
if __name__ == "__main__":
    # Tiny manual smoke test: build two Tokens and show their repr and
    # field-wise comparison results.
    t1 = Token("LOAD_CONST", None, "code_object _expandLang", 52)
    t2 = Token("LOAD_CONST", -421, "code_object _expandLang", 55)
    print(repr(t1))
    print(repr(t2))
    print(t1.kind == t2.kind, t1.attr == t2.attr)
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/uncompyle6/verify.py | Python | mit | 19,768 |
# Python 2 script: a minimal UDP server that receives a file pushed by the
# matching SiscomSender client.  Protocol (all over UDP datagrams):
#   "123FILE321"  -- next datagram carries the file path; open it for writing
#   "123CLOSE321" -- end of transfer; close the file
#   anything else -- file payload while a transfer is active, otherwise
#                    just printed to the console
import socket

UDP_IP = raw_input('Host IP [Default: localhost]: ')
if not UDP_IP:
    UDP_IP = 'localhost'
UDP_PORT = raw_input('Port Number [Default: 22000]: ')
if not UDP_PORT:
    UDP_PORT = 22000
UDP_PORT = int(UDP_PORT)
sock = socket.socket(socket.AF_INET, # Internet
                     socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
print "Server OnLine"
filename = ""
receiving_file = 0
received = 0
Exit = 1
while Exit:
    #data, addr = sock.recv(1024) # buffer size is 1024 bytes
    data, addr = sock.recvfrom(2048) # buffer size is 1024 bytes
    #print '[IP: %s, Port: %s] Received message: %s' %(addr[0],addr[1],data)
    #print data
    #print received
    if (data == "123FILE321"):
        # Start-of-file sentinel: the next datagram is the sender's path;
        # only its basename is used as the local filename.
        #filename, addr = sock.recvfrom(2048)
        #data, addr = sock.recvfrom(2048)
        msg, addr = sock.recvfrom(2048)
        msg = msg.split("/")
        filename = msg[-1]
        print "RECEBENDO ARQUIVO DE: ", addr
        print "Nome do Arquivo: ", filename
        # NOTE(review): text mode 'w' may corrupt binary payloads on
        # Windows ('wb' would be safer) -- confirm sender only sends text.
        f = open(filename,'w')
        receiving_file = 1
    elif (data == "123CLOSE321"):
        # End-of-file sentinel.
        # NOTE(review): if CLOSE arrives before any FILE sentinel, ``f`` is
        # undefined and f.close() raises NameError -- confirm the sender
        # always pairs the sentinels.
        print "\nArquivo recebido: ", filename
        print "Verifique o arquivo na pasta onde este script se encontra."
        receiving_file = 0
        f.close()
        #Exit = 0
    elif(receiving_file == 1):
        # Payload chunk of an active transfer.
        data=bytearray(data)
        f.write(data) # python will convert \n to os.linesep
    else:
        # Stray datagram outside a transfer: just display it.
        data=bytearray(data)
        print (data)
print "Fechando tudo.."
sock.close()
| lucaspcamargo/ufsc-siscom-sender | siscom-sender/server/UDPServer_for_SiscomSender.py | Python | gpl-3.0 | 1,546 |
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""SQL Script interfaces
$Id: interfaces.py 67630 2006-04-27 00:54:03Z jim $
"""
import zope.schema
from zope.rdb.interfaces import ISQLCommand
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zope')
class MissingInput(Exception):
    """Signals that an expected input was not supplied."""
class ISQLScript(ISQLCommand):
    """A persistent script that can execute SQL."""

    # Name of the registered database connection to run the script against;
    # choices come from the "Connection Names" vocabulary.
    connectionName = zope.schema.Choice(
        title=_(u"Connection Name"),
        description=_(u"The Connection Name for the connection to be used."),
        vocabulary="Connection Names",
        required=False)

    # Raw argument specification string made available while rendering the
    # SQL source; parsed form is exposed via getArguments().
    arguments = zope.schema.BytesLine(
        title=_(u"Arguments"),
        description=_(
            u"A set of attributes that can be used during the SQL command "
            u"rendering process to provide dynamic data."),
        required=False,
        default='',
        missing_value='')

    # The SQL command text itself (rendered as a DTML template, see
    # getTemplate()).
    source = zope.schema.ASCII(
        title=_(u"Source"),
        description=_(u"The SQL command to be run."),
        required=False,
        default='',
        missing_value='')

    def getArguments():
        """Returns a set of arguments. Note that this is not a string!"""

    def getTemplate():
        """Get the SQL DTML Template object."""
| Donkyhotay/MoonPy | zope/app/sqlscript/interfaces.py | Python | gpl-3.0 | 1,852 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
help.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import codecs
from processing.core.Processing import Processing
from processing.core.parameters import ParameterMultipleInput, ParameterTableField, ParameterVector, ParameterSelection
from processing.tools.system import mkdir
def baseHelpForAlgorithm(alg, folder):
    """Write a reStructuredText help-file skeleton for *alg*.

    The stub is written to
    ``<folder>/<provider>/<sanitized group>/<sanitized algorithm>.rst``
    with placeholder sections for description, parameters, outputs,
    console usage and see-also.
    """
    baseDir = os.path.join(folder, alg.provider.getName().lower())
    mkdir(baseDir)

    # Build a filesystem-safe group directory name from the group label.
    groupName = alg.group.lower()
    groupName = groupName.replace('[', '').replace(']', '').replace(' - ', '_')
    groupName = groupName.replace(' ', '_')
    cmdLineName = alg.commandLineName()
    # Algorithm name is the part after "provider:" in the command-line name.
    algName = cmdLineName[cmdLineName.find(':') + 1:].lower()
    validChars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    safeGroupName = ''.join(c for c in groupName if c in validChars)
    safeAlgName = ''.join(c for c in algName if c in validChars)

    dirName = os.path.join(baseDir, safeGroupName)
    mkdir(dirName)
    filePath = os.path.join(dirName, safeAlgName + '.rst')

    with codecs.open(filePath, 'w', encoding='utf-8') as f:
        f.write('{}\n'.format(alg.name))
        f.write('{}\n\n'.format('=' * len(alg.name)))
        f.write('Description\n')
        f.write('-----------\n\n<put algortithm description here>\n\n')

        # Algorithm parameters
        f.write('Parameters\n')
        f.write('----------\n\n')
        for p in alg.parameters:
            # Input-like parameters also document their data type.
            if isinstance(p, (ParameterMultipleInput, ParameterTableField, ParameterVector)):
                f.write('``{}`` [{}: {}]\n'.format(p.description, p.typeName(), p.dataType()))
            else:
                f.write('``{}`` [{}]\n'.format(p.description, p.typeName()))
            if hasattr(p, 'optional'):
                if p.optional:
                    f.write('  Optional.\n\n')
            f.write('  <put parameter description here>\n\n')
            if isinstance(p, ParameterSelection):
                # Enumerate the selectable options with their indices.
                f.write('  Options:\n\n')
                for count, opt in enumerate(p.options):
                    f.write('  * {} --- {}\n'.format(count, opt))
                f.write('\n')
            if hasattr(p, 'default'):
                f.write('  Default: *{}*\n\n'.format(p.default if p.default != '' else '(not set)'))

        # Algorithm outputs
        f.write('Outputs\n')
        f.write('-------\n\n')
        for o in alg.outputs:
            f.write('``{}`` [{}]\n'.format(o.description, o.typeName()))
            f.write('  <put output description here>\n\n')

        # Console usage
        f.write('Console usage\n')
        f.write('-------------\n')
        f.write('\n::\n\n')
        # Example processing.runalg() call listing all params and
        # non-hidden outputs.
        cmd = "  processing.runalg('{}', ".format(alg.commandLineName())
        for p in alg.parameters:
            cmd += '{}, '.format(p.name.lower().strip())
        for o in alg.outputs:
            if not o.hidden:
                cmd += '{}, '.format(o.name.lower().strip())
        cmd = cmd[:-2] + ')\n\n'
        f.write(cmd)

        f.write('See also\n')
        f.write('--------\n\n')
def createBaseHelpFiles(folder):
    """Generate stub .rst help files for every algorithm of every
    non-GRASS provider beneath *folder*."""
    non_grass = (p for p in Processing.providers if 'grass' not in p.getName())
    for provider in non_grass:
        for algorithm in provider.algs:
            baseHelpForAlgorithm(algorithm, folder)
def createAlgorithmHelp(algName, folder):
    """Generate the stub help file for the single algorithm *algName*."""
    algorithm = Processing.getAlgorithm(algName)
    baseHelpForAlgorithm(algorithm, folder)
| michaelkirk/QGIS | python/plugins/processing/tools/help.py | Python | gpl-2.0 | 4,423 |
import flask
import unittest
from superdesk.io.subjectcodes import init_app
class SubjectsTestCase(unittest.TestCase):
    """Exercises the per-app subject-code registry installed by init_app."""

    def test_app_subjects(self):
        # init_app attaches a ``subjects`` registry to the Flask app.
        app = flask.Flask(__name__)
        app.api_prefix = "/test"
        init_app(app)
        with app.app_context():
            # Registered codes are retrievable by key.
            app.subjects.register({"01000000": "Foo"})
            self.assertEqual("Foo", app.subjects["01000000"])
            # clear() must drop previously registered codes entirely.
            app.subjects.clear()
            with self.assertRaises(KeyError):
                app.subjects["01000000"]
| superdesk/superdesk-core | tests/subjectcodes_test.py | Python | agpl-3.0 | 516 |
__author__ = 'michael'
from django import forms
import models
from unobase import models as unobase_models
class BulkSelectedMixin(forms.Form):
    """
    Mixin form used for bulk actions, used to determine bulk selected users.

    Subclasses must define an inner ``Meta`` class with a ``model``
    attribute; get_selected() resolves the hidden id list against it.
    """
    # Comma-separated list of object ids, filled in by the bulk-action UI.
    selected = forms.CharField(
        widget=forms.HiddenInput(),
    )

    def __new__(cls, *args, **kwargs):
        # Expose the subclass's inner Meta (or None) as ``_meta`` so that
        # get_selected() can find the model to query.
        new_class = super(BulkSelectedMixin, cls).__new__(cls, *args, **kwargs)
        new_class._meta = getattr(new_class, 'Meta', None)
        return new_class

    def get_selected(self):
        # Resolve the hidden comma-separated id string into model instances.
        # Returns a queryset, or None (not an empty queryset) when nothing
        # was selected -- callers must handle both.
        ids = self.cleaned_data['selected']
        if ids:
            return self._meta.model.objects.filter(id__in=ids.split(','))
        return None
class BulkTagForm(BulkSelectedMixin):
    """
    Form used to bulk tag objects.

    ``action`` selects the operation: 'save' (re)tags the selected objects,
    'delete' removes the tag from them.
    """
    class Meta:
        # Model whose instances are resolved by get_selected().
        model = unobase_models.TagModel

    # Optional id of an existing tag being replaced on save.
    tag_id = forms.IntegerField(required=False)
    # Title of the tag to apply (created on demand; lowercased on save).
    tag = forms.CharField(
        required=False,
        initial='Create a new tag',
    )
    # Operation selector set by the UI: 'save' or 'delete'.
    action = forms.CharField(
        required=False,
        widget=forms.HiddenInput()
    )

    def save(self, request, *args, **kwargs):
        # Apply the chosen bulk-tag action to every selected object.
        objs = self.get_selected()
        tag_title = self.cleaned_data['tag'].lower()

        if self.cleaned_data['action'] == 'save':
            # Create the tag if needed, then swap it in (optionally
            # removing the old tag identified by tag_id first).
            tag, created = models.Tag.objects.get_or_create(
                title=tag_title
            )

            old_tag = None
            if self.cleaned_data['tag_id']:
                old_tag = models.Tag.objects.get(pk=int(self.cleaned_data['tag_id']))

            for obj in objs:
                if old_tag:
                    obj.tags.remove(old_tag)

                obj.tags.add(tag)

        if self.cleaned_data['action'] == 'delete':
            # Silently do nothing when the tag does not exist.
            try:
                tag = models.Tag.objects.get(title=tag_title)
            except models.Tag.DoesNotExist:
                return

            for obj in objs:
                obj.tags.remove(tag)
import html
from collections import defaultdict
from urllib.parse import urlencode, quote
from django.urls import reverse
from django.conf import settings
from django.template import Context
from django.contrib.gis.measure import D
from froide.campaign.utils import connect_foirequest
from froide.foirequest.models import FoiRequest
from ..models import InformationObject
from ..serializers import CampaignProviderItemSerializer
LIMIT = 100
def first(x):
    """Return the first element of *x*, or None when *x* is empty/falsy.

    FIX: the original body read ``if not x: return`` followed by
    ``return None`` -- both paths returned None, so the function could
    never yield an element.  The name and the guard clearly intend the
    first element to be returned for a non-empty sequence.
    """
    if not x:
        return None
    return x[0]
class BaseProvider:
ORDER_ZOOM_LEVEL = 15
CREATE_ALLOWED = False
ORDER_BY = "-featured"
def __init__(self, campaign, **kwargs):
self.campaign = campaign
self.kwargs = kwargs
def get_by_ident(self, ident):
return InformationObject.objects.get(campaign=self.campaign, ident=ident)
def get_ident_list(self, qs):
return [i.ident for i in qs]
def get_queryset(self):
return InformationObject.objects.filter(campaign=self.campaign).select_related(
"publicbody"
)
def search(self, **filter_kwargs):
iobjs = self.get_queryset()
iobjs = self.filter(iobjs, **filter_kwargs)
iobjs = self.filter_geo(iobjs, **filter_kwargs)
iobjs = iobjs.order_by(self.ORDER_BY, "?").distinct()
iobjs.distinct()
if not filter_kwargs.get("featured") == 1:
iobjs = self.limit(iobjs)
foirequests_mapping = self.get_foirequests_mapping(iobjs)
data = [
self.get_provider_item_data(iobj, foirequests=foirequests_mapping)
for iobj in iobjs
]
serializer = CampaignProviderItemSerializer(data, many=True)
return serializer.data
def detail(self, ident):
obj = self.get_by_ident(ident)
data = self.get_provider_item_data(obj, detail=True)
serializer = CampaignProviderItemSerializer(data)
return serializer.data
def get_detail_data(self, iobj):
mapping = self.get_foirequests_mapping([iobj])
data = self.get_provider_item_data(iobj, foirequests=mapping, detail=True)
serializer = CampaignProviderItemSerializer(data)
return serializer.data
def filter(self, iobjs, **filter_kwargs):
if filter_kwargs.get("q"):
iobjs = InformationObject.objects.search(iobjs, filter_kwargs["q"])
if filter_kwargs.get("requested") is not None:
iobjs = iobjs.filter(
foirequests__isnull=not bool(filter_kwargs["requested"])
)
if filter_kwargs.get("featured") is not None:
iobjs = iobjs.filter(featured=bool(filter_kwargs["featured"]))
return iobjs
def filter_geo(
self, qs, q=None, coordinates=None, radius=None, zoom=None, **kwargs
):
if coordinates is None:
return qs
if radius is None:
radius = 1000
radius = int(radius * 0.9)
qs = (
qs.filter(geo__isnull=False)
.filter(geo__dwithin=(coordinates, radius))
.filter(geo__distance_lte=(coordinates, D(m=radius)))
)
# order_distance = zoom is None or zoom >= self.ORDER_ZOOM_LEVEL
# if not q and order_distance:
# qs = (
# qs.annotate(distance=Distance("geo", coordinates))
# .order_by("distance")
# )
# else:
# qs = qs.order_by('?')
return qs
def limit(self, qs):
return qs[: self.kwargs.get("limit", LIMIT)]
def get_provider_item_data(self, obj, foirequests=None, detail=False):
data = {
"id": obj.id,
"ident": obj.ident,
"title": obj.title,
"subtitle": obj.subtitle,
"address": obj.address,
"request_url": self.get_request_url_redirect(obj.ident),
"publicbody_name": self.get_publicbody_name(obj),
"description": obj.get_description(),
"lat": obj.get_latitude(),
"lng": obj.get_longitude(),
"foirequest": None,
"foirequests": [],
"resolution": "normal",
"context": obj.context,
# obj.categories + translations prefetched
"categories": [
{"id": c.id, "title": c.title} for c in obj.categories.all()
],
"featured": obj.featured,
}
if foirequests and foirequests[obj.ident]:
fr, res, public = self._get_foirequest_info(foirequests[obj.ident])
data.update(
{
"foirequest": fr,
"foirequests": foirequests[obj.ident],
"resolution": res,
"public": public,
}
)
return data
def _get_foirequest_info(self, frs):
fr_id, res = frs[0].get("id"), frs[0].get("resolution")
public = frs[0].get("public", False)
success_strings = ["successful", "partially_successful"]
withdrawn_strings = ["user_withdrew_costs", "user_withdrew"]
resolution = "pending"
if res:
if res in success_strings:
resolution = "successful"
if res == "refused":
resolution = "refused"
if res in withdrawn_strings:
resolution = "user_withdrew"
return fr_id, resolution, public
def get_foirequests_mapping(self, qs):
ident_list = self.get_ident_list(qs)
iobjs = InformationObject.objects.filter(ident__in=ident_list)
mapping = defaultdict(list)
iterable = (
InformationObject.foirequests.through.objects.filter(
informationobject__in=iobjs
)
.order_by("-foirequest__first_message")
.values_list(
"informationobject__ident",
"foirequest_id",
"foirequest__resolution",
"foirequest__visibility",
)
)
for iobj_ident, fr_id, resolution, visibility in iterable:
mapping[iobj_ident].append(
{
"id": fr_id,
"resolution": resolution,
"public": visibility == FoiRequest.VISIBILITY.VISIBLE_TO_PUBLIC,
}
)
return mapping
def get_publicbody_name(self, obj):
if obj.publicbody is None:
return ""
return obj.publicbody.name
def get_publicbody(self, ident):
obj = self.get_by_ident(ident)
return self._get_publicbody(obj)
def get_publicbodies(self, ident):
pb = self.get_publicbody(ident)
if pb:
return [pb]
return []
def _get_publicbody(self, obj):
return obj.publicbody
def get_request_url_redirect(self, ident):
return reverse(
"campaign-redirect_to_make_request",
kwargs={"campaign_id": self.campaign.id, "ident": ident},
)
def get_request_url(self, ident, language=None):
obj = self.get_by_ident(ident)
return self.get_request_url_with_object(ident, obj, language=language)
def get_request_url_context(self, obj, language=None):
return obj.get_context(language)
def get_request_url_with_object(self, ident, obj, language=None):
context = self.get_request_url_context(obj, language)
publicbody = self._get_publicbody(obj)
return self.make_request_url(ident, context, publicbody)
def make_request_url(self, ident, context, publicbody=None):
if publicbody is not None:
pb_slug = publicbody.slug
url = reverse(
"foirequest-make_request", kwargs={"publicbody_slug": pb_slug}
)
else:
url = reverse("foirequest-make_request")
context = Context(context)
subject = html.unescape(self.campaign.get_subject_template().render(context))
if len(subject) > 250:
subject = subject[:250] + "..."
body = html.unescape(self.campaign.get_template().render(context)).encode(
"utf-8"
)
ref = ("campaign:%s@%s" % (self.campaign.pk, ident)).encode("utf-8")
query = {"subject": subject.encode("utf-8"), "body": body, "ref": ref}
if self.kwargs.get("lawType"):
query["law_type"] = self.kwargs["lawType"].encode()
if self.kwargs.get("redirect_url"):
query["redirect_url"] = self.kwargs["redirect_url"].encode()
hide_features = ["public", "full_text", "similar", "draft", "editing"]
if publicbody is not None:
hide_features.append("publicbody")
hide_features = [
"hide_{}".format(x)
for x in hide_features
if not self.kwargs.get("show_{}".format(x))
]
query.update({f: b"1" for f in hide_features})
query = urlencode(query, quote_via=quote)
return "%s%s?%s" % (settings.SITE_URL, url, query)
def get_user_request_count(self, user):
if not user.is_authenticated:
return 0
return InformationObject.objects.filter(
campaign=self.campaign, foirequests__user=user
).count()
    def connect_request(self, ident, sender):
        """
        Attach the FOI request *sender* to the information object *ident*.

        Unknown idents and requests addressed to a different public body
        than the one stored on the information object are silently ignored.
        Returns the updated InformationObject, or None if not connected.
        """
        try:
            iobj = self.get_by_ident(ident)
        except InformationObject.DoesNotExist:
            return
        if iobj.publicbody != sender.public_body:
            # Request went to another public body; don't link it here.
            return
        if iobj.foirequest is None:
            # The first connected request becomes the primary one.
            iobj.foirequest = sender
        iobj.foirequests.add(sender)
        iobj.save()
        connect_foirequest(sender, self.campaign.slug)
        return iobj
| okfde/froide-campaign | froide_campaign/providers/base.py | Python | mit | 9,736 |
"""
@package mi.instrument.seabird.sbe16plus_v2.dosta.driver
@file mi/instrument/seabird/sbe16plus_v2/dosta/driver.py
@author Dan Mergens
@brief Driver class for dissolved oxygen sensor for the sbe16plus V2 CTD instrument.
"""
import re
from mi.core.common import BaseEnum
from mi.core.log import get_logger
from mi.core.util import hex2value
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import CommonDataParticleType, DataParticle, DataParticleKey
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.instrument_driver import DriverAsyncEvent, DriverEvent, DriverProtocolState
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.exceptions import SampleException
from mi.instrument.seabird.sbe16plus_v2.driver import Prompt, NEWLINE
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
log = get_logger()
########################################
# Finite State Machine Configuration
# - bare-bones, allowing discovery only
########################################
class ProtocolState(BaseEnum):
    """Protocol states supported by this read-only driver (discovery only)."""
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
class ProtocolEvent(BaseEnum):
    """Events the protocol finite state machine can dispatch."""
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    GET = DriverEvent.GET
    DISCOVER = DriverEvent.DISCOVER
class Capability(BaseEnum):
    """Subset of protocol events exposed as externally-visible capabilities."""
    GET = ProtocolEvent.GET
    DISCOVER = ProtocolEvent.DISCOVER
########################################
# Particle Definitions
########################################
class DataParticleType(BaseEnum):
    """Stream names for the particles published by this driver."""
    RAW = CommonDataParticleType.RAW
    DO_SAMPLE = 'do_stable_sample'
class DoSampleParticleKey(BaseEnum):
    """Value IDs used in the dissolved-oxygen sample particle."""
    OXYGEN = "oxygen"
    OXY_CALPHASE = "oxy_calphase"
    OXY_TEMP = "oxy_temp"
    EXT_VOLT0 = "ext_volt0"
class DoSampleParticle(DataParticle):
    """
    Class for handling the DO stable sample coming from CTDBP-N/O, CTDPF-A/B or CTDPF-SBE43.
    Sample:
       04570F0A1E910828FC47BC59F199952C64C9 - CTDBP-NO, CTDPF-AB
       04570F0A1E910828FC47BC59F1 - CTDPF-SBE43
    Format:
       ttttttccccccppppppTTTTvvvvwwwwoooooo
       ttttttccccccppppppTTTTvvvv
       Temperature = tttttt
       Conductivity = cccccc
       quartz pressure = pppppp
       quartz pressure temperature compensation = TTTT
       First external voltage = vvvv (ext_volt0 or oxy_calphase)
       Second external voltage = wwww (oxy_temp)
       Oxygen = oooooo (oxygen)
    """
    _data_particle_type = DataParticleType.DO_SAMPLE
    @staticmethod
    def regex():
        """
        Return the regex (string) matching one DO sample line.

        This driver should only be used for instruments known to be
        configured with an optode, so it may be unnecessary to allow
        for missing optode records.
        """
        pattern = r'#? *'  # pattern may or may not start with a '#' marker
        pattern += r'([0-9A-F]{22})'  # temp, cond, pres, pres temp
        pattern += r'(?P<optode>[0-9A-F]{0,14})'  # volt0, volt1, oxygen
        pattern += NEWLINE
        return pattern
    @staticmethod
    def regex_compiled():
        # Compiled form of regex(); used by the sieve and the parser.
        return re.compile(DoSampleParticle.regex())
    def _build_parsed_values(self):
        """
        Convert the instrument sample into a data particle.
        :return: data particle as a dictionary
        :raises SampleException: if the raw data does not match the regex
        """
        match = DoSampleParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of parsed sample data: [%s]" % self.raw_data)
        optode = match.group('optode')
        result = []
        # The optode group length distinguishes the instrument variant.
        if len(optode) == 4:  # SBE43 with attached optode (only has one optode value)
            volt0 = hex2value(optode)  # PD1377
            result = [{DataParticleKey.VALUE_ID: DoSampleParticleKey.EXT_VOLT0,
                       DataParticleKey.VALUE: volt0},
                      ]
        elif len(optode) == 14:  # CTDBP-NO with attached optode - e.g. '59F199952C64C9'
            oxy_calphase = hex2value(optode[:4])  # 59F1 - PD835
            oxy_temp = hex2value(optode[4:8])  # 9995 - PD197
            oxygen = hex2value(optode[8:])  # 2C64C9 - PD386
            result = [{DataParticleKey.VALUE_ID: DoSampleParticleKey.OXY_CALPHASE,
                       DataParticleKey.VALUE: oxy_calphase},
                      {DataParticleKey.VALUE_ID: DoSampleParticleKey.OXY_TEMP,
                       DataParticleKey.VALUE: oxy_temp},
                      {DataParticleKey.VALUE_ID: DoSampleParticleKey.OXYGEN,
                       DataParticleKey.VALUE: oxygen}
                      ]
        else:
            # Any other length (regex allows 0-14 hex chars) is treated as
            # missing optode data and yields an empty result.
            log.warning('Expected optode data missing from CTD record')
        return result
###############################################################################
# Seabird Electronics 16plus V2 NO Attached DOSTA Driver.
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """Driver for the DOSTA optode attached to an SBE16plus V2 CTD."""
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
####################################################################################
# Command Protocols - read-only, the attached CTD is used to control the instrument
####################################################################################
class Protocol(CommandResponseInstrumentProtocol):
    """
    Instrument protocol class for SBE16 DOSTA driver.

    Read-only: the attached CTD controls the instrument, so the finite
    state machine only supports discovery and parameter reads.
    """
    particles = [
        DoSampleParticle,
    ]
    def __init__(self, prompts, newline, driver_event):
        """
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The SBE16 newline.
        @param driver_event Driver process event callback.
        """
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
        # This driver does not process commands, the finite state machine and handlers are stubs
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)
        # BUG FIX: register the bound methods themselves.  The original code
        # *called* each handler here (e.g. ``self._handler_state_change()``),
        # which fired the handler once during construction and registered its
        # return value (None) as the FSM handler, so every later event
        # dispatch would fail trying to call None.
        handlers = {
            ProtocolState.UNKNOWN: [
                (ProtocolEvent.ENTER, self._handler_state_change),
                (ProtocolEvent.EXIT, self._handler_pass_through),
                (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
            ],
            ProtocolState.COMMAND: [
                (ProtocolEvent.ENTER, self._handler_state_change),
                (ProtocolEvent.EXIT, self._handler_pass_through),
                (ProtocolEvent.GET, self._handler_pass_through),
            ],
        }
        for state in handlers:
            for event, handler in handlers[state]:
                self._protocol_fsm.add_handler(state, event, handler)
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()
        self._protocol_fsm.start(ProtocolState.UNKNOWN)
        self._chunker = StringChunker(self.sieve_function)
    @staticmethod
    def sieve_function(raw_data):
        """ The method that splits samples
        Over-ride sieve function to handle additional particles.
        :return: list of (start, end) index pairs of matched samples
        """
        matchers = []
        return_list = []
        matchers.append(DoSampleParticle.regex_compiled())
        for matcher in matchers:
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))
        return return_list
    def _build_command_dict(self):
        # Only discovery is exposed; the CTD handles all other commands.
        self._cmd_dict.add(Capability.DISCOVER, display_name='Discover', timeout=1)
    def _build_param_dict(self):
        # No writable parameters; configuration is owned by the CTD driver.
        pass
        # self._param_dict.add(Parameter.OPTODE,
        #                      r'OPTODE>(.*)</OPTODE',
        #                      lambda match: True if match.group(1) == 'yes' else False,
        #                      self._true_false_to_string,
        #                      type=ParameterDictType.BOOL,
        #                      display_name="Optode Attached",
        #                      description="Enable optode: (true | false)",
        #                      range={'True': True, 'False': False},
        #                      startup_param=True,
        #                      direct_access=True,
        #                      default_value=True,
        #                      visibility=ParameterDictVisibility.IMMUTABLE)
        # self._param_dict.add(Parameter.VOLT1,
        #                      r'ExtVolt1>(.*)</ExtVolt1',
        #                      lambda match: True if match.group(1) == 'yes' else False,
        #                      self._true_false_to_string,
        #                      type=ParameterDictType.BOOL,
        #                      display_name="Volt 1",
        #                      description="Enable external voltage 1: (true | false)",
        #                      range={'True': True, 'False': False},
        #                      startup_param=True,
        #                      direct_access=True,
        #                      default_value=True,
        #                      visibility=ParameterDictVisibility.IMMUTABLE)
    def _got_chunk(self, chunk, timestamp):
        """
        Over-ride sieve function to handle additional particles.
        The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        """
        if self._extract_sample(DoSampleParticle, DoSampleParticle.regex_compiled(), chunk, timestamp):
            self._sampling = True
            return
    def _build_driver_dict(self):
        """
        Apparently VENDOR_SW_COMPATIBLE is required (TODO - move to the base class)
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
    ####################
    # Command Handlers
    ####################
    # Handlers accept *args/**kwargs because the FSM forwards event
    # arguments when dispatching.
    def _handler_pass_through(self, *args, **kwargs):
        # Intentional no-op for events that require no action.
        pass
    def _handler_state_change(self, *args, **kwargs):
        # Notify the driver layer that the FSM entered a new state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    # noinspection PyMethodMayBeStatic
    def _handler_unknown_discover(self, *args, **kwargs):
        # Discovery always lands in COMMAND; there is nothing to probe.
        next_state = ProtocolState.COMMAND
        return next_state, (next_state, None)
    # NOTE(review): the two handlers below are defined but never registered
    # with the FSM above — confirm whether COMMAND/ENTER and COMMAND/GET
    # should use them instead of the stubs.
    def _handler_command_enter(self, *args, **kwargs):
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_command_get(self, *args, **kwargs):
        next_state, result = self._handler_get(*args, **kwargs)
        # TODO - need to find out why this doesn't match other handler return signatures:
        # TODO (next_state, (next_state, result)
        return next_state, result
def create_playback_protocol(callback):
    """Create a Protocol for playback use (no prompts or newline needed)."""
    return Protocol(prompts=None, newline=None, driver_event=callback)
| danmergens/mi-instrument | mi/instrument/seabird/sbe16plus_v2/dosta/driver.py | Python | bsd-2-clause | 10,867 |
#!/bin/env python2.7
"""
For each line in the Shakespeare text,
CREATE a corresponding record in the database.
Each record will include the name of the character speaking,
the (absolute) line number of the phrase and the phrase itself,
trimmed of any leading or following spaces
"""
import sqlite3  # provides python with a library for sqlite
import time
conn = sqlite3.connect("shakespeare.sqlite")  # opens sqlite and a database file
myCursor = conn.cursor()  # provides a connection to the database
# create the table we'll import data into
# Handling the CREATE string this way is somewhat inelegant. There are better ways to do this
# for example, use a dictionary to store the field names and types. This would allow for reuse in the "insert" segment
myCursor.execute('CREATE TABLE midsummer(line_number INTEGER PRIMARY KEY, cast_name TEXT, play_text TEXT);')
myCursor.execute('CREATE TABLE performanceStats(action TEXT,duration REAL);')
#####
# get the list of characters and place into a list
start_stopwatch = time.time()
with open('characters.txt') as play_characters:
    listOfCharacters = play_characters.read().splitlines()
#####
# build the play text table
currentCharacter = "NoNameCharacter"
# Note: SQLite3 execute() prefers "?" over "%s"
sqlToDo = "INSERT INTO midsummer (cast_name,play_text) VALUES (?,?)"
with open("A_Midsummer_Nights_Dream.txt") as aLineInPlay:
    for line in aLineInPlay:
        # Normalize once per line (the original computed this twice).
        speaker = line.upper().strip()
        if speaker in listOfCharacters:
            # A character-name line switches the current speaker.
            currentCharacter = speaker
        else:
            argsToPass = currentCharacter, line
            myCursor.execute(sqlToDo, argsToPass)
conn.commit()
####
# Figure how long this took
end_stopwatch = time.time()
myCursor.execute('SELECT max(line_number) FROM midsummer')
linesInPlay = myCursor.fetchall()[0][0]
stopwatch = end_stopwatch - start_stopwatch
lines_duration = stopwatch / linesInPlay
SQLToDo = 'INSERT INTO performanceStats(action,duration) VALUES ("CREATE",?)'
myCursor.execute(SQLToDo, (lines_duration,))
conn.commit()
# Release the database handle now that all work is committed
# (the original script never closed the connection).
conn.close()
| mnr/DatabaseClinic-SQLite | Ch05/CREATE_shakespeare.py | Python | gpl-3.0 | 2,041 |
import os
import time
import types
import sys
from types import StringType
from collections import namedtuple
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class EnterTextEventHandler(QObject):
    """
    Event filter that clears a list model when its control gains focus.

    Installed on a widget; when that widget receives a FocusIn event the
    associated model is emptied and the control's index reset to 0, then
    the event is passed on to the default filter.
    """
    def __init__(self, control, model, button=None):
        """
        @param control: the widget whose focus events are watched
        @param model: model (with updateList) cleared on focus-in
        @param button: (optional) related button, stored for callers
        """
        super(EnterTextEventHandler, self).__init__()
        self.control = control
        self.model = model
        self.button = button
    def eventFilter(self, object, event):
        if event.type() == QEvent.FocusIn:
            # Focus means the user is about to type: start from an empty list.
            self.model.updateList([])
            self.control.setCurrentIndex(0)
        # BUG FIX: this class inherits QObject, not QWidget.  The original
        # delegated to QWidget.eventFilter(self, ...), which raises a
        # TypeError because ``self`` is not a QWidget instance.  Delegate to
        # the actual base class instead.
        return super(EnterTextEventHandler, self).eventFilter(object, event)
class LmListModel(QAbstractListModel):
    """
    @summary: List model used by Lifemapper Qt listing widgets
    @note: Inherits from QtCore.QAbstractListModel
    """
    # .........................................
    def __init__(self, listData, parent=None, model=False, *args):
        """
        @summary: Constructor for LmListModel
        @param listData: List of objects to insert into list
        @param parent: (optional) The parent of the LmListModel
        @param model: bool, whether or not data model is for modeling layer set
        @param args: Additional arguments to be passed
        """
        QAbstractListModel.__init__(self, parent, *args)
        self.listData = listData
        self.model = model
    # .........................................
    def data(self, index, role):
        """
        @summary: Gets data at the selected index
        @param index: The index to return
        @param role: The role of the item
        @return: The requested item (display string, or the raw object for
                 Qt.UserRole), or None for invalid indexes / other roles
        @rtype: QtCore.QVariant
        """
        if index.isValid() and (role == Qt.DisplayRole or role == Qt.EditRole):
            # NOTE(review): row 1 appears reserved for a "build new model"
            # entry when this is a modeling layer set — confirm intended row.
            if index.row() == 1 and self.model:
                return "build new model"
            else:
                return str(self.listData[index.row()])
        if index.isValid() and role == Qt.UserRole:
            # UserRole exposes the underlying object, not its string form.
            return self.listData[index.row()]
        else:
            return
    # .........................................
    def rowCount(self, parent=QModelIndex()):
        """
        @summary: Returns the number of rows in the list
        @param parent: (optional) The parent of the object
        @return: The number of items in the list
        @rtype: Integer
        """
        return len(self.listData)
    # .........................................
    def updateList(self, newList):
        """
        @summary: Updates the contents of the list
        @param newList: A list of items to use for the new list
        @note: The provided list will replace the old list
        @note: Old rows are not removed via beginRemoveRows (see the
               commented-out code); views refresh via the insert signals.
        """
        #self.beginRemoveRows(QModelIndex(),0,len(self.listData)-1) # optional
        #self.listData = [] # optional
        #self.endRemoveRows() # optional
        self.beginInsertRows(QModelIndex(), 0, len(newList)) #just len makes auto with setIndex work better
        self.listData = newList
        self.endInsertRows()
| lifemapper/LmQGIS | lifemapperTools/common/lmListModel.py | Python | gpl-2.0 | 2,928 |
# -*- coding: utf-8 -*-
"""
Proxy pattern
"""
class WorldOutside(object):
    """Real subject: the resource whose access the Proxy controls."""
    @staticmethod
    def access():
        # Simulates using the real resource.
        print "I came out over the wall!"
class Proxy:
def __init__(self):
self.busy = 'No'
self.sales = None
self.endpoint = WorldOutside()
def access(self):
print "Proxy checking for endpoint availability"
self.endpoint.access()
print "I back in the wall!"
if __name__ == '__main__':
    # Demonstrate proxied access to the guarded endpoint.
    proxy = Proxy()
    proxy.access()
| xuwei0455/design_patterns | Proxy.py | Python | mit | 494 |
import gdspy
from picwriter import toolkit as tk
import picwriter.components as pc
# Top-level GDS cell that collects every component in the layout.
top = gdspy.Cell("top")
# Shared waveguide template: 450 nm wide core, 10 um cladding, positive
# resist, etch fabrication, with core/cladding on layers 1 and 2.
wgt = pc.WaveguideTemplate(
    wg_width=0.45,
    clad_width=10.0,
    bend_radius=60,
    resist="+",
    fab="ETCH",
    wg_layer=1,
    wg_datatype=0,
    clad_layer=2,
    clad_datatype=0,
)
# Input focusing grating coupler, launching westward from (100, 0).
gc1 = pc.GratingCouplerFocusing(
    wgt,
    focus_distance=20.0,
    width=20,
    length=40,
    period=1.0,
    dutycycle=0.7,
    port=(100, 0),
    direction="WEST",
)
tk.add(top, gc1)
# Straight waveguide from the input coupler to the first splitter.
wg1 = pc.Waveguide([gc1.portlist["output"]["port"], (200, 0)], wgt)
tk.add(top, wg1)
# 1x2 MMI splitter fed by wg1.
mmi1 = pc.MMI1x2(
    wgt, length=50, width=10, taper_width=2.0, wg_sep=3, **wg1.portlist["output"]
)
tk.add(top, mmi1)
# Second 1x2 MMI, placed facing west so its outputs receive the two arms.
mmi2 = pc.MMI1x2(
    wgt,
    length=50,
    width=10,
    taper_width=2.0,
    wg_sep=3,
    port=(1750, 0),
    direction="WEST",
)
tk.add(top, mmi2)
# Upper arm: route from mmi1's top output up and over to the spiral.
(xtop, ytop) = mmi1.portlist["output_top"]["port"]
wg2 = pc.Waveguide(
    [
        (xtop, ytop),
        (xtop + 100, ytop),
        (xtop + 100, ytop + 200),
        (xtop + 200, ytop + 200),
    ],
    wgt,
)
tk.add(top, wg2)
# 8 mm delay spiral in an 800 um wide footprint on the upper arm.
sp = pc.Spiral(wgt, 800.0, 8000.0, parity=-1, **wg2.portlist["output"])
tk.add(top, sp)
# Route the spiral output back down into mmi2's bottom output port.
(xtop_out, ytop_out) = sp.portlist["output"]["port"]
(xmmi_top, ymmi_top) = mmi2.portlist["output_bot"]["port"]
wg_spiral_out = pc.Waveguide(
    [
        (xtop_out, ytop_out),
        (xmmi_top - 100, ytop_out),
        (xmmi_top - 100, ytop_out - 200),
        (xmmi_top, ytop_out - 200),
    ],
    wgt,
)
tk.add(top, wg_spiral_out)
# Lower arm: reference path from mmi1's bottom output to mmi2.
(xbot, ybot) = mmi1.portlist["output_bot"]["port"]
wg3 = pc.Waveguide(
    [
        (xbot, ybot),
        (xbot + 100, ybot),
        (xbot + 100, ybot - 200),
        (xmmi_top - 100, ybot - 200),
        (xmmi_top - 100, ybot),
        (xmmi_top, ybot),
    ],
    wgt,
)
tk.add(top, wg3)
# Output grating coupler, facing east past mmi2's input.
gc2 = pc.GratingCouplerFocusing(
    wgt,
    focus_distance=20.0,
    width=20,
    length=40,
    period=1.0,
    dutycycle=0.7,
    port=(mmi2.portlist["input"]["port"][0] + 100, mmi2.portlist["input"]["port"][1]),
    direction="EAST",
)
tk.add(top, gc2)
# Short connector between mmi2's input and the output coupler.
wg_gc2 = pc.Waveguide(
    [mmi2.portlist["input"]["port"], gc2.portlist["output"]["port"]], wgt
)
tk.add(top, wg_gc2)
# Flatten cladding/core into the final mask layer, view, and write GDS.
tk.build_mask(top, wgt, final_layer=3, final_datatype=0)
gdspy.LayoutViewer(cells=top)
gdspy.write_gds("tutorial2.gds", unit=1.0e-6, precision=1.0e-9)
import time
from selenium.webdriver.support.ui import Select
from draughtcraft import model
from draughtcraft.tests.selenium import TestSeleniumApp
class TestAllGrainBuilder(TestSeleniumApp):
    """
    Browser-driven tests for the all-grain recipe builder.

    setUp seeds two beer styles, opens the builder, names the recipe
    "Rocky Mountain River IPA" and selects the "All Grain" type.  The
    time.sleep(2) calls throughout wait for the builder's asynchronous
    autosave before reloading the page to assert persistence.
    """
    def setUp(self):
        super(TestAllGrainBuilder, self).setUp()
        # Fixture styles available in the style dropdown.
        model.Style(
            name='American IPA',
            min_og=1.056,
            max_og=1.075,
            min_fg=1.01,
            max_fg=1.018,
            min_ibu=40,
            max_ibu=70,
            min_srm=6,
            max_srm=15,
            min_abv=.055,
            max_abv=.075,
            category_number=14,
            style_letter='B'
        )
        model.Style(
            name='Spice, Herb, or Vegetable Beer',
            category_number=21,
            style_letter='A'
        )
        model.commit()
        self.get("/")
        self.b.find_element_by_link_text("Create Your Own Recipe").click()
        time.sleep(.1)
        self.b.find_element_by_id("name").clear()
        self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
        Select(
            self.b.find_element_by_id("type")
        ).select_by_visible_text("All Grain")
        self.b.find_element_by_css_selector("button.ribbon").click()
    @property
    def b(self):
        # Shorthand for the Selenium browser handle.
        return self.browser
    def blur(self):
        # Click a neutral element so focused inputs fire change/save events.
        self.b.find_element_by_css_selector(".logo").click()
    def test_defaults(self):
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("name").get_attribute("value") ==
            "Rocky Mountain River IPA"
        )
        self.assertEqual(
            "DraughtCraft - Rocky Mountain River IPA",
            self.b.title
        )
        self.assertEqual(
            "5",
            self.b.find_element_by_name("volume").get_attribute("value")
        )
        assert self.b.find_element_by_css_selector('.step.mash') is not None
        assert self.b.find_element_by_css_selector('.step.boil') is not None
        assert self.b.find_element_by_css_selector('.step.ferment') \
            is not None
    def test_name_change_save(self):
        self.b.find_element_by_name("name").send_keys("!")
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("name").get_attribute("value") ==
            "Rocky Mountain River IPA!"
        )
    def test_name_change_updates_page_title(self):
        self.b.find_element_by_name("name").send_keys("!")
        self.blur()
        assert self.b.title == 'DraughtCraft - Rocky Mountain River IPA!'
    def test_style_choose(self):
        # Select a style, verify persistence, then clear it again.
        self.b.find_element_by_link_text("No Style Specified").click()
        self.b.find_element_by_link_text("American IPA").click()
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_css_selector(".selectBox-label").text ==
            "American IPA"
        )
        self.b.find_element_by_link_text("American IPA").click()
        self.b.find_element_by_link_text("No Style Specified").click()
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_css_selector(".selectBox-label").text ==
            "No Style Specified"
        )
    def test_volume_change_save(self):
        self.b.find_element_by_name("volume").clear()
        self.b.find_element_by_name("volume").send_keys("10")
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("volume").get_attribute("value") ==
            "10"
        )
    def test_notes_change_save(self):
        self.b.find_element_by_css_selector('.notes textarea').send_keys("ABC")
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_css_selector('.notes textarea')
            .get_attribute("value") == "ABC"
        )
    def test_remove_addition(self):
        # Add a hop in each step, then remove it and verify it stays gone.
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil', 'Ferment'):
            self.b.find_element_by_link_text(step).click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition' % step.lower()
            )) == 0
            label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
            self.b.find_element_by_link_text(label).click()
            self.b.find_element_by_link_text("Simcoe (US)").click()
            time.sleep(2)
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition:not(:empty)' % step.lower()
            )) == 1
            self.b.find_element_by_css_selector(
                '.%s .ingredient-list .addition .close a' % step.lower()
            ).click()
            time.sleep(2)
            self.b.refresh()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition' % step.lower()
            )) == 0
    def test_add_malt(self):
        model.Fermentable(
            name='2-Row',
            type='MALT',
            origin='US',
            ppg=36,
            lovibond=2,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil'):
            self.b.find_element_by_link_text(step).click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition' % step.lower()
            )) == 0
            self.b.find_element_by_link_text(
                "Add Malt/Fermentables..."
            ).click()
            self.b.find_element_by_link_text("2-Row (US)").click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition:not(:empty)' % step.lower()
            )) == 1
    def test_add_extract(self):
        model.Fermentable(
            name="Cooper's Amber LME",
            type='EXTRACT',
            origin='AUSTRALIAN',
            ppg=36,
            lovibond=13.3,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil'):
            self.b.find_element_by_link_text(step).click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition' % step.lower()
            )) == 0
            self.b.find_element_by_link_text("Add Malt Extract...").click()
            self.b.find_element_by_link_text(
                "Cooper's Amber LME (Australian)"
            ).click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition:not(:empty)' % step.lower()
            )) == 1
    def test_add_hop(self):
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil', 'Ferment'):
            self.b.find_element_by_link_text(step).click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition' % step.lower()
            )) == 0
            label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
            self.b.find_element_by_link_text(label).click()
            self.b.find_element_by_link_text("Simcoe (US)").click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition:not(:empty)' % step.lower()
            )) == 1
    def test_add_extra(self):
        model.Extra(
            name="Whirlfloc Tablet",
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil', 'Ferment'):
            self.b.find_element_by_link_text(step).click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition' % step.lower()
            )) == 0
            self.b.find_element_by_link_text("Add Misc...").click()
            self.b.find_element_by_link_text("Whirlfloc Tablet").click()
            assert len(self.b.find_elements_by_css_selector(
                '.%s .ingredient-list .addition:not(:empty)' % step.lower()
            )) == 1
    def test_mash_method_change(self):
        Select(
            self.b.find_element_by_name('mash_method')
        ).select_by_visible_text("Multi-Step")
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("mash_method").
            get_attribute("value") == "MULTISTEP"
        )
    def test_mash_instructions_change(self):
        self.b.find_element_by_name('mash_instructions').clear()
        self.b.find_element_by_name('mash_instructions').send_keys(
            'Testing 1 2 3'
        )
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("mash_instructions").
            get_attribute("value") == "Testing 1 2 3"
        )
    def test_boil_minutes(self):
        self.b.find_element_by_link_text('Boil').click()
        self.b.find_element_by_name('boil_minutes').clear()
        self.b.find_element_by_name('boil_minutes').send_keys('90')
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("boil_minutes").
            get_attribute("value") == "90"
        )
    def test_fermentation_schedule_change(self):
        # Add two fermentation steps (three total) and vary days/temps.
        self.b.find_element_by_link_text('Ferment').click()
        self.b.find_element_by_link_text("Add...").click()
        self.b.find_element_by_link_text("Add...").click()
        days = self.b.find_elements_by_css_selector('.process select.days')
        temps = self.b.find_elements_by_css_selector(
            '.process select.fahrenheit'
        )
        assert len(days) == 3
        assert len(temps) == 3
        for i, el in enumerate(days):
            Select(el).select_by_visible_text(str(14 + (7 * i)))
        for j, el in enumerate(temps):
            Select(el).select_by_visible_text(str(68 + (2 * j)))
        self.blur()
        time.sleep(2)
        self.b.refresh()
        time.sleep(1)
        days = self.b.find_elements_by_css_selector('.process select.days')
        temps = self.b.find_elements_by_css_selector(
            '.process select.fahrenheit'
        )
        assert len(days) == 3
        assert len(temps) == 3
        for i, d in enumerate(days):
            assert d.get_attribute('value') == str(14 + (7 * i))
        for j, t in enumerate(temps):
            assert t.get_attribute('value') == str(68 + (2 * j))
    def test_change_fermentable_amount(self):
        model.Fermentable(
            name='2-Row',
            type='MALT',
            origin='US',
            ppg=36,
            lovibond=2,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil'):
            self.b.find_element_by_link_text(step).click()
            self.b.find_element_by_link_text(
                "Add Malt/Fermentables..."
            ).click()
            self.b.find_element_by_link_text("2-Row (US)").click()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .amount input' % step.lower()
            )
            i.clear()
            i.send_keys('10 lb')
            self.blur()
            time.sleep(2)
            self.b.refresh()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .amount input' % step.lower()
            )
            assert i.get_attribute('value') == '10 lb'
    def test_metric_entry(self):
        # US-unit recipe: metric entry should be converted to pounds.
        model.Fermentable(
            name='2-Row',
            type='MALT',
            origin='US',
            ppg=36,
            lovibond=2,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil'):
            self.b.find_element_by_link_text(step).click()
            self.b.find_element_by_link_text(
                "Add Malt/Fermentables..."
            ).click()
            self.b.find_element_by_link_text("2-Row (US)").click()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .amount input' % step.lower()
            )
            i.clear()
            i.send_keys('1 kg')
            self.blur()
            time.sleep(2)
            self.b.refresh()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .amount input' % step.lower()
            )
            assert i.get_attribute('value') == '2.204 lb'
    def test_change_hop_form(self):
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil', 'Ferment'):
            self.b.find_element_by_link_text(step).click()
            label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
            self.b.find_element_by_link_text(label).click()
            self.b.find_element_by_link_text("Simcoe (US)").click()
            s = Select(self.b.find_element_by_css_selector(
                '.%s .addition .form select' % step.lower()
            ))
            s.select_by_visible_text('Pellet')
            self.blur()
            time.sleep(2)
            self.b.refresh()
            s = self.b.find_element_by_css_selector(
                '.%s .addition .form select' % step.lower()
            )
            assert s.get_attribute('value') == 'PELLET'
    def test_change_hop_aa(self):
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil', 'Ferment'):
            self.b.find_element_by_link_text(step).click()
            label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
            self.b.find_element_by_link_text(label).click()
            self.b.find_element_by_link_text("Simcoe (US)").click()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .unit input' % step.lower()
            )
            i.clear()
            i.send_keys('12')
            self.blur()
            time.sleep(2)
            self.b.refresh()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .unit input' % step.lower()
            )
            assert i.get_attribute('value') == '12'
    def test_change_hop_boil_time(self):
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        self.b.find_element_by_link_text('Boil').click()
        self.b.find_element_by_link_text('Add Hops...').click()
        self.b.find_element_by_link_text("Simcoe (US)").click()
        selects = self.b.find_elements_by_css_selector(
            '.boil .addition .time select'
        )
        Select(selects[1]).select_by_visible_text('45 min')
        self.blur()
        time.sleep(2)
        self.b.refresh()
        selects = self.b.find_elements_by_css_selector(
            '.boil .addition .time select'
        )
        assert selects[1].get_attribute('value') == '45'
    def test_change_hop_first_wort(self):
        # "First Wort" additions have no minute value; the second select hides.
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        self.b.find_element_by_link_text('Boil').click()
        self.b.find_element_by_link_text('Add Hops...').click()
        self.b.find_element_by_link_text("Simcoe (US)").click()
        selects = self.b.find_elements_by_css_selector(
            '.boil .addition .time select'
        )
        Select(selects[0]).select_by_visible_text('First Wort')
        assert not selects[1].is_displayed()
    def test_change_hop_flameout(self):
        # Same hidden-minutes behavior for "Flame Out" additions.
        model.Hop(
            name="Simcoe",
            origin='US',
            alpha_acid=13,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        self.b.find_element_by_link_text('Boil').click()
        self.b.find_element_by_link_text('Add Hops...').click()
        self.b.find_element_by_link_text("Simcoe (US)").click()
        selects = self.b.find_elements_by_css_selector(
            '.boil .addition .time select'
        )
        Select(selects[0]).select_by_visible_text('Flame Out')
        assert not selects[1].is_displayed()
    def test_yeast_step(self):
        model.Yeast(
            name='Wyeast 1056 - American Ale',
            type='ALE',
            form='LIQUID',
            attenuation=.75,
            flocculation='MEDIUM/HIGH'
        )
        model.commit()
        self.b.refresh()
        self.b.find_element_by_link_text('Ferment').click()
        self.b.find_element_by_link_text('Add Yeast...').click()
        self.b.find_element_by_link_text('Wyeast 1056 - American Ale').click()
        Select(self.b.find_element_by_css_selector(
            '.ferment .addition select'
        )).select_by_visible_text('Secondary')
        time.sleep(2)
        self.b.refresh()
        assert self.b.find_element_by_css_selector(
            '.ferment .addition select'
        ).get_attribute('value') == 'SECONDARY'
class TestExtractBuilder(TestSeleniumApp):
    """Builder tests for extract recipes, which should have no mash step."""
    def setUp(self):
        super(TestExtractBuilder, self).setUp()
        self.get("/")
        self.b.find_element_by_link_text("Create Your Own Recipe").click()
        time.sleep(.1)
        self.b.find_element_by_id("name").clear()
        self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
        Select(
            self.b.find_element_by_id("type")
        ).select_by_visible_text("Extract")
        self.b.find_element_by_css_selector("button.ribbon").click()
    @property
    def b(self):
        # Shorthand for the Selenium browser handle.
        return self.browser
    def test_mash_missing(self):
        # Only Boil and Ferment tabs should appear for an extract recipe.
        assert len(
            self.b.find_elements_by_css_selector('.step.boil h2 li a')
        ) == 2
class TestMetricBuilder(TestSeleniumApp):
    """
    Builder tests with metric units enabled: volumes default to liters,
    ingredient amounts stay in kg, and temperatures use Celsius.
    """
    def setUp(self):
        super(TestMetricBuilder, self).setUp()
        self.get("/")
        self.b.find_element_by_link_text("Create Your Own Recipe").click()
        self.b.find_element_by_link_text("Want Metric Units?").click()
        time.sleep(.1)
        self.b.find_element_by_id("name").clear()
        self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
        Select(
            self.b.find_element_by_id("type")
        ).select_by_visible_text("All Grain")
        self.b.find_element_by_css_selector("button.ribbon").click()
    @property
    def b(self):
        # Shorthand for the Selenium browser handle.
        return self.browser
    def blur(self):
        # Click a neutral element so focused inputs fire change/save events.
        self.b.find_element_by_css_selector(".logo").click()
    def test_defaults(self):
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("name").get_attribute("value") ==
            "Rocky Mountain River IPA"
        )
        self.assertEqual(
            "DraughtCraft - Rocky Mountain River IPA",
            self.b.title
        )
        # Metric default batch volume (liters) rather than 5 gallons.
        self.assertEqual(
            "20",
            self.b.find_element_by_name("volume").get_attribute("value")
        )
        assert self.b.find_element_by_css_selector('.step.mash') is not None
        assert self.b.find_element_by_css_selector('.step.boil') is not None
        assert self.b.find_element_by_css_selector('.step.ferment') \
            is not None
    def test_volume_change_save(self):
        self.b.find_element_by_name("volume").clear()
        self.b.find_element_by_name("volume").send_keys("10")
        self.blur()
        time.sleep(2)
        self.b.refresh()
        self.wait.until(
            lambda driver:
            self.b.find_element_by_name("volume").get_attribute("value") ==
            "10"
        )
    def test_metric_ingredient_amount(self):
        # Metric recipe: kg entry should persist unconverted.
        model.Fermentable(
            name='2-Row',
            type='MALT',
            origin='US',
            ppg=36,
            lovibond=2,
            description='Sample Description'
        )
        model.commit()
        self.b.refresh()
        for step in ('Mash', 'Boil'):
            self.b.find_element_by_link_text(step).click()
            self.b.find_element_by_link_text(
                "Add Malt/Fermentables..."
            ).click()
            self.b.find_element_by_link_text("2-Row (US)").click()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .amount input' % step.lower()
            )
            i.clear()
            i.send_keys('1 kg')
            self.blur()
            time.sleep(2)
            self.b.refresh()
            i = self.b.find_element_by_css_selector(
                '.%s .addition .amount input' % step.lower()
            )
            assert i.get_attribute('value') == '1 kg'
    def test_fermentation_schedule_change(self):
        # Temperatures use Celsius values despite the .fahrenheit selector.
        self.b.find_element_by_link_text('Ferment').click()
        self.b.find_element_by_link_text("Add...").click()
        self.b.find_element_by_link_text("Add...").click()
        days = self.b.find_elements_by_css_selector('.process select.days')
        temps = self.b.find_elements_by_css_selector(
            '.process select.fahrenheit'
        )
        assert len(days) == 3
        assert len(temps) == 3
        for i, el in enumerate(days):
            Select(el).select_by_visible_text(str(14 + (7 * i)))
        for j, el in enumerate(temps):
            Select(el).select_by_visible_text(str(20 + (2 * j)))
        self.blur()
        time.sleep(2)
        self.b.refresh()
        time.sleep(1)
        days = self.b.find_elements_by_css_selector('.process select.days')
        temps = self.b.find_elements_by_css_selector(
            '.process select.fahrenheit'
        )
        assert len(days) == 3
        assert len(temps) == 3
        for i, d in enumerate(days):
            assert d.get_attribute('value') == str(14 + (7 * i))
        for j, t in enumerate(temps):
            assert t.get_attribute('value') == str(20 + (2 * j))
| ryanpetrello/draughtcraft | draughtcraft/tests/selenium/recipes/test_builder.py | Python | bsd-3-clause | 23,046 |
#!/usr/bin/env python
import elf
# Define section markers for various sections in elf
def elf_loadable_section_info(img):
    """Return the address bounds of the loadable sections of an ELF image.

    :param img: the ELF image, in whatever form elf.ElfFile.from_file
        accepts (presumably a path or file object -- TODO confirm).
    :return: 4-tuple (rw_start, rw_end, rx_start, rx_end) where the first
        pair bounds the writable (RW) loadable sections and the second
        bounds the remaining (RX/RO) loadable sections.  A bound is 0 when
        no section of that kind exists (original return convention kept).
    """
    elffile = elf.ElfFile.from_file(img)
    # Flag encoding used by elf section headers (sh_flags bits).
    sh_flag_write = 1 << 0
    sh_flag_load = 1 << 1
    sh_flag_execute = 1 << 2
    # BUGFIX: the original used 0 as the "unset" marker, so a loadable
    # section legitimately placed at address 0 was treated as "no section
    # seen yet" and overwritten by the next section.  Use None sentinels.
    rw_bounds = [None, None]   # [start, end] of RW sections
    rx_bounds = [None, None]   # [start, end] of RX/RO sections
    for sheader in elffile.sheaders:
        x = sheader.ai
        # Only loadable sections contribute to the ranges.
        if not (x.sh_flags.get() & sh_flag_load):
            continue
        start = x.sh_addr.get()
        end = start + x.sh_size.get()
        # Writable sections go to the RW range, everything else to RX/RO.
        bounds = rw_bounds if (x.sh_flags.get() & sh_flag_write) else rx_bounds
        if (bounds[0] is None) or (bounds[0] > start):
            bounds[0] = start
        if (bounds[1] is None) or (bounds[1] < end):
            bounds[1] = end
    def _or_zero(v):
        # Preserve the historical "0 means no such section" return value.
        return 0 if v is None else v
    return _or_zero(rw_bounds[0]), _or_zero(rw_bounds[1]), \
        _or_zero(rx_bounds[0]), _or_zero(rx_bounds[1])
| freecores/c0or1k | tools/pyelf/elf_section_info.py | Python | gpl-3.0 | 1,362 |
#!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = 'Rachel Manoni'
import os
from mi.logging import config
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.moas.gl.flord_m.flord_m_glider_driver import FlordMDriver
from mi.core.versioning import version
@version("15.6.0")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
    """Parse a flord_m glider telemetered file and publish its particles.

    :param basePythonCodePath: root path used to locate the mi-logging
        configuration file
    :param sourceFilePath: path of the raw glider data file to parse
    :param particleDataHdlrObj: handler object receiving parsed particles
    :return: whatever FlordMDriver.process() returns
    """
    config.add_configuration(
        os.path.join(basePythonCodePath, 'res', 'config', 'mi-logging.yml'))
    driver = FlordMDriver(
        basePythonCodePath, sourceFilePath, particleDataHdlrObj,
        {
            DataSetDriverConfigKeys.PARTICLE_MODULE:
                'mi.dataset.parser.glider',
            DataSetDriverConfigKeys.PARTICLE_CLASS:
                'FlordTelemeteredDataParticle',
        })
    return driver.process()
"""
Django settings for css122 project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# API Keys
# Google API key is read from the environment (may be None if unset).
GOOG_API_KEY = os.environ.get('GOOG_API_KEY')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded True here; must be False in production.
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS only works while DEBUG is True.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'meethalfway',
    'bootstrap3',
    'bootstrapform',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
# NOTE(review): module docstring says "css122" but the URLconf/WSGI modules
# are "cs122" -- presumably the project package is cs122; confirm.
ROOT_URLCONF = 'cs122.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'cs122.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Database settings come from the DATABASE_URL environment variable
# (Heroku-style) via dj_database_url.
DATABASES = {'default':dj_database_url.config()}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
# Additional locations of static files
STATICFILES_DIRS = [os.path.join(PROJECT_DIR, '../meethalfway/static/')]
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
| cszc/Meet-Halfway | cs122/settings.py | Python | apache-2.0 | 3,647 |
#!/usr/bin/python
import sys
import os
import pprint
import copy
class ZoneRipper:
    """Parse a BIND-style DNS zone file into a nested dictionary.

    Usage: construct with the zone name, feed data with parseFile() or
    parseString(), then read the result with getZone().  Top-level keys
    of the result: 'soa', 'addresses' (A), 'nameservers' (NS),
    'mailservers' (MX), 'aliases' (CNAME), 'services' (SRV),
    'reverses' (PTR), 'txt' (TXT) and 'globalvars' ($TTL).

    The module targets Python 2 but this class is written to run
    unchanged on Python 3 as well.
    """
    def __init__(self, zonename):
        self.zonename = zonename     # zone apex, substituted for '@'/'IN'
        self.cfgTokens = []          # token stream built by tokenize()
        self.cfgStr = ""             # comment-stripped zone text
        self.curToken = 0            # cursor into cfgTokens
        self.saveToken = 0
        self.zone = {}               # parse result, see class docstring

    def consume(self):
        """Return the current token and advance the cursor."""
        _item = self.cfgTokens[self.curToken]
        self.curToken += 1
        return _item

    def peek(self):
        """Return the current token without advancing the cursor."""
        return self.cfgTokens[self.curToken]

    def tokensAvail(self):
        """Return True while unconsumed tokens remain."""
        return self.curToken < len(self.cfgTokens)

    def tokenize(self):
        """Split the uncommented text into whitespace-separated tokens.

        '}', ';' and '"' are padded with spaces first so each becomes a
        stand-alone token.  (Yes, this is an ugly hack of a tokenizer.
        Feel free to fix.)
        """
        self.cfgStr = self.cfgStr.replace('}', ' } ')
        self.cfgStr = self.cfgStr.replace(';', ' ; ')
        self.cfgStr = self.cfgStr.replace('"', ' " ')
        self.cfgTokens = self.cfgStr.split()

    def uncommentAndStore(self, string):
        """Strip ';', '#', '//' and '/* */' comments; append to cfgStr."""
        inComment = 0
        for line in string.split('\n'):
            line = line.strip()
            if line.count(';') > 0:
                self.cfgStr += line.split(';')[0].strip() + ' '
                continue
            if line.count('#') > 0:
                self.cfgStr += line.split('#')[0].strip() + ' '
                continue
            if line.count('//') > 0:
                self.cfgStr += line.split('//')[0].strip() + ' '
                continue
            if line.count('/*') > 0:
                inComment = 1
                self.cfgStr += line.split('/*')[0].strip() + ' '
                continue
            if inComment and line.count('*/') == 0:
                continue
            if inComment and line.count('*/') > 0:
                inComment = 0
                self.cfgStr += line.split('*/')[1].strip() + ' '
                continue
            self.cfgStr += line + ' '

    def parseFile(self, filename):
        """Parse the zone file at *filename*; silently ignore bad paths."""
        self.filename = filename
        if not os.path.exists(self.filename):
            return
        if not os.path.isfile(self.filename):
            return
        try:
            self.fp = open(self.filename)
        except IOError:
            return
        self.uncommentAndStore(self.fp.read())
        self.tokenize()
        self.parse()

    def parseString(self, string):
        """Parse zone data supplied directly as a string."""
        self.uncommentAndStore(string)
        self.tokenize()
        self.parse()

    def parse(self):
        """Consume the token stream and populate self.zone."""
        while self.tokensAvail():
            token = self.consume()
            if token.startswith('$'):
                self.processDirective(token)
                continue
            if token == '@':
                # '@' is shorthand for the zone apex.
                itemName = self.zonename
                itemType = self.consume()
            elif token == 'IN':
                # A bare leading 'IN' also refers to the zone itself.
                itemName = self.zonename
                itemType = token
            else:
                itemName = token
                itemType = self.consume()
            recordType = self.consume()
            if recordType == 'SOA':
                self.zone['soa'] = self.processSOA(itemName, itemType,
                                                   recordType)
            elif recordType == 'A':
                self.zone.setdefault('addresses', {})[itemName] = \
                    self.consume()
            elif recordType == 'NS':
                nstgt = self.consume()
                self.zone.setdefault('nameservers', {}) \
                    .setdefault(itemName, []).append(nstgt)
            elif recordType == 'MX':
                prio = self.consume()
                serv = self.consume()
                self.zone.setdefault('mailservers', {}) \
                    .setdefault(itemName, []).append((serv, prio))
            elif recordType == 'CNAME':
                self.zone.setdefault('aliases', {})[itemName] = self.consume()
            elif recordType == 'SRV':
                prio = self.consume()
                weight = self.consume()
                port = self.consume()
                target = self.consume()
                self.zone.setdefault('services', []).append(
                    (target, port, prio, weight))
            elif recordType == 'PTR':
                self.zone.setdefault('reverses', {})[itemName] = \
                    self.consume()
            elif recordType == 'TXT':
                txt = self.zone.setdefault('txt', {})
                if self.peek() == '"':
                    txt[itemName] = self.readQuotedField()
                else:
                    txt[itemName] = self.consume()
            else:
                # Unknown record types are reported; parsing continues
                # (the stream is almost certainly misaligned after this).
                print("Strange recordtype encountered: " + recordType)
                print("Zone being parsed: " + self.zonename)
                print("Next token: " + self.peek())

    def readQuotedField(self):
        """Read a '"'-delimited token run (TXT data); return it re-joined."""
        val = ""
        token = self.consume()
        if not token == '"':
            print("Parser fail - no open of quoted field for TXT record.")
        token = self.consume()
        while token != '"':
            val = val + token
            if self.peek() != '"':
                val = val + ' '
            token = self.consume()
        return val

    def processSOA(self, zone, entry, record):
        """Parse a parenthesized SOA record body into a dict.

        Requires the '(' and ')' to be whitespace-separated tokens.
        """
        soaRecord = {}
        soaRecord['domain'] = zone
        soaRecord['masterServer'] = self.consume()
        soaRecord['mailbox'] = self.consume()
        token = self.consume()
        if token != '(':
            raise Exception("Parse error - SOA record messed up")
        soaRecord['serial'] = self.consume()
        soaRecord['refresh'] = self.consume()
        soaRecord['retry'] = self.consume()
        soaRecord['expire'] = self.consume()
        soaRecord['ttl'] = self.consume()
        token = self.consume()
        if token != ')':
            raise Exception("Parse error - SOA record exit missed")
        return soaRecord

    def processDirective(self, directive):
        """Handle a '$' directive: $TTL stores a global, $INCLUDE recurses."""
        if directive == '$TTL':
            value = self.consume()
            self.zone.setdefault('globalvars', {})[directive] = value
        elif directive == '$INCLUDE':
            subFile = self.consume()
            zoneInc = ZoneRipper(self.zonename)
            zoneInc.parseFile(subFile)
            subZone = zoneInc.getZone()
            for key in subZone.keys():
                if key in self.zone:
                    # BUGFIX: was "zoneInc[key].keys()" -- ZoneRipper is
                    # not indexable, so merging an $INCLUDE into an
                    # existing section raised TypeError.  Merge from the
                    # included zone's dict instead.
                    # NOTE: assumes dict-valued sections; a list-valued
                    # section ('services') still would not merge cleanly.
                    for otherkey in subZone[key].keys():
                        self.zone[key][otherkey] = \
                            copy.deepcopy(subZone[key][otherkey])
                else:
                    self.zone[key] = copy.deepcopy(subZone[key])
        else:
            print("Directive " + directive + " not understood!")
            raise Exception("Parse fail - funky directive")

    def printZone(self):
        """Pretty-print the parsed zone to stdout."""
        pprint.pprint(self.zone)

    def getZone(self):
        """Return a deep copy of the parsed zone dictionary."""
        return copy.deepcopy(self.zone)
def runTests():
    """Smoke-test ZoneRipper against two local sample zone files."""
    samples = (
        ("railinc.com", '/home/itjxb01/named-copy/int.railinc.com'),
        ("11.10.10.in-addr.arpa", '/home/itjxb01/named-copy/db.10.10.11'),
    )
    for zone_name, zone_path in samples:
        ripper = ZoneRipper(zone_name)
        ripper.parseFile(zone_path)
        ripper.printZone()
if __name__ == '__main__':
    runTests()
| jwbernin/dns-sanity-checker | zoneripper.py | Python | gpl-2.0 | 6,771 |
from mpi4py import MPI
# -----------------------------------------------------------------------------
import struct as _struct
try:
from numpy import empty as _empty
def _array_new(size, typecode, init=0):
a = _empty(size, typecode)
a.fill(init)
return a
def _array_set(ary, value):
ary.fill(value)
def _array_sum(ary):
return ary.sum()
except ImportError:
from array import array as _array
def _array_new(size, typecode, init=0):
return _array(typecode, [init]) * size
def _array_set(ary, value):
for i, _ in enumerate(ary):
ary[i] = value
def _array_sum(ary):
return sum(ary, 0)
# -----------------------------------------------------------------------------
class Counter(object):
    """Scalable distributed fetch-and-add counter over MPI one-sided RMA.

    Rank 0 hosts a binary tree of per-process contribution slots in an
    MPI window; each process accumulates its increments into its own
    root-to-leaf path (acc_idx) and reads the complementary slots
    (get_idx), so next() yields globally unique, increasing values
    without a central hot spot.
    """
    def __init__(self, comm, init=0):
        # Compute this rank's tree paths: acc_idx are the window slots this
        # process adds its increments to, get_idx the sibling slots it reads
        # to learn everyone else's total.
        size = comm.Get_size()
        rank = comm.Get_rank()
        mask = 1
        while mask < size:
            mask <<= 1
        mask >>= 1  # largest power of two <= size
        idx = 0
        get_idx = []
        acc_idx = []
        while mask >= 1:
            left = idx + 1
            right = idx + (mask<<1)
            if rank < mask:
                acc_idx.append( left )
                get_idx.append( right )
                idx = left
            else:
                acc_idx.append( right )
                get_idx.append( left )
                idx = right
                rank = rank % mask
            mask >>= 1
        # One int slot per tree node.
        typecode = 'i'
        datatype = MPI.INT
        itemsize = datatype.Get_size()
        # Rank 0 owns the window memory holding the whole tree.
        root = 0
        rank = comm.Get_rank()
        if rank == root:
            nlevels = len(get_idx) + 1
            nentries = (1<<nlevels) - 1
            self.mem = MPI.Alloc_mem(nentries*itemsize, MPI.INFO_NULL)
            self.mem[:] = _struct.pack(typecode, init) * nentries
        else:
            self.mem = None
        # Window plus indexed datatypes selecting this rank's acc/get slots.
        self.win = MPI.Win.Create(self.mem, itemsize, MPI.INFO_NULL, comm)
        self.acc_type = datatype.Create_indexed_block(1, acc_idx).Commit()
        self.get_type = datatype.Create_indexed_block(1, get_idx).Commit()
        self.acc_buf = _array_new(len(acc_idx), typecode)
        self.get_buf = _array_new(len(get_idx), typecode)
        self.myval = 0  # running total of this process's own increments
    def free(self):
        """Release the window, window memory and derived datatypes."""
        if self.win:
            self.win.Free()
        if self.mem:
            MPI.Free_mem(self.mem)
            self.mem = None
        if self.get_type:
            self.get_type.Free()
        if self.acc_type:
            self.acc_type.Free()
    def next(self, increment=1):
        """Atomically fetch the next counter value and add *increment*.

        :return: the counter value reserved for this call (own total so
            far plus the other processes' contributions read this round)
        """
        _array_set(self.acc_buf, increment)
        root = 0
        # Get + Accumulate inside one exclusive lock epoch makes the
        # fetch-and-add atomic with respect to other processes.
        self.win.Lock(MPI.LOCK_EXCLUSIVE, root, 0)
        self.win.Get(self.get_buf, root, [0, 1, self.get_type])
        self.win.Accumulate(self.acc_buf, root, [0, 1, self.acc_type], MPI.SUM)
        self.win.Unlock(root)
        nxtval = self.myval + _array_sum(self.get_buf)
        self.myval += increment
        return nxtval
# -----------------------------------------------------------------------------
class Mutex(object):
    """Distributed mutual-exclusion lock built on the scalable Counter.

    Also usable as a context manager: ``with Mutex(comm): ...``.
    """
    def __init__(self, comm):
        self.counter = Counter(comm)
    def __enter__(self):
        self.lock()
        return self
    def __exit__(self, *exc):
        self.unlock()
        return None
    def free(self):
        """Release the MPI resources held by the underlying counter."""
        self.counter.free()
    def lock(self):
        """Spin until this process observes the counter at zero."""
        while self.counter.next(+1) != 0:
            # Someone else holds the lock: undo our increment and retry.
            self.counter.next(-1)
    def unlock(self):
        """Drop this process's hold on the counter."""
        self.counter.next(-1)
# -----------------------------------------------------------------------------
def test_counter():
    """Each of five next() calls per rank must yield a globally unique value."""
    counter = Counter(MPI.COMM_WORLD)
    vals = [counter.next() for _ in range(5)]
    counter.free()
    vals = MPI.COMM_WORLD.allreduce(vals)
    assert sorted(vals) == list(range(len(vals)))
def test_mutex():
    """Smoke test: lock/unlock/free must complete without deadlock."""
    guard = Mutex(MPI.COMM_WORLD)
    guard.lock()
    guard.unlock()
    guard.free()
if __name__ == '__main__':
    test_counter()
    test_mutex()
# -----------------------------------------------------------------------------
| capoe/espressopp.soap | contrib/mpi4py/mpi4py-1.3/demo/nxtval/nxtval-scalable.py | Python | gpl-3.0 | 4,142 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""Contains classes and functions that a SAML2.0 Service Provider (SP) may use
to conclude its tasks.
"""
from saml2.request import LogoutRequest
import saml2
from saml2 import saml, SAMLError
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_SOAP
from saml2.ident import decode, code
from saml2.httpbase import HTTPError
from saml2.s_utils import sid
from saml2.s_utils import status_message_factory
from saml2.s_utils import success_status_factory
from saml2.samlp import STATUS_REQUEST_DENIED
from saml2.samlp import STATUS_UNKNOWN_PRINCIPAL
from saml2.time_util import not_on_or_after
from saml2.saml import AssertionIDRef
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.client_base import Base
from saml2.client_base import LogoutError
from saml2.client_base import NoServiceDefined
from saml2.mdstore import destinations
try:
from urlparse import parse_qs
except ImportError:
# Compatibility with Python <= 2.5
from cgi import parse_qs
import logging
logger = logging.getLogger(__name__)
class Saml2Client(Base):
""" The basic pySAML2 service provider class """
    def prepare_for_authenticate(self, entityid=None, relay_state="",
                                 binding=saml2.BINDING_HTTP_REDIRECT, vorg="",
                                 nameid_format=NAMEID_FORMAT_PERSISTENT,
                                 scoping=None, consent=None, extensions=None,
                                 sign=None,
                                 response_binding=saml2.BINDING_HTTP_POST,
                                 **kwargs):
        """ Makes all necessary preparations for an authentication request.

        :param entityid: The entity ID of the IdP to send the request to
        :param relay_state: To where the user should be returned after
            successfull log in.
        :param binding: Which binding to use for sending the request
        :param vorg: The entity_id of the virtual organization I'm a member of
        :param nameid_format: The requested format of the NameID
        :param scoping: For which IdPs this query are aimed.
        :param consent: Whether the principal have given her consent
        :param extensions: Possible extensions
        :param sign: Whether the request should be signed or not.
        :param response_binding: Which binding to use for receiving the response
        :param kwargs: Extra key word arguments
        :return: session id and AuthnRequest info
        """
        # Resolve the IdP's single-sign-on endpoint for the chosen binding.
        destination = self._sso_location(entityid, binding)
        reqid, req = self.create_authn_request(destination, vorg, scoping,
                                               response_binding, nameid_format,
                                               consent=consent,
                                               extensions=extensions, sign=sign,
                                               **kwargs)
        _req_str = "%s" % req
        logger.info("AuthNReq: %s" % _req_str)
        # Package the serialized request for transport (redirect URL or
        # POST form) together with the relay state.
        info = self.apply_binding(binding, _req_str, destination, relay_state)
        return reqid, info
    def global_logout(self, name_id, reason="", expire=None, sign=None):
        """ More or less a layer of indirection :-/
        Bootstrapping the whole thing by finding all the IdPs that should
        be notified.

        :param name_id: The identifier of the subject that wants to be
            logged out.
        :param reason: Why the subject wants to log out
        :param expire: The latest the log out should happen.
            If this time has passed don't bother.
        :param sign: Whether the request should be signed or not.
            This also depends on what binding is used.
        :return: Depends on which binding is used:
            If the HTTP redirect binding then a HTTP redirect,
            if SOAP binding has been used the just the result of that
            conversation.
        """
        # Accept either a NameID instance or its string-encoded form.
        # (basestring: this module targets Python 2.)
        if isinstance(name_id, basestring):
            name_id = decode(name_id)
        logger.info("logout request for: %s" % name_id)
        # find out which IdPs/AAs I should notify
        entity_ids = self.users.issuers_of_info(name_id)
        return self.do_logout(name_id, entity_ids, reason, expire, sign)
def do_logout(self, name_id, entity_ids, reason, expire, sign=None,
expected_binding=None):
"""
:param name_id: Identifier of the Subject (a NameID instance)
:param entity_ids: List of entity ids for the IdPs that have provided
information concerning the subject
:param reason: The reason for doing the logout
:param expire: Try to logout before this time.
:param sign: Whether to sign the request or not
:param expected_binding: Specify the expected binding then not try it
all
:return:
"""
# check time
if not not_on_or_after(expire): # I've run out of time
# Do the local logout anyway
self.local_logout(name_id)
return 0, "504 Gateway Timeout", [], []
not_done = entity_ids[:]
responses = {}
for entity_id in entity_ids:
logger.debug("Logout from '%s'" % entity_id)
# for all where I can use the SOAP binding, do those first
for binding in [BINDING_SOAP, BINDING_HTTP_POST,
BINDING_HTTP_REDIRECT]:
if expected_binding and binding != expected_binding:
continue
try:
srvs = self.metadata.single_logout_service(entity_id,
binding,
"idpsso")
except:
srvs = None
if not srvs:
logger.debug("No SLO '%s' service" % binding)
continue
destination = destinations(srvs)[0]
logger.info("destination to provider: %s" % destination)
req_id, request = self.create_logout_request(
destination, entity_id, name_id=name_id, reason=reason,
expire=expire)
#to_sign = []
if binding.startswith("http://"):
sign = True
if sign is None:
sign = self.logout_requests_signed
if sign:
srequest = self.sign(request)
else:
srequest = "%s" % request
relay_state = self._relay_state(req_id)
http_info = self.apply_binding(binding, srequest, destination,
relay_state)
if binding == BINDING_SOAP:
response = self.send(**http_info)
if response and response.status_code == 200:
not_done.remove(entity_id)
response = response.text
logger.info("Response: %s" % response)
res = self.parse_logout_request_response(response)
responses[entity_id] = res
else:
logger.info("NOT OK response from %s" % destination)
else:
self.state[req_id] = {"entity_id": entity_id,
"operation": "SLO",
"entity_ids": entity_ids,
"name_id": code(name_id),
"reason": reason,
"not_on_of_after": expire,
"sign": sign}
responses[entity_id] = (binding, http_info)
not_done.remove(entity_id)
# only try one binding
break
if not_done:
# upstream should try later
raise LogoutError("%s" % (entity_ids,))
return responses
    def local_logout(self, name_id):
        """ Remove the user from the cache, equals local logout

        :param name_id: The identifier of the subject (a NameID instance)
        :return: True (removal is assumed to have succeeded)
        """
        self.users.remove_person(name_id)
        return True
def is_logged_in(self, name_id):
""" Check if user is in the cache
:param name_id: The identifier of the subject
"""
identity = self.users.get_identity(name_id)[0]
return bool(identity)
    def handle_logout_response(self, response):
        """ handles a Logout response

        :param response: A response.Response instance
        :return: 4-tuple of (session_id of the last sent logout request,
            response message, response headers and message)
        """
        logger.info("state: %s" % (self.state,))
        # Look up the outstanding logout state recorded by do_logout().
        status = self.state[response.in_response_to]
        logger.info("status: %s" % (status,))
        issuer = response.issuer()
        logger.info("issuer: %s" % issuer)
        del self.state[response.in_response_to]
        if status["entity_ids"] == [issuer]: # done
            # Last IdP has answered: finish with a local logout.
            self.local_logout(decode(status["name_id"]))
            return 0, "200 Ok", [("Content-type", "text/html")], []
        else:
            # More IdPs to notify: continue the logout round.
            status["entity_ids"].remove(issuer)
            # NOTE(review): do_logout() stores this timestamp under the key
            # "not_on_of_after" (sic); reading "not_on_or_after" here raises
            # KeyError until the two spellings agree -- confirm upstream fix.
            return self.do_logout(decode(status["name_id"]),
                                  status["entity_ids"],
                                  status["reason"], status["not_on_or_after"],
                                  status["sign"])
    def _use_soap(self, destination, query_type, **kwargs):
        """Create a query, send it over SOAP and parse the answer.

        :param destination: URL of the SOAP endpoint
        :param query_type: Name stem used to look up the create_<query_type>
            and parse_<query_type>_response methods on self
        :param kwargs: Arguments for the create function; a "response_args"
            entry (if present) is extracted and passed to the parse function
        :return: The parsed response, or None if parsing yielded nothing
        :raises HTTPError: if the SOAP exchange did not return HTTP 200
        """
        _create_func = getattr(self, "create_%s" % query_type)
        _response_func = getattr(self, "parse_%s_response" % query_type)
        try:
            response_args = kwargs["response_args"]
            del kwargs["response_args"]
        except KeyError:
            response_args = None
        qid, query = _create_func(destination, **kwargs)
        response = self.send_using_soap(query, destination)
        if response.status_code == 200:
            # Make sure the parser knows the answer arrived over SOAP.
            if not response_args:
                response_args = {"binding": BINDING_SOAP}
            else:
                response_args["binding"] = BINDING_SOAP
            logger.info("Verifying response")
            if response_args:
                response = _response_func(response.content, **response_args)
            else:
                response = _response_func(response.content)
        else:
            raise HTTPError("%d:%s" % (response.status_code, response.error))
        if response:
            #not_done.remove(entity_id)
            logger.info("OK response from %s" % destination)
            return response
        else:
            logger.info("NOT OK response from %s" % destination)
            return None
    #noinspection PyUnusedLocal
    def do_authz_decision_query(self, entity_id, action,
                                subject_id, nameid_format,
                                evidence=None, resource=None,
                                sp_name_qualifier=None,
                                name_qualifier=None,
                                consent=None, extensions=None, sign=False):
        """Ask an entity's authorization service for a decision on *action*.

        Tries every SOAP authz endpoint the metadata lists for *entity_id*
        and returns the first successful response.

        :param entity_id: The entity to query
        :param action: The action the decision should concern
        :param subject_id: Text content of the subject's NameID
        :param nameid_format: Format of the subject's NameID
        :return: The first parsed response obtained, or None
        """
        subject = saml.Subject(
            name_id=saml.NameID(text=subject_id, format=nameid_format,
                                sp_name_qualifier=sp_name_qualifier,
                                name_qualifier=name_qualifier))
        srvs = self.metadata.authz_service(entity_id, BINDING_SOAP)
        for dest in destinations(srvs):
            resp = self._use_soap(dest, "authz_decision_query",
                                  action=action, evidence=evidence,
                                  resource=resource, subject=subject)
            if resp:
                return resp
        return None
    def do_assertion_id_request(self, assertion_ids, entity_id,
                                consent=None, extensions=None, sign=False):
        """Fetch one or more assertions by ID from an entity over SOAP.

        :param assertion_ids: A single assertion id or a list of them
        :param entity_id: The entity to ask
        :return: The first successful response, or None
        :raises NoServiceDefined: if the entity advertises no
            assertion_id_request_service in its metadata
        """
        srvs = self.metadata.assertion_id_request_service(entity_id,
                                                          BINDING_SOAP)
        if not srvs:
            raise NoServiceDefined("%s: %s" % (entity_id,
                                               "assertion_id_request_service"))
        # Normalize a single id to a list (basestring: Python 2 module).
        if isinstance(assertion_ids, basestring):
            assertion_ids = [assertion_ids]
        _id_refs = [AssertionIDRef(_id) for _id in assertion_ids]
        for destination in destinations(srvs):
            res = self._use_soap(destination, "assertion_id_request",
                                 assertion_id_refs=_id_refs, consent=consent,
                                 extensions=extensions, sign=sign)
            if res:
                return res
        return None
def do_authn_query(self, entity_id,
consent=None, extensions=None, sign=False):
srvs = self.metadata.authn_request_service(entity_id, BINDING_SOAP)
for destination in destinations(srvs):
resp = self._use_soap(destination, "authn_query", consent=consent,
extensions=extensions, sign=sign)
if resp:
return resp
return None
def do_attribute_query(self, entityid, subject_id,
attribute=None, sp_name_qualifier=None,
name_qualifier=None, nameid_format=None,
real_id=None, consent=None, extensions=None,
sign=False, binding=BINDING_SOAP):
""" Does a attribute request to an attribute authority, this is
by default done over SOAP.
:param entityid: To whom the query should be sent
:param subject_id: The identifier of the subject
:param attribute: A dictionary of attributes and values that is
asked for
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param nameid_format: The format of the name ID
:param real_id: The identifier which is the key to this entity in the
identity database
:param binding: Which binding to use
:return: The attributes returned if BINDING_SOAP was used.
HTTP args if BINDING_HTT_POST was used.
"""
if real_id:
response_args = {"real_id": real_id}
else:
response_args = {}
if not binding:
binding, destination = self.pick_binding("attribute_service",
None,
"attribute_authority",
entity_id=entityid)
else:
srvs = self.metadata.attribute_service(entityid, binding)
if srvs is []:
raise SAMLError("No attribute service support at entity")
destination = destinations(srvs)[0]
if binding == BINDING_SOAP:
return self._use_soap(destination, "attribute_query",
consent=consent, extensions=extensions,
sign=sign, subject_id=subject_id,
attribute=attribute,
sp_name_qualifier=sp_name_qualifier,
name_qualifier=name_qualifier,
nameid_format=nameid_format,
response_args=response_args)
elif binding == BINDING_HTTP_POST:
mid = sid()
query = self.create_attribute_query(destination, subject_id,
attribute, mid, consent,
extensions, sign)
self.state[query.id] = {"entity_id": entityid,
"operation": "AttributeQuery",
"subject_id": subject_id,
"sign": sign}
relay_state = self._relay_state(query.id)
return self.apply_binding(binding, "%s" % query, destination,
relay_state)
else:
raise SAMLError("Unsupported binding")
def handle_logout_request(self, request, name_id, binding, sign=False,
relay_state=""):
"""
Deal with a LogoutRequest
:param request: The request as text string
:param name_id: The id of the current user
:param binding: Which binding the message came in over
:param sign: Whether the response will be signed or not
:return: Keyword arguments which can be used to send the response
what's returned follow different patterns for different bindings.
If the binding is BINDIND_SOAP, what is returned looks like this::
{
"data": <the SOAP enveloped response>
"url": "",
'headers': [('content-type', 'application/soap+xml')]
'method': "POST
}
"""
logger.info("logout request: %s" % request)
_req = self._parse_request(request, LogoutRequest,
"single_logout_service", binding)
if _req.message.name_id == name_id:
try:
if self.local_logout(name_id):
status = success_status_factory()
else:
status = status_message_factory("Server error",
STATUS_REQUEST_DENIED)
except KeyError:
status = status_message_factory("Server error",
STATUS_REQUEST_DENIED)
else:
status = status_message_factory("Wrong user",
STATUS_UNKNOWN_PRINCIPAL)
if binding == BINDING_SOAP:
response_bindings = [BINDING_SOAP]
elif binding == BINDING_HTTP_POST or BINDING_HTTP_REDIRECT:
response_bindings = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT]
else:
response_bindings = self.config.preferred_binding[
"single_logout_service"]
response = self.create_logout_response(_req.message, response_bindings,
status, sign)
rinfo = self.response_args(_req.message, response_bindings)
return self.apply_binding(rinfo["binding"], response,
rinfo["destination"], relay_state,
response=True)
| tpazderka/pysaml2 | src/saml2/client.py | Python | bsd-2-clause | 18,978 |
#
# Copyright (C) 2004 SIPfoundry Inc.
# Licensed by SIPfoundry under the GPL license.
#
# Copyright (C) 2004 SIP Forum
# Licensed to SIPfoundry under a Contributor Agreement.
#
#
# This file is part of SIP Forum User Agent Basic Test Suite which
# belongs to the SIP Forum Test Framework.
#
# SIP Forum User Agent Basic Test Suite is free software; you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# SIP Forum User Agent Basic Test Suite is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SIP Forum User Agent Basic Test Suite; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# $Id: case507.py,v 1.2 2004/05/02 18:57:36 lando Exp $
#
from TestCase import TestCase
import NetworkEventHandler as NEH
import Log
class case507 (TestCase):
    """SFTF test case 507: verify that a SIP UA returns all Record-Route
    URI parameters (known and unknown) unchanged in its responses to an
    INVITE (RFC 3261 Record-Route handling)."""
    def config(self):
        # Static test-case metadata consumed by the SFTF framework.
        self.name = "Case 507"
        self.description = "Correct Record-Routing parameter returning"
        self.isClient = True
        self.transport = "UDP"
        self.interactRequired = True
    def run(self):
        # Build an INVITE carrying a Record-Route header stuffed with
        # unknown parameters, then check a 1xx/2xx reply echoes it intact.
        self.neh = NEH.NetworkEventHandler(self.transport)
        self.rrrepl = None  # first reply seen that carries Record-Route
        inv = self.createRequest("INVITE")
        via = inv.getParsedHeaderValue("Via")
        rr = self.getParsedHeaderInstance("Record-Route")
        # Point the Record-Route at ourselves (same host/port as Via).
        rr.uri.protocol = "sip"
        rr.uri.host = via.host
        rr.uri.port = via.port
        rr.uri.params.append("lr")
        rr.looseRouter = True
        # Unknown parameters in assorted shapes the UA must preserve.
        rr.uri.params.append("unknownParameter=unknownValue")
        rr.uri.params.append(" unknownParameterWithLeadingLWS= unknowValueWithLeadingLWS")
        rr.uri.params.append("unknownParameterWithoutAValue")
        rr.uri.params.append(" unknownParameterWithoutAValueAndWithLeadingLWS")
        inv.setParsedHeaderValue("Record-Route", rr)
        inv.setHeaderValue("Record-Route", rr.create())
        inv.transaction.dialog.ignoreRoute = True
        self.writeMessageToNetwork(self.neh, inv)
        # Collect replies until a final (>200) response, a BYE has been
        # sent, or a read times out (code forced to 999).
        self.code = 0
        self.byed = 0
        while (self.code <= 200) and (self.byed == 0):
            repl = self.readReplyFromNetwork(self.neh)
            if (repl is not None) and (repl.code > self.code):
                self.code = repl.code
            elif repl is None:
                self.code = 999
        # Judge the outcome based on the Record-Route-bearing reply
        # captured by onDefaultCode().
        if repl is None:
            self.addResult(TestCase.TC_FAILED, "missing reply on request")
        elif self.rrrepl is not None:
            if self.rrrepl.hasParsedHeaderField("Record-Route"):
                reprr = self.rrrepl.getParsedHeaderValue("Record-Route")
                if reprr == rr:
                    self.addResult(TestCase.TC_PASSED, "Record-Route header contains all parameters from request")
                else:
                    self.addResult(TestCase.TC_FAILED, "Record-Route header is not equal to Record-Route from request")
            else:
                self.addResult(TestCase.TC_ERROR, "missing Record-Route in reply")
        else:
            self.addResult(TestCase.TC_ERROR, "valid reply (1xx|2xx) with Record-Route missing")
        self.neh.closeSock()
    def onDefaultCode(self, message):
        # Framework callback invoked for every reply; tracks the highest
        # status code, ACKs finals, BYEs accepted calls and CANCELs early
        # dialogs once a Record-Route-bearing reply has been captured.
        if message.code > self.code:
            self.code = message.code
        if message.code >= 200:
            if message.getParsedHeaderValue("CSeq").method == "INVITE":
                Log.logDebug("case507: sending ACK for >= 200 reply", 3)
                ack = self.createRequest("ACK", trans=message.transaction)
                self.writeMessageToNetwork(self.neh, ack)
        if message.code == 200:
            if message.transaction.canceled:
                Log.logDebug("case507: received 200 for CANCEL", 3)
            elif message.getParsedHeaderValue("CSeq").method == "INVITE":
                # Call answered: keep this reply and tear the call down.
                self.rrrepl = message
                Log.logDebug("case507: sending BYE for accepted INVITE", 3)
                bye = self.createRequest("BYE", dia=message.transaction.dialog)
                self.writeMessageToNetwork(self.neh, bye)
                self.byed = 1
                self.code = 999
                rep = self.readReplyFromNetwork(self.neh)
                if rep is None:
                    self.addResult(TestCase.TC_ERROR, "missing response on BYE")
        else:
            if message.hasParsedHeaderField("Record-Route"):
                # Provisional reply already carries Record-Route: that is
                # all we need, so cancel the pending INVITE.
                self.rrrepl = message
                can = self.createRequest("CANCEL", trans=message.transaction)
                message.transaction.canceled = True
                self.writeMessageToNetwork(self.neh, can)
                canrepl = self.readReplyFromNetwork(self.neh)
                if canrepl is None:
                    self.addResult(TestCase.TC_ERROR, "missing 200 on CANCEL")
            else:
                # No Record-Route yet (e.g. 100 Trying): ask the tester to
                # answer so we can inspect the 200.
                print " !!!! PLEASE ANSWER/PICKUP THE CALL !!!!"
| VoIP-co-uk/sftf | UserAgentBasicTestSuite/case507.py | Python | gpl-2.0 | 4,527 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is part of the nmeta2 suite
.
It defines a custom traffic classifier
.
To create your own custom classifier, copy this example to a new
file in the same directory and update the code as required.
Call it from nmeta by specifying the name of the file (without the
.py) in main_policy.yaml
.
Classifiers are called per packet, so performance is important
.
"""
#*** Required for payload HTTP decode:
import dpkt
class Classifier(object):
    """
    A custom classifier module for import by nmeta2.

    Classifies a flow by matching the HTTP request URI of its payload
    and returns a QoS treatment decision.
    """
    def __init__(self, logger):
        """
        Initialise the classifier.

        :param logger: logger instance supplied by nmeta2, used for
            debug output from this classifier.
        """
        self.logger = logger

    def classifier(self, flow):
        """
        A really basic HTTP URI classifier to demonstrate ability
        to differentiate based on a payload characteristic.

        This method is passed a Flow class object that holds the
        current context of the flow.

        :param flow: Flow object; this code reads its ``finalised``
            flag, ``payload`` bytes and ``packet_count``, and sets
            ``finalised`` once enough packets have been seen.
        :returns: dict with a ``'qos_treatment'`` key once an HTTP
            request has been classified, otherwise an empty dict.

        Only works on TCP.
        """
        #*** Maximum packets to accumulate in a flow before making a
        #*** classification:
        _max_packets = 5
        #*** URI to match:
        _match_uri = '/static/index.html'
        #*** QoS actions to take:
        _qos_action_match = 'constrained_bw'
        _qos_action_no_match = 'default_priority'
        #*** Dictionary to hold classification results:
        _results = {}
        http = ''
        if not flow.finalised:
            #*** Do some classification:
            self.logger.debug("Checking packet")
            #*** Get the latest packet payload from the flow class:
            payload = flow.payload
            #*** Check if the payload is HTTP:
            if len(payload) > 0:
                try:
                    http = dpkt.http.Request(payload)
                except Exception:
                    #*** Parse failure means the payload is not HTTP, so
                    #*** ignore it.  (Was a bare 'except:', which also
                    #*** swallowed KeyboardInterrupt/SystemExit.)
                    pass
            if http:
                #*** Decide actions based on the URI:
                if http.uri == _match_uri:
                    #*** Matched URI:
                    self.logger.debug("Matched HTTP uri=%s", http.uri)
                    _results['qos_treatment'] = _qos_action_match
                else:
                    #*** Doesn't match URI:
                    self.logger.debug("Did not match HTTP uri=%s", http.uri)
                    _results['qos_treatment'] = _qos_action_no_match
                self.logger.debug("Decided on results %s", _results)
            else:
                self.logger.debug("Not HTTP so ignoring")
        #*** Stop accumulating once enough packets have been examined:
        if flow.packet_count >= _max_packets:
            flow.finalised = 1
        return _results
| mattjhayes/nmeta2dpae | nmeta2dpae/classifiers/payload_uri_1.py | Python | apache-2.0 | 3,329 |
"""
Caching framework.
This package defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
# Public API of this module.
__all__ = [
    'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
    'memcached': 'memcached',
    'locmem': 'locmem',
    'file': 'filebased',
    'db': 'db',
    'dummy': 'dummy',
}
# Alias of the cache that settings.CACHES must always define.
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
    """
    Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
    host and any extra params that are required for the backend. Returns a
    (scheme, host, params) tuple.

    Raises InvalidCacheBackendError when the URI lacks the scheme://
    prefix.
    """
    # The URI must look like "scheme://host[?query]".
    if ':' not in backend_uri:
        raise InvalidCacheBackendError("Backend URI must start with scheme://")
    scheme, rest = backend_uri.split(':', 1)
    if not rest.startswith('//'):
        raise InvalidCacheBackendError("Backend URI must start with scheme://")
    query_start = rest.find('?')
    if query_start == -1:
        host = rest[2:]
        params = {}
    else:
        # Split off the query string and decode it into a dict.
        host = rest[2:query_start]
        params = dict(parse_qsl(rest[query_start + 1:]))
    # A trailing slash on the host part is not significant.
    if host.endswith('/'):
        host = host[:-1]
    return scheme, host, params
# Backwards-compatibility shim: if the project still uses the legacy
# CACHE_BACKEND URI setting (and defines no CACHES dict), translate it
# into a new-style settings.CACHES['default'] entry at import time.
if not settings.CACHES:
    import warnings
    warnings.warn(
        "settings.CACHE_* is deprecated; use settings.CACHES instead.",
        PendingDeprecationWarning
    )
    # Mapping for new-style cache backend api
    backend_classes = {
        'memcached': 'memcached.CacheClass',
        'locmem': 'locmem.LocMemCache',
        'file': 'filebased.FileBasedCache',
        'db': 'db.DatabaseCache',
        'dummy': 'dummy.DummyCache',
    }
    # Decode the legacy URI and rewrite well-known scheme names to full
    # dotted paths of the new-style backend classes.
    engine, host, params = parse_backend_uri(settings.CACHE_BACKEND)
    if engine in backend_classes:
        engine = 'django.core.cache.backends.%s' % backend_classes[engine]
    defaults = {
        'BACKEND': engine,
        'LOCATION': host,
    }
    # Any query-string options become extra configuration keys.
    defaults.update(params)
    settings.CACHES[DEFAULT_CACHE_ALIAS] = defaults
# One way or another, a 'default' cache must now be configured.
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
    raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
    """
    Helper function to parse the backend configuration
    that doesn't use the URI notation.

    ``backend`` is either an alias defined in settings.CACHES or a
    dotted import path to a backend class; ``kwargs`` supply extra
    options (e.g. LOCATION) for the dotted-path form.

    Returns a (backend_path, location, params) tuple.

    Raises InvalidCacheBackendError if a dotted path cannot be
    imported.
    """
    # Try to get the CACHES entry for the given backend name first
    conf = settings.CACHES.get(backend, None)
    if conf is not None:
        # Copy so popping BACKEND/LOCATION doesn't mutate settings.
        args = conf.copy()
        backend = args.pop('BACKEND')
        location = args.pop('LOCATION', '')
        return backend, location, args
    else:
        # Trying to import the given backend, in case it's a dotted path
        mod_path, cls_name = backend.rsplit('.', 1)
        try:
            mod = importlib.import_module(mod_path)
            backend_cls = getattr(mod, cls_name)
        except (AttributeError, ImportError):
            raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
        location = kwargs.pop('LOCATION', '')
        return backend, location, kwargs
    # NOTE(review): an unreachable trailing
    # ``raise InvalidCacheBackendError(...)`` was removed here -- both
    # branches above always return.
def get_cache(backend, **kwargs):
    """
    Function to load a cache backend dynamically. This is flexible by design
    to allow different use cases:

    To load a backend with the old URI-based notation::

        cache = get_cache('locmem://')

    To load a backend that is pre-defined in the settings::

        cache = get_cache('default')

    To load a backend with its dotted import path,
    including arbitrary options::

        cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
            'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
        })

    Raises InvalidCacheBackendError when the backend cannot be imported.
    """
    try:
        if '://' in backend:
            # for backwards compatibility
            # Legacy URI notation: old-style backend modules expose
            # their implementation under the name ``CacheClass``.
            backend, location, params = parse_backend_uri(backend)
            if backend in BACKENDS:
                backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
            # Keyword arguments override/extend URI query parameters.
            params.update(kwargs)
            mod = importlib.import_module(backend)
            backend_cls = mod.CacheClass
        else:
            # Settings alias or dotted path: the last dotted component
            # is the class name, the rest is the module path.
            backend, location, params = parse_backend_conf(backend, **kwargs)
            mod_path, cls_name = backend.rsplit('.', 1)
            mod = importlib.import_module(mod_path)
            backend_cls = getattr(mod, cls_name)
    except (AttributeError, ImportError), e:
        raise InvalidCacheBackendError(
            "Could not find backend '%s': %s" % (backend, e))
    return backend_cls(location, params)
# The module-level default cache instance, created from settings at
# import time.
cache = get_cache(DEFAULT_CACHE_ALIAS)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
    signals.request_finished.connect(cache.close)
| heracek/django-nonrel | django/core/cache/__init__.py | Python | bsd-3-clause | 6,144 |
import sys
import warnings
import itertools
import platform
import pytest
import math
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, assert_array_max_ulp, HAS_REFCOUNT
)
from numpy.core._rational_tests import rational
from hypothesis import assume, given, strategies as st
from hypothesis.extra import numpy as hynp
class TestResize:
    """Tests for np.resize: tiling/truncation, zero-size results and
    ndarray-subclass preservation."""
    def test_copies(self):
        # Growing repeats the flattened input cyclically.
        A = np.array([[1, 2], [3, 4]])
        Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        assert_equal(np.resize(A, (2, 4)), Ar1)
        Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(np.resize(A, (4, 2)), Ar2)
        Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
        assert_equal(np.resize(A, (4, 3)), Ar3)
    def test_repeats(self):
        # Same cyclic repetition starting from a 1-D input.
        A = np.array([1, 2, 3])
        Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]])
        assert_equal(np.resize(A, (2, 4)), Ar1)
        Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]])
        assert_equal(np.resize(A, (4, 2)), Ar2)
        Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
        assert_equal(np.resize(A, (4, 3)), Ar3)
    def test_zeroresize(self):
        # Resizing to zero elements keeps the dtype and requested shape.
        A = np.array([[1, 2], [3, 4]])
        Ar = np.resize(A, (0,))
        assert_array_equal(Ar, np.array([]))
        assert_equal(A.dtype, Ar.dtype)
        Ar = np.resize(A, (0, 2))
        assert_equal(Ar.shape, (0, 2))
        Ar = np.resize(A, (2, 0))
        assert_equal(Ar.shape, (2, 0))
    def test_reshape_from_zero(self):
        # See also gh-6740
        # Growing a zero-length structured array zero-fills the result.
        A = np.zeros(0, dtype=[('a', np.float32)])
        Ar = np.resize(A, (2, 1))
        assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
        assert_equal(A.dtype, Ar.dtype)
    def test_negative_resize(self):
        # Negative dimensions must be rejected with a clear error.
        A = np.arange(0, 10, dtype=np.float32)
        new_shape = (-10, -1)
        with pytest.raises(ValueError, match=r"negative"):
            np.resize(A, new_shape=new_shape)
    def test_subclass(self):
        # np.resize must return an instance of the input's subclass.
        class MyArray(np.ndarray):
            __array_priority__ = 1.
        my_arr = np.array([1]).view(MyArray)
        assert type(np.resize(my_arr, 5)) is MyArray
        assert type(np.resize(my_arr, 0)) is MyArray
        my_arr = np.array([]).view(MyArray)
        assert type(np.resize(my_arr, 5)) is MyArray
class TestNonarrayArgs:
    # check that non-array arguments to functions wrap them in arrays
    def test_choose(self):
        choices = [[0, 1, 2],
                   [3, 4, 5],
                   [5, 6, 7]]
        tgt = [5, 1, 5]
        a = [2, 0, 1]
        out = np.choose(a, choices)
        assert_equal(out, tgt)
    def test_clip(self):
        arr = [-1, 5, 2, 3, 10, -4, -9]
        out = np.clip(arr, 2, 7)
        tgt = [2, 5, 2, 3, 7, 2, 2]
        assert_equal(out, tgt)
    def test_compress(self):
        arr = [[0, 1, 2, 3, 4],
               [5, 6, 7, 8, 9]]
        tgt = [[5, 6, 7, 8, 9]]
        # condition [0, 1] keeps only the second row along axis 0.
        out = np.compress([0, 1], arr, axis=0)
        assert_equal(out, tgt)
    def test_count_nonzero(self):
        arr = [[0, 1, 7, 0, 0],
               [3, 0, 0, 2, 19]]
        tgt = np.array([2, 3])
        out = np.count_nonzero(arr, axis=1)
        assert_equal(out, tgt)
    def test_cumproduct(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
    def test_diagonal(self):
        a = [[0, 1, 2, 3],
             [4, 5, 6, 7],
             [8, 9, 10, 11]]
        out = np.diagonal(a)
        tgt = [0, 5, 10]
        assert_equal(out, tgt)
    def test_mean(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.mean(A) == 3.5)
        assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
        assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
        # Mean of an empty sequence is NaN and warns rather than raising.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.mean([])))
            assert_(w[0].category is RuntimeWarning)
    def test_ptp(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.ptp(a, axis=0), 15.0)
    def test_prod(self):
        arr = [[1, 2, 3, 4],
               [5, 6, 7, 9],
               [10, 3, 4, 5]]
        tgt = [24, 1890, 600]
        assert_equal(np.prod(arr, axis=-1), tgt)
    def test_ravel(self):
        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        assert_equal(np.ravel(a), tgt)
    def test_repeat(self):
        a = [1, 2, 3]
        tgt = [1, 1, 2, 2, 3, 3]
        out = np.repeat(a, 2)
        assert_equal(out, tgt)
    def test_reshape(self):
        arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
        assert_equal(np.reshape(arr, (2, 6)), tgt)
    def test_round(self):
        arr = [1.56, 72.54, 6.35, 3.25]
        tgt = [1.6, 72.5, 6.4, 3.2]
        assert_equal(np.around(arr, decimals=1), tgt)
        # Scalar round must preserve the scalar type.
        s = np.float64(1.)
        assert_(isinstance(s.round(), np.float64))
        assert_equal(s.round(), 1.)
    @pytest.mark.parametrize('dtype', [
        np.int8, np.int16, np.int32, np.int64,
        np.uint8, np.uint16, np.uint32, np.uint64,
        np.float16, np.float32, np.float64,
    ])
    def test_dunder_round(self, dtype):
        # Python's round() on a numpy scalar without ndigits returns int.
        s = dtype(1)
        assert_(isinstance(round(s), int))
        assert_(isinstance(round(s, None), int))
        assert_(isinstance(round(s, ndigits=None), int))
        assert_equal(round(s), 1)
        assert_equal(round(s, None), 1)
        assert_equal(round(s, ndigits=None), 1)
    @pytest.mark.parametrize('val, ndigits', [
        pytest.param(2**31 - 1, -1,
            marks=pytest.mark.xfail(reason="Out of range of int32")
        ),
        (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))),
        (2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
    ])
    def test_dunder_round_edgecases(self, val, ndigits):
        # round() on np.int32 must agree with round() on Python int.
        assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
    def test_dunder_round_accuracy(self):
        # round() with ndigits must preserve the numpy scalar type and
        # value for large magnitudes.
        f = np.float64(5.1 * 10**73)
        assert_(isinstance(round(f, -73), np.float64))
        assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
        assert_(isinstance(round(f, ndigits=-73), np.float64))
        assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
        i = np.int64(501)
        assert_(isinstance(round(i, -2), np.int64))
        assert_array_max_ulp(round(i, -2), 500)
        assert_(isinstance(round(i, ndigits=-2), np.int64))
        assert_array_max_ulp(round(i, ndigits=-2), 500)
    @pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
    def test_round_py_consistency(self):
        f = 5.1 * 10**73
        assert_equal(round(np.float64(f), -73), round(f, -73))
    def test_searchsorted(self):
        arr = [-8, -5, -1, 3, 6, 10]
        out = np.searchsorted(arr, 0)
        assert_equal(out, 3)
    def test_size(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.size(A) == 6)
        assert_(np.size(A, 0) == 2)
        assert_(np.size(A, 1) == 3)
    def test_squeeze(self):
        A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
        assert_equal(np.squeeze(A).shape, (3, 3))
        assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
        assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
        assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
        assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
        assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
        assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
        assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
        assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
    def test_std(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(np.std(A), 1.707825127659933)
        assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
        assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
        # std of an empty sequence is NaN and warns rather than raising.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.std([])))
            assert_(w[0].category is RuntimeWarning)
    def test_swapaxes(self):
        tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
        a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
        out = np.swapaxes(a, 0, 2)
        assert_equal(out, tgt)
    def test_sum(self):
        m = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        tgt = [[6], [15], [24]]
        out = np.sum(m, axis=1, keepdims=True)
        assert_equal(tgt, out)
    def test_take(self):
        tgt = [2, 3, 5]
        indices = [1, 2, 4]
        a = [1, 2, 3, 4, 5]
        out = np.take(a, indices)
        assert_equal(out, tgt)
    def test_trace(self):
        c = [[1, 2], [3, 4], [5, 6]]
        assert_equal(np.trace(c), 5)
    def test_transpose(self):
        arr = [[1, 2], [3, 4], [5, 6]]
        tgt = [[1, 3, 5], [2, 4, 6]]
        assert_equal(np.transpose(arr, (1, 0)), tgt)
    def test_var(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(np.var(A), 2.9166666666666665)
        assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
        assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
        # var of an empty sequence is NaN and warns rather than raising.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.var([])))
            assert_(w[0].category is RuntimeWarning)
        # Object array containing a complex value: var must still work.
        B = np.array([None, 0])
        B[0] = 1j
        assert_almost_equal(np.var(B), 0.25)
class TestIsscalar:
    def test_isscalar(self):
        """np.isscalar: plain scalars, numpy scalars, bools, strings and
        PEP 3141 numbers are scalars; containers and None are not."""
        for scalar_value in (3.1, np.int16(12345), False, 'numpy'):
            assert_(np.isscalar(scalar_value))
        for non_scalar_value in ([3.1], None):
            assert_(not np.isscalar(non_scalar_value))
        # PEP 3141
        from fractions import Fraction
        assert_(np.isscalar(Fraction(5, 17)))
        from numbers import Number
        assert_(np.isscalar(Number()))
class TestBoolScalar:
    """Bitwise ops on np.True_/np.False_ return the bool singletons."""
    def test_logical(self):
        false_, true_ = np.False_, np.True_
        text = "xyz"
        # Python `and` short-circuits: truthy lhs yields rhs, falsy lhs
        # yields the lhs itself.
        assert_((true_ and text) is text)
        assert_((false_ and text) is false_)
    def test_bitwise_or(self):
        false_, true_ = np.False_, np.True_
        table = [(true_, true_, true_), (false_, true_, true_),
                 (true_, false_, true_), (false_, false_, false_)]
        for lhs, rhs, expected in table:
            assert_((lhs | rhs) is expected)
    def test_bitwise_and(self):
        false_, true_ = np.False_, np.True_
        table = [(true_, true_, true_), (false_, true_, false_),
                 (true_, false_, false_), (false_, false_, false_)]
        for lhs, rhs, expected in table:
            assert_((lhs & rhs) is expected)
    def test_bitwise_xor(self):
        false_, true_ = np.False_, np.True_
        table = [(true_, true_, false_), (false_, true_, true_),
                 (true_, false_, true_), (false_, false_, false_)]
        for lhs, rhs, expected in table:
            assert_((lhs ^ rhs) is expected)
class TestBoolArray:
    """Logical ops on bool arrays, with lengths/offsets chosen to
    exercise the SIMD and unaligned code paths."""
    def setup(self):
        # offset for simd tests
        self.t = np.array([True] * 41, dtype=bool)[1::]
        self.f = np.array([False] * 41, dtype=bool)[1::]
        self.o = np.array([False] * 42, dtype=bool)[2::]
        # nm: almost all False with two True; im: almost all True with
        # two False (nm and im are elementwise complements).
        self.nm = self.f.copy()
        self.im = self.t.copy()
        self.nm[3] = True
        self.nm[-2] = True
        self.im[3] = False
        self.im[-2] = False
    def test_all_any(self):
        assert_(self.t.all())
        assert_(self.t.any())
        assert_(not self.f.all())
        assert_(not self.f.any())
        assert_(self.nm.any())
        assert_(self.im.any())
        assert_(not self.nm.all())
        assert_(not self.im.all())
        # check bad element in all positions
        for i in range(256 - 7):
            d = np.array([False] * 256, dtype=bool)[7::]
            d[i] = True
            assert_(np.any(d))
            e = np.array([True] * 256, dtype=bool)[7::]
            e[i] = False
            assert_(not np.all(e))
            assert_array_equal(e, ~d)
        # big array test for blocked libc loops
        for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
            d = np.array([False] * 100043, dtype=bool)
            d[i] = True
            assert_(np.any(d), msg="%r" % i)
            e = np.array([True] * 100043, dtype=bool)
            e[i] = False
            assert_(not np.all(e), msg="%r" % i)
    def test_logical_not_abs(self):
        # abs() on bool arrays is the identity; ~ negates elementwise.
        assert_array_equal(~self.t, self.f)
        assert_array_equal(np.abs(~self.t), self.f)
        assert_array_equal(np.abs(~self.f), self.t)
        assert_array_equal(np.abs(self.f), self.f)
        assert_array_equal(~np.abs(self.f), self.t)
        assert_array_equal(~np.abs(self.t), self.f)
        assert_array_equal(np.abs(~self.nm), self.im)
        # Also exercise the out= variants.
        np.logical_not(self.t, out=self.o)
        assert_array_equal(self.o, self.f)
        np.abs(self.t, out=self.o)
        assert_array_equal(self.o, self.t)
    def test_logical_and_or_xor(self):
        assert_array_equal(self.t | self.t, self.t)
        assert_array_equal(self.f | self.f, self.f)
        assert_array_equal(self.t | self.f, self.t)
        assert_array_equal(self.f | self.t, self.t)
        np.logical_or(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.t)
        assert_array_equal(self.t & self.t, self.t)
        assert_array_equal(self.f & self.f, self.f)
        assert_array_equal(self.t & self.f, self.f)
        assert_array_equal(self.f & self.t, self.f)
        np.logical_and(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.t)
        assert_array_equal(self.t ^ self.t, self.f)
        assert_array_equal(self.f ^ self.f, self.f)
        assert_array_equal(self.t ^ self.f, self.t)
        assert_array_equal(self.f ^ self.t, self.t)
        np.logical_xor(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.f)
        # Mixed array/array and array/python-bool operands.
        assert_array_equal(self.nm & self.t, self.nm)
        assert_array_equal(self.im & self.f, False)
        assert_array_equal(self.nm & True, self.nm)
        assert_array_equal(self.im & False, self.f)
        assert_array_equal(self.nm | self.t, self.t)
        assert_array_equal(self.im | self.f, self.im)
        assert_array_equal(self.nm | True, self.t)
        assert_array_equal(self.im | False, self.im)
        assert_array_equal(self.nm ^ self.t, self.im)
        assert_array_equal(self.im ^ self.f, self.im)
        assert_array_equal(self.nm ^ True, self.im)
        assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp:
    """Comparisons of float/double arrays against zero, covering every
    SIMD lane permutation plus NaN/inf/signbit special values."""
    def setup(self):
        self.f = np.ones(256, dtype=np.float32)
        self.ef = np.ones(self.f.size, dtype=bool)
        self.d = np.ones(128, dtype=np.float64)
        self.ed = np.ones(self.d.size, dtype=bool)
        # generate values for all permutation of 256bit simd vectors
        # f/d get every zero/nonzero pattern per 8-(resp. 4-)element
        # group; ef/ed hold the expected nonzero mask.
        s = 0
        for i in range(32):
            self.f[s:s+8] = [i & 2**x for x in range(8)]
            self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
            s += 8
        s = 0
        for i in range(16):
            self.d[s:s+4] = [i & 2**x for x in range(4)]
            self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
            s += 4
        # nf/nd: nonzero entries replaced by NaN.
        self.nf = self.f.copy()
        self.nd = self.d.copy()
        self.nf[self.ef] = np.nan
        self.nd[self.ed] = np.nan
        # inff/infd: nonzero entries cycled through +inf/-inf/NaN.
        self.inff = self.f.copy()
        self.infd = self.d.copy()
        self.inff[::3][self.ef[::3]] = np.inf
        self.infd[::3][self.ed[::3]] = np.inf
        self.inff[1::3][self.ef[1::3]] = -np.inf
        self.infd[1::3][self.ed[1::3]] = -np.inf
        self.inff[2::3][self.ef[2::3]] = np.nan
        self.infd[2::3][self.ed[2::3]] = np.nan
        # Expected isinf masks: the NaN slots must read False.
        self.efnonan = self.ef.copy()
        self.efnonan[2::3] = False
        self.ednonan = self.ed.copy()
        self.ednonan[2::3] = False
        # signf/signd: negative values (incl. -inf, -nan, -0) so that
        # signbit is set exactly where ef/ed are True.
        self.signf = self.f.copy()
        self.signd = self.d.copy()
        self.signf[self.ef] *= -1.
        self.signd[self.ed] *= -1.
        self.signf[1::6][self.ef[1::6]] = -np.inf
        self.signd[1::6][self.ed[1::6]] = -np.inf
        self.signf[3::6][self.ef[3::6]] = -np.nan
        self.signd[3::6][self.ed[3::6]] = -np.nan
        self.signf[4::6][self.ef[4::6]] = -0.
        self.signd[4::6][self.ed[4::6]] = -0.
    def test_float(self):
        # offset for alignment test
        for i in range(4):
            assert_array_equal(self.f[i:] > 0, self.ef[i:])
            assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
            assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
            assert_array_equal(-self.f[i:] < 0, self.ef[i:])
            assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
            r = self.f[i:] != 0
            assert_array_equal(r, self.ef[i:])
            r2 = self.f[i:] != np.zeros_like(self.f[i:])
            r3 = 0 != self.f[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
            # isnan on amd64 takes the same code path
            assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
            assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
            assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
            assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
            assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
    def test_double(self):
        # offset for alignment test
        for i in range(2):
            assert_array_equal(self.d[i:] > 0, self.ed[i:])
            assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
            assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
            assert_array_equal(-self.d[i:] < 0, self.ed[i:])
            assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
            r = self.d[i:] != 0
            assert_array_equal(r, self.ed[i:])
            r2 = self.d[i:] != np.zeros_like(self.d[i:])
            r3 = 0 != self.d[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
            # isnan on amd64 takes the same code path
            assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
            assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
            assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
            assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
            assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr:
    """Tests for numpy's floating-point error-state machinery
    (np.seterr/geterr/errstate/seterrobj)."""
    def test_default(self):
        # Default error state: warn on everything except underflow.
        err = np.geterr()
        assert_equal(err,
                     dict(divide='warn',
                          invalid='warn',
                          over='warn',
                          under='ignore')
                     )
    def test_set(self):
        # seterr returns the previous state; errstate restores on exit.
        with np.errstate():
            err = np.seterr()
            old = np.seterr(divide='print')
            assert_(err == old)
            new = np.seterr()
            assert_(new['divide'] == 'print')
            np.seterr(over='raise')
            assert_(np.geterr()['over'] == 'raise')
            assert_(new['divide'] == 'print')
            np.seterr(**old)
            assert_(np.geterr() == old)
    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
    def test_divide_err(self):
        # divide='raise' turns 1/0 into FloatingPointError; 'ignore'
        # silences it again.
        with np.errstate(divide='raise'):
            with assert_raises(FloatingPointError):
                np.array([1.]) / np.array([0.])
            np.seterr(divide='ignore')
            np.array([1.]) / np.array([0.])
    def test_errobj(self):
        olderrobj = np.geterrobj()
        self.called = 0
        try:
            # errmask with warn-on-divide: a division by zero emits
            # exactly one warning.
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                with np.errstate(divide='warn'):
                    np.seterrobj([20000, 1, None])
                    np.array([1.]) / np.array([0.])
                    assert_equal(len(w), 1)
            # 'call' mode: the registered callback receives the error
            # name and flags.
            def log_err(*args):
                self.called += 1
                extobj_err = args
                assert_(len(extobj_err) == 2)
                assert_("divide" in extobj_err[0])
            with np.errstate(divide='ignore'):
                np.seterrobj([20000, 3, log_err])
                np.array([1.]) / np.array([0.])
            assert_equal(self.called, 1)
            np.seterrobj(olderrobj)
            # Per-call extobj= override works too.
            with np.errstate(divide='ignore'):
                np.divide(1., 0., extobj=[20000, 3, log_err])
            assert_equal(self.called, 2)
        finally:
            # Always restore global error object and clean up state.
            np.seterrobj(olderrobj)
            del self.called
    def test_errobj_noerrmask(self):
        # errmask = 0 has a special code path for the default
        olderrobj = np.geterrobj()
        try:
            # set errobj to something non default
            np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
                          umath.ERR_DEFAULT + 1, None])
            # call a ufunc
            np.isnan(np.array([6]))
            # same with the default, lots of times to get rid of possible
            # pre-existing stack in the code
            for i in range(10000):
                np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
                              None])
                np.isnan(np.array([6]))
        finally:
            np.seterrobj(olderrobj)
class TestFloatExceptions:
    """Tests that arithmetic on extreme values raises the expected
    FloatingPointError kinds under np.errstate(all='raise')."""
    def assert_raises_fpe(self, fpeerr, flop, x, y):
        # Assert flop(x, y) raises FloatingPointError whose message
        # contains `fpeerr`.
        ftype = type(x)
        try:
            flop(x, y)
            assert_(False,
                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
        except FloatingPointError as exc:
            assert_(str(exc).find(fpeerr) >= 0,
                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))
    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
        # Check that fpe exception is raised.
        #
        # Given a floating operation `flop` and two scalar values, check that
        # the operation raises the floating point exception specified by
        # `fpeerr`. Tests all variants with 0-d array scalars as well.
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
    # Test for all real and complex float types
    @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"])
    def test_floating_exceptions(self, typecode):
        # Test basic arithmetic function errors
        with np.errstate(all='raise'):
            ftype = np.obj2sctype(typecode)
            if np.dtype(ftype).kind == 'f':
                # Get some extreme values for the type
                fi = np.finfo(ftype)
                ft_tiny = fi.machar.tiny
                ft_max = fi.max
                ft_eps = fi.eps
                underflow = 'underflow'
                divbyzero = 'divide by zero'
            else:
                # 'c', complex, corresponding real dtype
                rtype = type(ftype(0).real)
                fi = np.finfo(rtype)
                ft_tiny = ftype(fi.machar.tiny)
                ft_max = ftype(fi.max)
                ft_eps = ftype(fi.eps)
                # The complex types raise different exceptions
                underflow = ''
                divbyzero = ''
            overflow = 'overflow'
            invalid = 'invalid'
            # The value of tiny for double double is NaN, so we need to
            # pass the assert
            if not np.isnan(ft_tiny):
                self.assert_raises_fpe(underflow,
                                       lambda a, b: a/b, ft_tiny, ft_max)
                self.assert_raises_fpe(underflow,
                                       lambda a, b: a*b, ft_tiny, ft_tiny)
            # Overflow from multiply, divide, add and subtract of
            # near-max values, and from an out-of-range power.
            self.assert_raises_fpe(overflow,
                                   lambda a, b: a*b, ft_max, ftype(2))
            self.assert_raises_fpe(overflow,
                                   lambda a, b: a/b, ft_max, ftype(0.5))
            self.assert_raises_fpe(overflow,
                                   lambda a, b: a+b, ft_max, ft_max*ft_eps)
            self.assert_raises_fpe(overflow,
                                   lambda a, b: a-b, -ft_max, ft_max*ft_eps)
            self.assert_raises_fpe(overflow,
                                   np.power, ftype(2), ftype(2**fi.nexp))
            self.assert_raises_fpe(divbyzero,
                                   lambda a, b: a/b, ftype(1), ftype(0))
            # Invalid-operation cases: inf/inf, 0/0, inf-inf, inf+(-inf)
            # and 0*inf.
            self.assert_raises_fpe(
                invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf)
            )
            self.assert_raises_fpe(invalid,
                                   lambda a, b: a/b, ftype(0), ftype(0))
            self.assert_raises_fpe(
                invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf)
            )
            self.assert_raises_fpe(
                invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)
            )
            self.assert_raises_fpe(invalid,
                                   lambda a, b: a*b, ftype(0), ftype(np.inf))
    def test_warnings(self):
        # test warning code path
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with np.errstate(all="warn"):
                np.divide(1, 0.)
                assert_equal(len(w), 1)
                assert_("divide by zero" in str(w[0].message))
                np.array(1e300) * np.array(1e300)
                assert_equal(len(w), 2)
                assert_("overflow" in str(w[-1].message))
                np.array(np.inf) - np.array(np.inf)
                assert_equal(len(w), 3)
                assert_("invalid value" in str(w[-1].message))
                np.array(1e-300) * np.array(1e-300)
                assert_equal(len(w), 4)
                assert_("underflow" in str(w[-1].message))
class TestTypes:
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
    def test_coercion(self):
        """Binary-ufunc result dtypes must follow the promotion cases,
        and scalar operands must not widen small array dtypes."""
        def res_type(a, b):
            # Promotion as actually performed by a binary ufunc (np.add).
            return np.add(a, b).dtype
        self.check_promotion_cases(res_type)
        # Use-case: float/complex scalar * bool/int8 array
        # shouldn't narrow the float/complex type
        for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
            b = 1.234 * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.longdouble(1.234) * a
            assert_equal(b.dtype, np.dtype(np.longdouble),
                         "array type %s" % a.dtype)
            b = np.float64(1.234) * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.float32(1.234) * a
            assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
            b = np.float16(1.234) * a
            assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
            b = 1.234j * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.clongdouble(1.234j) * a
            assert_equal(b.dtype, np.dtype(np.clongdouble),
                         "array type %s" % a.dtype)
            b = np.complex128(1.234j) * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.complex64(1.234j) * a
            assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
        # The following use-case is problematic, and to resolve its
        # tricky side-effects requires more changes.
        #
        # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
        # a float32, shouldn't promote to float64
        #
        # a = np.array([1.0, 1.5], dtype=np.float32)
        # t = np.array([True, False])
        # b = t*a
        # assert_equal(b, [1.0, 0.0])
        # assert_equal(b.dtype, np.dtype('f4'))
        # b = (1-t)*a
        # assert_equal(b, [0.0, 1.5])
        # assert_equal(b.dtype, np.dtype('f4'))
        #
        # Probably ~t (bitwise negation) is more proper to use here,
        # but this is arguably less intuitive to understand at a glance, and
        # would fail if 't' is actually an integer array instead of boolean:
        #
        # b = (~t)*a
        # assert_equal(b, [0.0, 1.5])
        # assert_equal(b.dtype, np.dtype('f4'))
    def test_result_type(self):
        """np.result_type follows the same promotion rules as ufuncs."""
        self.check_promotion_cases(np.result_type)
        # np.dtype(None) is the default dtype (float64); result_type(None)
        # must agree with it.
        assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
    def test_can_cast_and_promote_usertypes(self):
        """Casting/promotion of the user-registered `rational` dtype."""
        # The rational type defines safe casting for signed integers,
        # boolean. Rational itself *does* cast safely to double.
        # (rational does not actually cast to all signed integers, e.g.
        # int64 can be both long and longlong and it registers only the first)
        valid_types = ["int8", "int16", "int32", "int64", "bool"]
        # unsigned ints + complex + timedeltas/datetimes + half + void
        invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
        rational_dt = np.dtype(rational)
        for numpy_dtype in valid_types:
            numpy_dtype = np.dtype(numpy_dtype)
            assert np.can_cast(numpy_dtype, rational_dt)
            # Promotion with a safely-castable type yields rational itself.
            assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
        for numpy_dtype in invalid_types:
            numpy_dtype = np.dtype(numpy_dtype)
            assert not np.can_cast(numpy_dtype, rational_dt)
            with pytest.raises(TypeError):
                np.promote_types(numpy_dtype, rational_dt)
        double_dt = np.dtype("double")
        assert np.can_cast(rational_dt, double_dt)
        assert np.promote_types(double_dt, rational_dt) is double_dt
    @pytest.mark.parametrize("swap", ["", "swap"])
    @pytest.mark.parametrize("string_dtype", ["U", "S"])
    def test_promote_types_strings(self, swap, string_dtype):
        """Promotion of numeric dtypes with sized and unsized string
        dtypes, for both bytes ('S') and unicode ('U') targets.

        Each case runs in both argument orders (`swap`) since promotion
        must be symmetric.
        """
        if swap == "swap":
            promote_types = lambda a, b: np.promote_types(b, a)
        else:
            promote_types = np.promote_types
        S = string_dtype
        # Promote numeric with unsized string:
        # the result length is the longest possible decimal rendering of
        # the numeric type (e.g. u1 -> 3 for "255", i1 -> 4 for "-128").
        assert_equal(promote_types('bool', S), np.dtype(S+'5'))
        assert_equal(promote_types('b', S), np.dtype(S+'4'))
        assert_equal(promote_types('u1', S), np.dtype(S+'3'))
        assert_equal(promote_types('u2', S), np.dtype(S+'5'))
        assert_equal(promote_types('u4', S), np.dtype(S+'10'))
        assert_equal(promote_types('u8', S), np.dtype(S+'20'))
        assert_equal(promote_types('i1', S), np.dtype(S+'4'))
        assert_equal(promote_types('i2', S), np.dtype(S+'6'))
        assert_equal(promote_types('i4', S), np.dtype(S+'11'))
        assert_equal(promote_types('i8', S), np.dtype(S+'21'))
        # Promote numeric with sized string: result is the larger of the
        # given size and the minimal size for the numeric type.
        assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
        assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
        assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
        assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
        assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
        assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
        assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
        assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
        assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
        assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
        assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
        assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
        # Promote with object:
        assert_equal(promote_types('O', S+'30'), np.dtype('O'))
    @pytest.mark.parametrize(["dtype1", "dtype2"],
            [[np.dtype("V6"), np.dtype("V10")],
             [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
             [np.dtype("i8,i8"), np.dtype("i4,i4")],
             ])
    def test_invalid_void_promotion(self, dtype1, dtype2):
        """Void/structured dtypes with mismatched sizes, field names, or
        field types must fail to promote."""
        # Mainly test structured void promotion, which currently allows
        # byte-swapping, but nothing else:
        with pytest.raises(TypeError):
            np.promote_types(dtype1, dtype2)
    @pytest.mark.parametrize(["dtype1", "dtype2"],
            [[np.dtype("V10"), np.dtype("V10")],
             [np.dtype([("name1", "<i8")]), np.dtype([("name1", ">i8")])],
             [np.dtype("i8,i8"), np.dtype("i8,>i8")],
             ])
    def test_valid_void_promotion(self, dtype1, dtype2):
        """Equivalent void/structured dtypes (differing at most in byte
        order) promote, and the first operand is returned unchanged."""
        assert np.promote_types(dtype1, dtype2) is dtype1
    @pytest.mark.parametrize("dtype",
            list(np.typecodes["All"]) +
            ["i,i", "S3", "S100", "U3", "U100", rational])
    def test_promote_identical_types_metadata(self, dtype):
        """Promoting a dtype with itself preserves its metadata; the
        byte-swapped variant documents when metadata survives."""
        # The same type passed in twice to promote types always
        # preserves metadata
        metadata = {1: 1}
        dtype = np.dtype(dtype, metadata=metadata)
        res = np.promote_types(dtype, dtype)
        assert res.metadata == dtype.metadata
        # byte-swapping preserves and makes the dtype native:
        dtype = dtype.newbyteorder()
        if dtype.isnative:
            # The type does not have byte swapping
            return
        res = np.promote_types(dtype, dtype)
        if res.char in "?bhilqpBHILQPefdgFDGOmM" or dtype.type is rational:
            # Metadata is lost for simple promotions (they create a new dtype)
            assert res.metadata is None
        else:
            assert res.metadata == metadata
        if dtype.kind != "V":
            # the result is native (except for structured void)
            assert res.isnative
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
@pytest.mark.parametrize(["dtype1", "dtype2"],
itertools.product(
list(np.typecodes["All"]) +
["i,i", "S3", "S100", "U3", "U100", rational],
repeat=2))
def test_promote_types_metadata(self, dtype1, dtype2):
"""Metadata handling in promotion does not appear formalized
right now in NumPy. This test should thus be considered to
document behaviour, rather than test the correct definition of it.
This test is very ugly, it was useful for rewriting part of the
promotion, but probably should eventually be replaced/deleted
(i.e. when metadata handling in promotion is better defined).
"""
metadata1 = {1: 1}
metadata2 = {2: 2}
dtype1 = np.dtype(dtype1, metadata=metadata1)
dtype2 = np.dtype(dtype2, metadata=metadata2)
try:
res = np.promote_types(dtype1, dtype2)
except TypeError:
# Promotion failed, this test only checks metadata
return
if res.char in "?bhilqpBHILQPefdgFDGOmM" or res.type is rational:
# All simple types lose metadata (due to using promotion table):
assert res.metadata is None
elif res == dtype1:
# If one result is the result, it is usually returned unchanged:
assert res is dtype1
elif res == dtype2:
# dtype1 may have been cast to the same type/kind as dtype2.
# If the resulting dtype is identical we currently pick the cast
# version of dtype1, which lost the metadata:
if np.promote_types(dtype1, dtype2.kind) == dtype2:
res.metadata is None
else:
res.metadata == metadata2
else:
assert res.metadata is None
# Try again for byteswapped version
dtype1 = dtype1.newbyteorder()
assert dtype1.metadata == metadata1
res_bs = np.promote_types(dtype1, dtype2)
if res_bs.names is not None:
# Structured promotion doesn't remove byteswap:
assert res_bs.newbyteorder() == res
else:
assert res_bs == res
assert res_bs.metadata == res.metadata
    # NOTE(review): this is an exact duplicate of test_invalid_void_promotion
    # defined earlier in this class; at class-creation time this later
    # definition silently replaces the earlier one. One copy should be removed.
    @pytest.mark.parametrize(["dtype1", "dtype2"],
            [[np.dtype("V6"), np.dtype("V10")],
             [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
             [np.dtype("i8,i8"), np.dtype("i4,i4")],
             ])
    def test_invalid_void_promotion(self, dtype1, dtype2):
        # Mainly test structured void promotion, which currently allows
        # byte-swapping, but nothing else:
        with pytest.raises(TypeError):
            np.promote_types(dtype1, dtype2)
    # NOTE(review): this is an exact duplicate of test_valid_void_promotion
    # defined earlier in this class; at class-creation time this later
    # definition silently replaces the earlier one. One copy should be removed.
    @pytest.mark.parametrize(["dtype1", "dtype2"],
            [[np.dtype("V10"), np.dtype("V10")],
             [np.dtype([("name1", "<i8")]), np.dtype([("name1", ">i8")])],
             [np.dtype("i8,i8"), np.dtype("i8,>i8")],
             ])
    def test_valid_void_promotion(self, dtype1, dtype2):
        assert np.promote_types(dtype1, dtype2) is dtype1
    def test_can_cast(self):
        """Spot-checks of np.can_cast: numeric kinds, the explicit
        casting-rule names, and minimal string sizes for integer types."""
        assert_(np.can_cast(np.int32, np.int64))
        assert_(np.can_cast(np.float64, complex))
        assert_(not np.can_cast(complex, float))
        assert_(np.can_cast('i8', 'f8'))
        assert_(not np.can_cast('i8', 'f4'))
        assert_(np.can_cast('i4', 'S11'))
        # Explicit casting rules, strictest to loosest.
        assert_(np.can_cast('i8', 'i8', 'no'))
        assert_(not np.can_cast('<i8', '>i8', 'no'))
        assert_(np.can_cast('<i8', '>i8', 'equiv'))
        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
        assert_(np.can_cast('<i4', '>i8', 'safe'))
        assert_(not np.can_cast('<i8', '>i4', 'safe'))
        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
        # Smallest byte string ('S') that can hold each integer type
        # (length of the longest decimal rendering, e.g. i1 -> 4 for "-128").
        assert_(np.can_cast('bool', 'S5'))
        assert_(not np.can_cast('bool', 'S4'))
        assert_(np.can_cast('b', 'S4'))
        assert_(not np.can_cast('b', 'S3'))
        assert_(np.can_cast('u1', 'S3'))
        assert_(not np.can_cast('u1', 'S2'))
        assert_(np.can_cast('u2', 'S5'))
        assert_(not np.can_cast('u2', 'S4'))
        assert_(np.can_cast('u4', 'S10'))
        assert_(not np.can_cast('u4', 'S9'))
        assert_(np.can_cast('u8', 'S20'))
        assert_(not np.can_cast('u8', 'S19'))
        assert_(np.can_cast('i1', 'S4'))
        assert_(not np.can_cast('i1', 'S3'))
        assert_(np.can_cast('i2', 'S6'))
        assert_(not np.can_cast('i2', 'S5'))
        assert_(np.can_cast('i4', 'S11'))
        assert_(not np.can_cast('i4', 'S10'))
        assert_(np.can_cast('i8', 'S21'))
        assert_(not np.can_cast('i8', 'S20'))
        # Same again for unicode ('U') targets (the two bool/'S' lines
        # below repeat the earlier checks verbatim).
        assert_(np.can_cast('bool', 'S5'))
        assert_(not np.can_cast('bool', 'S4'))
        assert_(np.can_cast('b', 'U4'))
        assert_(not np.can_cast('b', 'U3'))
        assert_(np.can_cast('u1', 'U3'))
        assert_(not np.can_cast('u1', 'U2'))
        assert_(np.can_cast('u2', 'U5'))
        assert_(not np.can_cast('u2', 'U4'))
        assert_(np.can_cast('u4', 'U10'))
        assert_(not np.can_cast('u4', 'U9'))
        assert_(np.can_cast('u8', 'U20'))
        assert_(not np.can_cast('u8', 'U19'))
        assert_(np.can_cast('i1', 'U4'))
        assert_(not np.can_cast('i1', 'U3'))
        assert_(np.can_cast('i2', 'U6'))
        assert_(not np.can_cast('i2', 'U5'))
        assert_(np.can_cast('i4', 'U11'))
        assert_(not np.can_cast('i4', 'U10'))
        assert_(np.can_cast('i8', 'U21'))
        assert_(not np.can_cast('i8', 'U20'))
        # None is rejected as either argument.
        assert_raises(TypeError, np.can_cast, 'i4', None)
        assert_raises(TypeError, np.can_cast, None, 'i4')
        # Also test keyword arguments
        assert_(np.can_cast(from_=np.int32, to=np.int64))
    def test_can_cast_simple_to_structured(self):
        """Casting non-structured -> structured requires 'unsafe'."""
        # Non-structured can only be cast to structured in 'unsafe' mode.
        assert_(not np.can_cast('i4', 'i4,i4'))
        assert_(not np.can_cast('i4', 'i4,i2'))
        assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
        assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
        # Even if there is just a single field which is OK.
        assert_(not np.can_cast('i2', [('f1', 'i4')]))
        assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
        assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
        # It should be the same for recursive structured or subarrays.
        assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
        assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
        assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
        assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
    def test_can_cast_structured_to_simple(self):
        """Casting structured -> non-structured also requires 'unsafe',
        and only works for effectively single-valued structures."""
        # Need unsafe casting for structured to simple.
        assert_(not np.can_cast([('f1', 'i4')], 'i4'))
        assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
        assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
        # Since it is unclear what is being cast, multiple fields to
        # single should not work even for unsafe casting.
        assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
        # But a single field inside a single field is OK.
        assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
        assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
        # And a subarray is fine too - it will just take the first element
        # (arguably not very consistently; might also take the first field).
        assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
        assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
        # But a structured subarray with multiple fields should fail.
        assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
                                casting='unsafe'))
    def test_can_cast_values(self):
        """gh-5917: can_cast with a Python scalar checks the actual value
        against the target type's representable range."""
        for dt in np.sctypes['int'] + np.sctypes['uint']:
            ii = np.iinfo(dt)
            assert_(np.can_cast(ii.min, dt))
            assert_(np.can_cast(ii.max, dt))
            # One beyond either end of the range must be rejected.
            assert_(not np.can_cast(ii.min - 1, dt))
            assert_(not np.can_cast(ii.max + 1, dt))
        for dt in np.sctypes['float']:
            fi = np.finfo(dt)
            assert_(np.can_cast(fi.min, dt))
            assert_(np.can_cast(fi.max, dt))
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
    """Custom exception raised by iterator fixtures to verify that
    np.fromiter propagates errors from the source iterator."""
    pass
class TestFromiter:
def makegen(self):
return (x**2 for x in range(24))
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
assert_(ai32.dtype == np.dtype(np.int32))
assert_(ai64.dtype == np.dtype(np.int64))
assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(len(a) == len(expected))
assert_(len(a20) == 20)
assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(np.alltrue(a == expected, axis=0))
assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
class TestNonzero:
    """Tests for np.nonzero / np.count_nonzero across dtypes, dimensions,
    structured arrays, axis/keepdims arguments, and pathological objects
    whose __bool__ has side effects or raises."""
    def test_nonzero_trivial(self):
        assert_equal(np.count_nonzero(np.array([])), 0)
        assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
        assert_equal(np.nonzero(np.array([])), ([],))
        assert_equal(np.count_nonzero(np.array([0])), 0)
        assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
        assert_equal(np.nonzero(np.array([0])), ([],))
        assert_equal(np.count_nonzero(np.array([1])), 1)
        assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
        assert_equal(np.nonzero(np.array([1])), ([0],))
    def test_nonzero_zerod(self):
        # 0-d input: count_nonzero works; nonzero is deprecated.
        assert_equal(np.count_nonzero(np.array(0)), 0)
        assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
        with assert_warns(DeprecationWarning):
            assert_equal(np.nonzero(np.array(0)), ([],))
        assert_equal(np.count_nonzero(np.array(1)), 1)
        assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
        with assert_warns(DeprecationWarning):
            assert_equal(np.nonzero(np.array(1)), ([0],))
    def test_nonzero_onedim(self):
        x = np.array([1, 0, 2, -1, 0, 0, 8])
        assert_equal(np.count_nonzero(x), 4)
        assert_equal(np.count_nonzero(x), 4)
        assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
        # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
        #             dtype=[('a', 'i4'), ('b', 'i2')])
        x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)],
                     dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')])
        assert_equal(np.count_nonzero(x['a']), 3)
        assert_equal(np.count_nonzero(x['b']), 4)
        assert_equal(np.count_nonzero(x['c']), 3)
        assert_equal(np.count_nonzero(x['d']), 4)
        assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
        assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
    def test_nonzero_twodim(self):
        x = np.array([[0, 1, 0], [2, 0, 3]])
        assert_equal(np.count_nonzero(x.astype('i1')), 3)
        assert_equal(np.count_nonzero(x.astype('i2')), 3)
        assert_equal(np.count_nonzero(x.astype('i4')), 3)
        assert_equal(np.count_nonzero(x.astype('i8')), 3)
        assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
        x = np.eye(3)
        assert_equal(np.count_nonzero(x.astype('i1')), 3)
        assert_equal(np.count_nonzero(x.astype('i2')), 3)
        assert_equal(np.count_nonzero(x.astype('i4')), 3)
        assert_equal(np.count_nonzero(x.astype('i8')), 3)
        assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
        x = np.array([[(0, 1), (0, 0), (1, 11)],
                      [(1, 1), (1, 0), (0, 0)],
                      [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
        assert_equal(np.count_nonzero(x['a']), 4)
        assert_equal(np.count_nonzero(x['b']), 5)
        assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
        assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
        # The transposed field view is unaligned; results must still agree.
        assert_(not x['a'].T.flags.aligned)
        assert_equal(np.count_nonzero(x['a'].T), 4)
        assert_equal(np.count_nonzero(x['b'].T), 5)
        assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
        assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
    def test_sparse(self):
        # test special sparse condition boolean code path
        for i in range(20):
            c = np.zeros(200, dtype=bool)
            c[i::20] = True
            assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
            c = np.zeros(400, dtype=bool)
            c[10 + i:20 + i] = True
            c[20 + i*2] = True
            assert_equal(np.nonzero(c)[0],
                         np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
    def test_return_type(self):
        # nonzero on a subclass must return plain writeable ndarrays.
        class C(np.ndarray):
            pass
        for view in (C, np.ndarray):
            for nd in range(1, 4):
                shape = tuple(range(2, 2+nd))
                x = np.arange(np.prod(shape)).reshape(shape).view(view)
                for nzx in (np.nonzero(x), x.nonzero()):
                    for nzx_i in nzx:
                        assert_(type(nzx_i) is np.ndarray)
                        assert_(nzx_i.flags.writeable)
    def test_count_nonzero_axis(self):
        # Basic check of functionality
        m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
        expected = np.array([1, 1, 1, 1, 1])
        assert_equal(np.count_nonzero(m, axis=0), expected)
        expected = np.array([2, 3])
        assert_equal(np.count_nonzero(m, axis=1), expected)
        # Invalid axis arguments: duplicate, wrong type, out of bounds.
        assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
        assert_raises(TypeError, np.count_nonzero, m, axis='foo')
        assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
        assert_raises(TypeError, np.count_nonzero,
                      m, axis=np.array([[1], [2]]))
    def test_count_nonzero_axis_all_dtypes(self):
        # More thorough test that the axis argument is respected
        # for all dtypes and responds correctly when presented with
        # either integer or tuple arguments for axis
        msg = "Mismatch for dtype: %s"
        def assert_equal_w_dt(a, b, err_msg):
            # Also compare the result dtype, not just the values.
            assert_equal(a.dtype, b.dtype, err_msg=err_msg)
            assert_equal(a, b, err_msg=err_msg)
        for dt in np.typecodes['All']:
            err_msg = msg % (np.dtype(dt).name,)
            if dt != 'V':
                if dt != 'M':
                    m = np.zeros((3, 3), dtype=dt)
                    n = np.ones(1, dtype=dt)
                    m[0, 0] = n[0]
                    m[1, 0] = n[0]
                else:  # np.zeros doesn't work for np.datetime64
                    m = np.array(['1970-01-01'] * 9)
                    m = m.reshape((3, 3))
                    m[0, 0] = '1970-01-12'
                    m[1, 0] = '1970-01-12'
                    m = m.astype(dt)
                expected = np.array([2, 0, 0], dtype=np.intp)
                assert_equal_w_dt(np.count_nonzero(m, axis=0),
                                  expected, err_msg=err_msg)
                expected = np.array([1, 1, 0], dtype=np.intp)
                assert_equal_w_dt(np.count_nonzero(m, axis=1),
                                  expected, err_msg=err_msg)
                expected = np.array(2)
                assert_equal(np.count_nonzero(m, axis=(0, 1)),
                             expected, err_msg=err_msg)
                assert_equal(np.count_nonzero(m, axis=None),
                             expected, err_msg=err_msg)
                assert_equal(np.count_nonzero(m),
                             expected, err_msg=err_msg)
            if dt == 'V':
                # There are no 'nonzero' objects for np.void, so the testing
                # setup is slightly different for this dtype
                m = np.array([np.void(1)] * 6).reshape((2, 3))
                expected = np.array([0, 0, 0], dtype=np.intp)
                assert_equal_w_dt(np.count_nonzero(m, axis=0),
                                  expected, err_msg=err_msg)
                expected = np.array([0, 0], dtype=np.intp)
                assert_equal_w_dt(np.count_nonzero(m, axis=1),
                                  expected, err_msg=err_msg)
                expected = np.array(0)
                assert_equal(np.count_nonzero(m, axis=(0, 1)),
                             expected, err_msg=err_msg)
                assert_equal(np.count_nonzero(m, axis=None),
                             expected, err_msg=err_msg)
                assert_equal(np.count_nonzero(m),
                             expected, err_msg=err_msg)
    def test_count_nonzero_axis_consistent(self):
        # Check that the axis behaviour for valid axes in
        # non-special cases is consistent (and therefore
        # correct) by checking it against an integer array
        # that is then casted to the generic object dtype
        from itertools import combinations, permutations
        axis = (0, 1, 2, 3)
        size = (5, 5, 5, 5)
        msg = "Mismatch for axis: %s"
        rng = np.random.RandomState(1234)
        m = rng.randint(-100, 100, size=size)
        n = m.astype(object)
        for length in range(len(axis)):
            for combo in combinations(axis, length):
                for perm in permutations(combo):
                    assert_equal(
                        np.count_nonzero(m, axis=perm),
                        np.count_nonzero(n, axis=perm),
                        err_msg=msg % (perm,))
    def test_countnonzero_axis_empty(self):
        # axis=() reduces over no axes: elementwise truthiness.
        a = np.array([[0, 0, 1], [1, 0, 1]])
        assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
    def test_countnonzero_keepdims(self):
        a = np.array([[0, 0, 1, 0],
                      [0, 3, 5, 0],
                      [7, 9, 2, 0]])
        assert_equal(np.count_nonzero(a, axis=0, keepdims=True),
                     [[1, 2, 3, 0]])
        assert_equal(np.count_nonzero(a, axis=1, keepdims=True),
                     [[1], [2], [3]])
        assert_equal(np.count_nonzero(a, keepdims=True),
                     [[6]])
    def test_array_method(self):
        # Tests that the array method
        # call to nonzero works
        m = np.array([[1, 0, 0], [4, 0, 6]])
        tgt = [[0, 1, 1], [0, 0, 2]]
        assert_equal(m.nonzero(), tgt)
    def test_nonzero_invalid_object(self):
        # gh-9295
        a = np.array([np.array([1, 2]), 3], dtype=object)
        assert_raises(ValueError, np.nonzero, a)
        class BoolErrors:
            def __bool__(self):
                raise ValueError("Not allowed")
        assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
    def test_nonzero_sideeffect_safety(self):
        # gh-13631: nonzero makes two passes over object arrays; if the
        # truth value changes between passes a RuntimeError must be raised.
        class FalseThenTrue:
            _val = False
            def __bool__(self):
                try:
                    return self._val
                finally:
                    self._val = True
        class TrueThenFalse:
            _val = True
            def __bool__(self):
                try:
                    return self._val
                finally:
                    self._val = False
        # result grows on the second pass
        a = np.array([True, FalseThenTrue()])
        assert_raises(RuntimeError, np.nonzero, a)
        a = np.array([[True], [FalseThenTrue()]])
        assert_raises(RuntimeError, np.nonzero, a)
        # result shrinks on the second pass
        a = np.array([False, TrueThenFalse()])
        assert_raises(RuntimeError, np.nonzero, a)
        a = np.array([[False], [TrueThenFalse()]])
        assert_raises(RuntimeError, np.nonzero, a)
    def test_nonzero_exception_safe(self):
        # gh-13930
        class ThrowsAfter:
            def __init__(self, iters):
                self.iters_left = iters
            def __bool__(self):
                if self.iters_left == 0:
                    raise ValueError("called `iters` times")
                self.iters_left -= 1
                return True
        """
        Test that a ValueError is raised instead of a SystemError
        If the __bool__ function is called after the error state is set,
        Python (cpython) will raise a SystemError.
        """
        # assert that an exception in first pass is handled correctly
        a = np.array([ThrowsAfter(5)]*10)
        assert_raises(ValueError, np.nonzero, a)
        # raise exception in second pass for 1-dimensional loop
        a = np.array([ThrowsAfter(15)]*10)
        assert_raises(ValueError, np.nonzero, a)
        # raise exception in second pass for n-dimensional loop
        a = np.array([[ThrowsAfter(15)]]*10)
        assert_raises(ValueError, np.nonzero, a)
    def test_structured_threadsafety(self):
        # Nonzero (and some other functions) should be threadsafe for
        # structured datatypes, see gh-15387. This test can behave randomly.
        from concurrent.futures import ThreadPoolExecutor
        # Create a deeply nested dtype to make a failure more likely:
        dt = np.dtype([("", "f8")])
        dt = np.dtype([("", dt)])
        dt = np.dtype([("", dt)] * 2)
        # The array should be large enough to likely run into threading issues
        arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0]
        def func(arr):
            arr.nonzero()
        tpe = ThreadPoolExecutor(max_workers=8)
        futures = [tpe.submit(func, arr) for _ in range(10)]
        for f in futures:
            f.result()
        assert arr.dtype is dt
class TestIndex:
    """Tests for boolean-mask indexing."""

    def test_boolean(self):
        # Masking the full 3-D array along the trailing axes must match
        # masking each 2-D plane individually.
        volume = rand(3, 5, 8)
        mask_src = rand(5, 8)
        rows = randint(0, 5, size=15)
        cols = randint(0, 8, size=15)
        # Flip the sign at some random positions so the mask is nontrivial.
        mask_src[rows, cols] = -mask_src[rows, cols]
        assert_((np.array([volume[0][mask_src > 0],
                           volume[1][mask_src > 0],
                           volume[2][mask_src > 0]])
                 == volume[:, mask_src > 0]).all())

    def test_boolean_edgecase(self):
        # An empty boolean index of an empty array yields an empty result
        # that keeps the original dtype.
        data = np.array([], dtype='int32')
        selector = np.array([], dtype='bool')
        selected = data[selector]
        assert_equal(selected, [])
        assert_equal(selected.dtype, np.dtype('int32'))
class TestBinaryRepr:
    """Tests for np.binary_repr on zero, positive, negative, and
    fixed-width inputs."""

    def test_zero(self):
        assert_equal(np.binary_repr(0), '0')

    def test_positive(self):
        for value, expected in [(10, '1010'),
                                (12522, '11000011101010'),
                                (10736848, '101000111101010011010000')]:
            assert_equal(np.binary_repr(value), expected)

    def test_negative(self):
        # Without a width, negatives render with a leading minus sign.
        for value, expected in [(-1, '-1'),
                                (-10, '-1010'),
                                (-12522, '-11000011101010'),
                                (-10736848, '-101000111101010011010000')]:
            assert_equal(np.binary_repr(value), expected)

    def test_sufficient_width(self):
        # With an explicit width: zero-padded, and two's complement for
        # negative values.
        assert_equal(np.binary_repr(0, width=5), '00000')
        assert_equal(np.binary_repr(10, width=7), '0001010')
        assert_equal(np.binary_repr(-5, width=7), '1111011')

    def test_neg_width_boundaries(self):
        # see gh-8670: -2**(width-1) must fit exactly in `width` bits.
        assert_equal(np.binary_repr(-128, width=8), '10000000')
        for width in range(1, 11):
            boundary = -2**(width - 1)
            expected = '1' + (width - 1) * '0'
            assert_equal(np.binary_repr(boundary, width=width), expected)

    def test_large_neg_int64(self):
        # See gh-14289: large negative int64 near the representable limit.
        assert_equal(np.binary_repr(np.int64(-2**62), width=64),
                     '11' + '0'*62)
class TestBaseRepr:
    """Tests for np.base_repr across the supported bases (2..36)."""

    def test_base3(self):
        assert_equal(np.base_repr(3**5, 3), '100000')

    def test_positive(self):
        assert_equal(np.base_repr(12, 10), '12')
        # The third argument pads with leading zeros.
        assert_equal(np.base_repr(12, 10, 4), '000012')
        assert_equal(np.base_repr(12, 4), '30')
        assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')

    def test_negative(self):
        # Padding goes between the minus sign and the digits.
        assert_equal(np.base_repr(-12, 10), '-12')
        assert_equal(np.base_repr(-12, 10, 4), '-000012')
        assert_equal(np.base_repr(-12, 4), '-30')

    def test_base_range(self):
        # Only bases 2..36 are representable with digits plus letters.
        for bad_base in (1, 37):
            with assert_raises(ValueError):
                np.base_repr(1, bad_base)
class TestArrayComparisons:
    """Tests for np.array_equal / np.array_equiv (including equal_nan)
    and elementwise comparison against None."""
    def test_array_equal(self):
        res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
        assert_(res)
        # The result must be a plain Python bool, not np.bool_.
        assert_(type(res) is bool)
        res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
        assert_(res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
                             np.array([('a', 1)], dtype='S1,u4'))
        assert_(res)
        assert_(type(res) is bool)
    def test_array_equal_equal_nan(self):
        # Test array_equal with equal_nan kwarg
        a1 = np.array([1, 2, np.nan])
        a2 = np.array([1, np.nan, 2])
        a3 = np.array([1, 2, np.inf])
        # equal_nan=False by default
        assert_(not np.array_equal(a1, a1))
        assert_(np.array_equal(a1, a1, equal_nan=True))
        assert_(not np.array_equal(a1, a2, equal_nan=True))
        # nan's not conflated with inf's
        assert_(not np.array_equal(a1, a3, equal_nan=True))
        # 0-D arrays
        a = np.array(np.nan)
        assert_(not np.array_equal(a, a))
        assert_(np.array_equal(a, a, equal_nan=True))
        # Non-float dtype - equal_nan should have no effect
        a = np.array([1, 2, 3], dtype=int)
        assert_(np.array_equal(a, a))
        assert_(np.array_equal(a, a, equal_nan=True))
        # Multi-dimensional array
        a = np.array([[0, 1], [np.nan, 1]])
        assert_(not np.array_equal(a, a))
        assert_(np.array_equal(a, a, equal_nan=True))
        # Complex values
        # (note: a and b alias the SAME array object here, so after the
        # assignments it is nan in both its real and imaginary parts)
        a, b = [np.array([1 + 1j])]*2
        a.real, b.imag = np.nan, np.nan
        assert_(not np.array_equal(a, b, equal_nan=False))
        assert_(np.array_equal(a, b, equal_nan=True))
    def test_none_compares_elementwise(self):
        # == / != against None is elementwise, not identity-based.
        a = np.array([None, 1, None], dtype=object)
        assert_equal(a == None, [True, False, True])
        assert_equal(a != None, [False, True, False])
        a = np.ones(3)
        assert_equal(a == None, [False, False, False])
        assert_equal(a != None, [True, True, True])
    def test_array_equiv(self):
        # array_equiv is like array_equal, but allows broadcasting.
        res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
        assert_(res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 1]), np.array([1]))
        assert_(res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
        assert_(res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 2]), np.array([2]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
        assert_(not res)
        assert_(type(res) is bool)
    @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
    def test_compare_unstructured_voids(self, dtype):
        """== and != on unstructured void arrays compare raw bytes."""
        zeros = np.zeros(3, dtype=dtype)
        assert_array_equal(zeros, zeros)
        assert not (zeros != zeros).any()
        if dtype == "V0":
            # Can't test != of actually different data
            return
        nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
        assert not (zeros == nonzeros).any()
        assert (zeros != nonzeros).all()
def assert_array_strict_equal(x, y):
    """Assert x and y are equal elementwise AND share array properties
    (flags and byte order) — stricter than assert_array_equal.

    Used by the clip tests below to verify the fast path produces arrays
    indistinguishable from the reference implementation's output.
    """
    assert_array_equal(x, y)
    # Check flags, 32 bit arches typically don't provide 16 byte alignment
    if ((x.dtype.alignment <= 8 or
            np.intp().dtype.itemsize != 4) and
            sys.platform != 'win32'):
        # Full flags comparison is only reliable off 32-bit/Windows.
        assert_(x.flags == y.flags)
    else:
        assert_(x.flags.owndata == y.flags.owndata)
        assert_(x.flags.writeable == y.flags.writeable)
        assert_(x.flags.c_contiguous == y.flags.c_contiguous)
        assert_(x.flags.f_contiguous == y.flags.f_contiguous)
        assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
    # check endianness
    assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip:
    def setup(self):
        # Default array dimensions used by the _generate_* helpers.
        self.nr = 5  # number of rows
        self.nc = 3  # number of columns
def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
if casting is None:
return a.clip(m, M)
else:
return a.clip(m, M, casting=casting)
else:
if casting is None:
return a.clip(m, M, out)
else:
return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
    def _generate_data(self, n, m):
        # n x m array of standard-normal doubles.
        return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j * rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
a = np.asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(np.int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
@pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
def test_ones_pathological(self, dtype):
# for preservation of behavior described in
# gh-12519; amin > amax behavior may still change
# in the future
arr = np.ones(10, dtype=dtype)
expected = np.zeros(10, dtype=dtype)
actual = np.clip(arr, 1, 0)
if dtype == 'O':
assert actual.tolist() == expected.tolist()
else:
assert_equal(actual, expected)
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
# Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
# Test native double input with non native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@pytest.mark.parametrize("casting", [None, "unsafe"])
def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
if casting is None:
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac, casting=casting)
else:
# explicitly passing "unsafe" will silence warning
self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
act = self.fastclip(a, m, M)
ac = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
# Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
# Test native int32 with float min/max and float out for output argument.
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.float32(-0.5)
M = np.float32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
# Test native int32 input and min/max and float out
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.int32(0)
M = np.int32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
# Test native int32 input with int32 scalar min/max and int64 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
# Test native int32 input with double array min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
# Test native double input with scalar min/max and int out
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_transposed(self):
# Test that the out argument works when transposed
a = np.arange(16).reshape(4, 4)
out = np.empty_like(a).T
a.clip(4, 10, out=out)
expected = self.clip(a, 4, 10)
assert_array_equal(out, expected)
def test_clip_with_out_memory_overlap(self):
# Test that the out argument works when it has memory overlap
a = np.arange(16).reshape(4, 4)
ac = a.copy()
a[:-1].clip(4, 10, out=a[1:])
expected = self.clip(ac[:-1], 4, 10)
assert_array_equal(a[1:], expected)
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
# Ensure that the clip() function takes an out=argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=-2, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=10), d)
def test_object_clip(self):
a = np.arange(10, dtype=object)
actual = np.clip(a, 1, 5)
expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
assert actual.tolist() == expected.tolist()
def test_clip_all_none(self):
a = np.arange(10, dtype=object)
with assert_raises_regex(ValueError, 'max or min'):
np.clip(a, None, None)
def test_clip_invalid_casting(self):
a = np.arange(10, dtype=object)
with assert_raises_regex(ValueError,
'casting must be one of'):
self.fastclip(a, 1, 8, casting="garbage")
@pytest.mark.parametrize("amin, amax", [
# two scalars
(1, 0),
# mix scalar and array
(1, np.zeros(10)),
# two arrays
(np.ones(10), np.zeros(10)),
])
def test_clip_value_min_max_flip(self, amin, amax):
a = np.arange(10, dtype=np.int64)
# requirement from ufunc_docstrings.py
expected = np.minimum(np.maximum(a, amin), amax)
actual = np.clip(a, amin, amax)
assert_equal(actual, expected)
@pytest.mark.parametrize("arr, amin, amax, exp", [
# for a bug in npy_ObjectClip, based on a
# case produced by hypothesis
(np.zeros(10, dtype=np.int64),
0,
-2**64+1,
np.full(10, -2**64+1, dtype=object)),
# for bugs in NPY_TIMEDELTA_MAX, based on a case
# produced by hypothesis
(np.zeros(10, dtype='m8') - 1,
0,
0,
np.zeros(10, dtype='m8')),
])
def test_clip_problem_cases(self, arr, amin, amax, exp):
actual = np.clip(arr, amin, amax)
assert_equal(actual, exp)
@pytest.mark.xfail(reason="no scalar nan propagation yet",
raises=AssertionError,
strict=True)
@pytest.mark.parametrize("arr, amin, amax", [
# problematic scalar nan case from hypothesis
(np.zeros(10, dtype=np.int64),
np.array(np.nan),
np.zeros(10, dtype=np.int32)),
])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_clip_scalar_nan_propagation(self, arr, amin, amax):
# enforcement of scalar nan propagation for comparisons
# called through clip()
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@pytest.mark.xfail(reason="propagation doesn't match spec")
@pytest.mark.parametrize("arr, amin, amax", [
(np.array([1] * 10, dtype='m8'),
np.timedelta64('NaT'),
np.zeros(10, dtype=np.int32)),
])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_NaT_propagation(self, arr, amin, amax):
# NOTE: the expected function spec doesn't
# propagate NaT, but clip() now does
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@given(
data=st.data(),
arr=hynp.arrays(
dtype=hynp.integer_dtypes() | hynp.floating_dtypes(),
shape=hynp.array_shapes()
)
)
def test_clip_property(self, data, arr):
"""A property-based test using Hypothesis.
This aims for maximum generality: it could in principle generate *any*
valid inputs to np.clip, and in practice generates much more varied
inputs than human testers come up with.
Because many of the inputs have tricky dependencies - compatible dtypes
and mutually-broadcastable shapes - we use `st.data()` strategy draw
values *inside* the test function, from strategies we construct based
on previous values. An alternative would be to define a custom strategy
with `@st.composite`, but until we have duplicated code inline is fine.
That accounts for most of the function; the actual test is just three
lines to calculate and compare actual vs expected results!
"""
numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
# Generate shapes for the bounds which can be broadcast with each other
# and with the base shape. Below, we might decide to use scalar bounds,
# but it's clearer to generate these shapes unconditionally in advance.
in_shapes, result_shape = data.draw(
hynp.mutually_broadcastable_shapes(
num_shapes=2, base_shape=arr.shape
)
)
# Scalar `nan` is deprecated due to the differing behaviour it shows.
s = numeric_dtypes.flatmap(
lambda x: hynp.from_dtype(x, allow_nan=False))
amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
shape=in_shapes[0], elements={"allow_nan": False}))
amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
shape=in_shapes[1], elements={"allow_nan": False}))
# Then calculate our result and expected result and check that they're
# equal! See gh-12519 and gh-19457 for discussion deciding on this
# property and the result_type argument.
result = np.clip(arr, amin, amax)
t = np.result_type(arr, amin, amax)
expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
assert result.dtype == t
assert_array_equal(result, expected)
class TestAllclose:
    """Tests for ``np.allclose``."""

    # Tolerances shared by the data-driven tests below.
    rtol = 1e-5
    atol = 1e-8

    def setup(self):
        # nan/inf comparisons would otherwise raise FP warnings.
        self.olderr = np.seterr(invalid='ignore')

    def teardown(self):
        # Restore the caller's floating-point error state.
        np.seterr(**self.olderr)

    def tst_allclose(self, x, y):
        """Helper: assert that x and y compare close."""
        assert_(np.allclose(x, y), "%s and %s not close" % (x, y))

    def tst_not_allclose(self, x, y):
        """Helper: assert that x and y do NOT compare close."""
        assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))

    def test_ip_allclose(self):
        # Parametric test factory.
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))

        atol = self.atol
        rtol = self.rtol

        data = [([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1+rtol+atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol*2),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf])]

        for (x, y) in data:
            self.tst_allclose(x, y)

    def test_ip_not_allclose(self):
        # Parametric test factory.
        aran = np.arange(125).reshape((5, 5, 5))

        atol = self.atol
        rtol = self.rtol

        data = [([np.inf, 0], [1, np.inf]),
                ([np.inf, 0], [1, 0]),
                ([np.inf, np.inf], [1, np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([-np.inf, 0], [np.inf, 0]),
                ([np.nan, 0], [np.nan, 0]),
                ([atol*2], [0]),
                ([1], [1+rtol+atol*2]),
                (aran, aran + aran*atol + atol*2),
                (np.array([np.inf, 1]), np.array([0, np.inf]))]

        for (x, y) in data:
            self.tst_not_allclose(x, y)

    def test_no_parameter_modification(self):
        # allclose must not mutate its inputs.
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.allclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))

    def test_min_int(self):
        # Could make problems because of abs(min_int) == min_int
        min_int = np.iinfo(np.int_).min
        a = np.array([min_int], dtype=np.int_)
        assert_(np.allclose(a, a))

    def test_equalnan(self):
        x = np.array([1.0, np.nan])
        assert_(np.allclose(x, x, equal_nan=True))

    def test_return_class_is_ndarray(self):
        # Issue gh-6475
        # Check that allclose does not preserve subtypes
        class Foo(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

        a = Foo([1])
        assert_(type(np.allclose(a, a)) is bool)
class TestIsclose:
    """Tests for ``np.isclose``.

    ``setup`` builds three parallel test tables: pairs that are close
    everywhere, pairs close nowhere, and pairs with mixed results (whose
    expected boolean outputs live in ``some_close_results``, in order).
    """

    rtol = 1e-5
    atol = 1e-8

    def setup(self):
        atol = self.atol
        rtol = self.rtol
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))

        self.all_close_tests = [
                ([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1 + rtol + atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf]),
                ([np.inf, -np.inf], [np.inf, -np.inf]),
                ]
        self.none_close_tests = [
                ([np.inf, 0], [1, np.inf]),
                ([np.inf, -np.inf], [1, 0]),
                ([np.inf, np.inf], [1, -np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([np.nan, 0], [np.nan, -np.inf]),
                ([atol*2], [0]),
                ([1], [1 + rtol + atol*2]),
                (aran, aran + rtol*1.1*aran + atol*1.1),
                (np.array([np.inf, 1]), np.array([0, np.inf])),
                ]
        # NOTE: some_close_results[i] is the expected elementwise output
        # of np.isclose(*some_close_tests[i]).
        self.some_close_tests = [
                ([np.inf, 0], [np.inf, atol*2]),
                ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
                (np.arange(3), [0, 1, 2.1]),
                (np.nan, [np.nan, np.nan, np.nan]),
                ([0], [atol, np.inf, -np.inf, np.nan]),
                (0, [atol, np.inf, -np.inf, np.nan]),
                ]
        self.some_close_results = [
                [True, False],
                [True, False, False],
                [True, True, False],
                [False, False, False],
                [True, False, False, False],
                [True, False, False, False],
                ]

    def test_ip_isclose(self):
        self.setup()
        tests = self.some_close_tests
        results = self.some_close_results
        for (x, y), result in zip(tests, results):
            assert_array_equal(np.isclose(x, y), result)

    def tst_all_isclose(self, x, y):
        """Helper: every element of x must be close to y."""
        assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))

    def tst_none_isclose(self, x, y):
        """Helper: no element of x may be close to y."""
        msg = "%s and %s shouldn't be close"
        assert_(not np.any(np.isclose(x, y)), msg % (x, y))

    def tst_isclose_allclose(self, x, y):
        """Helper: isclose(...).all() must agree with allclose(...)."""
        msg = "isclose.all() and allclose aren't same for %s and %s"
        msg2 = "isclose and allclose aren't same for %s and %s"
        if np.isscalar(x) and np.isscalar(y):
            assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
        else:
            assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))

    def test_ip_all_isclose(self):
        self.setup()
        for (x, y) in self.all_close_tests:
            self.tst_all_isclose(x, y)

    def test_ip_none_isclose(self):
        self.setup()
        for (x, y) in self.none_close_tests:
            self.tst_none_isclose(x, y)

    def test_ip_isclose_allclose(self):
        self.setup()
        tests = (self.all_close_tests + self.none_close_tests +
                 self.some_close_tests)
        for (x, y) in tests:
            self.tst_isclose_allclose(x, y)

    def test_equal_nan(self):
        assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
        arr = np.array([1.0, np.nan])
        assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])

    def test_masked_arrays(self):
        # Make sure to test the output type when arguments are interchanged.

        x = np.ma.masked_where([True, True, False], np.arange(3))
        assert_(type(x) is type(np.isclose(2, x)))
        assert_(type(x) is type(np.isclose(x, 2)))

        x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
        assert_(type(x) is type(np.isclose(np.inf, x)))
        assert_(type(x) is type(np.isclose(x, np.inf)))

        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(np.nan, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
        y = np.isclose(x, np.nan, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)

        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(x, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)

    def test_scalar_return(self):
        assert_(np.isscalar(np.isclose(1, 1)))

    def test_no_parameter_modification(self):
        # isclose must not mutate its inputs.
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.isclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))

    def test_non_finite_scalar(self):
        # GH7014, when two scalars are compared the output should also be a
        # scalar
        assert_(np.isclose(np.inf, -np.inf) is np.False_)
        assert_(np.isclose(0, np.inf) is np.False_)
        assert_(type(np.isclose(0, np.inf)) is np.bool_)

    def test_timedelta(self):
        # Allclose currently works for timedelta64 as long as `atol` is
        # an integer or also a timedelta64
        a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
        assert np.isclose(a, a, atol=0, equal_nan=True).all()
        assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all()
        assert np.allclose(a, a, atol=0, equal_nan=True)
        assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True)
class TestStdVar:
    """Tests for ``np.var`` / ``np.std`` on a simple +/-1 sequence."""

    def setup(self):
        # Alternating +-1 has mean 0 and population variance exactly 1.
        self.A = np.array([1, -1, 1, -1])
        self.real_var = 1

    def test_basic(self):
        # std squared must agree with var.
        assert_almost_equal(np.var(self.A), self.real_var)
        assert_almost_equal(np.std(self.A) ** 2, self.real_var)

    def test_scalars(self):
        # A lone scalar has zero spread.
        assert_equal(np.var(1), 0)
        assert_equal(np.std(1), 0)

    def test_ddof1(self):
        # With ddof=1 the estimate scales by n / (n - 1).
        n = len(self.A)
        expected = self.real_var * n / (n - 1)
        assert_almost_equal(np.var(self.A, ddof=1), expected)
        assert_almost_equal(np.std(self.A, ddof=1) ** 2, expected)

    def test_ddof2(self):
        # With ddof=2 the estimate scales by n / (n - 2).
        n = len(self.A)
        expected = self.real_var * n / (n - 2)
        assert_almost_equal(np.var(self.A, ddof=2), expected)
        assert_almost_equal(np.std(self.A, ddof=2) ** 2, expected)

    def test_out_scalar(self):
        # Each reduction must write into and return the provided 0-d out.
        d = np.arange(10)
        out = np.array(0.)
        for reduction in (np.std, np.var, np.mean):
            r = reduction(d, out=out)
            assert_(r is out)
            assert_array_equal(r, out)
class TestStdVarComplex:
    """``np.var`` / ``np.std`` for complex-valued input."""

    def test_basic(self):
        # Four unit-modulus points around the circle: mean 0, variance 1.
        samples = np.array([1, 1.j, -1, -1.j])
        expected = 1
        assert_almost_equal(np.var(samples), expected)
        assert_almost_equal(np.std(samples) ** 2, expected)

    def test_scalars(self):
        # A lone complex scalar has zero spread.
        assert_equal(np.var(1j), 0)
        assert_equal(np.std(1j), 0)
class TestCreationFuncs:
    # Test ones, zeros, empty and full.

    def setup(self):
        # All concrete scalar dtypes; flexible (itemsize-0) ones are
        # replaced by sized variants (1..9 bytes/chars) so they can be
        # instantiated.
        dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
        # void, bytes, str
        variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
        self.dtypes = sorted(dtypes - variable_sized |
                             {np.dtype(tp.str.replace("0", str(i)))
                                 for tp in variable_sized for i in range(1, 10)},
                             key=lambda dtype: dtype.str)
        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
        self.ndims = 10

    def check_function(self, func, fill_value=None):
        """Run *func* over the cross product of sizes/ndims/orders/dtypes
        and verify dtype, memory order and (for np.full) the fill value."""
        par = ((0, 1, 2),
               range(self.ndims),
               self.orders,
               self.dtypes)
        fill_kwarg = {}
        if fill_value is not None:
            fill_kwarg = {'fill_value': fill_value}

        for size, ndims, order, dtype in itertools.product(*par):
            shape = ndims * [size]

            # do not fill void type
            if fill_kwarg and dtype.str.startswith('|V'):
                continue

            arr = func(shape, order=order, dtype=dtype,
                       **fill_kwarg)

            assert_equal(arr.dtype, dtype)
            assert_(getattr(arr.flags, self.orders[order]))

            if fill_value is not None:
                if dtype.str.startswith('|S'):
                    val = str(fill_value)
                else:
                    val = fill_value
                assert_equal(arr, dtype.type(val))

    def test_zeros(self):
        self.check_function(np.zeros)

    def test_ones(self):
        self.check_function(np.ones)

    def test_empty(self):
        self.check_function(np.empty)

    def test_full(self):
        self.check_function(np.full, 0)
        self.check_function(np.full, 1)

    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_for_reference_leak(self):
        # Make sure we have an object for reference
        dim = 1
        beg = sys.getrefcount(dim)
        np.zeros([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.ones([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.empty([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.full([dim]*10, 0)
        assert_(sys.getrefcount(dim) == beg)
class TestLikeFuncs:
    '''Test ones_like, zeros_like, empty_like and full_like'''

    def setup(self):
        # Prototype arrays covering scalars and 1-3D arrays in C, F and
        # non-contiguous layouts, each paired with an optional target dtype.
        self.data = [
                # Array scalars
                (np.array(3.), None),
                (np.array(3), 'f8'),
                # 1D arrays
                (np.arange(6, dtype='f4'), None),
                (np.arange(6), 'c16'),
                # 2D C-layout arrays
                (np.arange(6).reshape(2, 3), None),
                (np.arange(6).reshape(3, 2), 'i1'),
                # 2D F-layout arrays
                (np.arange(6).reshape((2, 3), order='F'), None),
                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
                # 3D C-layout arrays
                (np.arange(24).reshape(2, 3, 4), None),
                (np.arange(24).reshape(4, 3, 2), 'f4'),
                # 3D F-layout arrays
                (np.arange(24).reshape((2, 3, 4), order='F'), None),
                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
                     ]
        # Override shapes for the shape= parameter tests.
        self.shapes = [(), (5,), (5,6,), (5,6,7,)]

    def compare_array_value(self, dz, value, fill_value):
        """Check that every element of *dz* equals *value* (cast to the
        array dtype when *fill_value* is true); overflowing casts pass."""
        if value is not None:
            if fill_value:
                try:
                    z = dz.dtype.type(value)
                except OverflowError:
                    pass
                else:
                    assert_(np.all(dz == z))
            else:
                assert_(np.all(dz == value))

    def check_like_function(self, like_function, value, fill_value=False):
        """Exercise *like_function* over self.data for every order and for
        the shape= override, checking shape, strides, order and contents."""
        if fill_value:
            fill_kwarg = {'fill_value': value}
        else:
            fill_kwarg = {}
        for d, dtype in self.data:
            # default (K) order, dtype
            dz = like_function(d, dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_equal(np.array(dz.strides)*d.dtype.itemsize,
                         np.array(d.strides)*dz.dtype.itemsize)
            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # C order, default dtype
            dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # F order, default dtype
            dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # A order
            dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            if d.flags.f_contiguous:
                assert_(dz.flags.f_contiguous)
            else:
                assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)

            # Test the 'shape' parameter
            for s in self.shapes:
                for o in 'CFA':
                    sz = like_function(d, dtype=dtype, shape=s, order=o,
                                       **fill_kwarg)
                    assert_equal(sz.shape, s)
                    if dtype is None:
                        assert_equal(sz.dtype, d.dtype)
                    else:
                        assert_equal(sz.dtype, np.dtype(dtype))
                    if o == 'C' or (o == 'A' and d.flags.c_contiguous):
                        assert_(sz.flags.c_contiguous)
                    elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
                        assert_(sz.flags.f_contiguous)
                    self.compare_array_value(sz, value, fill_value)

                # For order='K' the stride ordering must follow the
                # prototype's when ndim matches, else default to C order.
                if (d.ndim != len(s)):
                    assert_equal(np.argsort(like_function(d, dtype=dtype,
                                                          shape=s, order='K',
                                                          **fill_kwarg).strides),
                                 np.argsort(np.empty(s, dtype=dtype,
                                                     order='C').strides))
                else:
                    assert_equal(np.argsort(like_function(d, dtype=dtype,
                                                          shape=s, order='K',
                                                          **fill_kwarg).strides),
                                 np.argsort(d.strides))

        # Test the 'subok' parameter
        class MyNDArray(np.ndarray):
            pass

        a = np.array([[1, 2], [3, 4]]).view(MyNDArray)

        b = like_function(a, **fill_kwarg)
        assert_(type(b) is MyNDArray)

        b = like_function(a, subok=False, **fill_kwarg)
        assert_(type(b) is not MyNDArray)

    def test_ones_like(self):
        self.check_like_function(np.ones_like, 1)

    def test_zeros_like(self):
        self.check_like_function(np.zeros_like, 0)

    def test_empty_like(self):
        self.check_like_function(np.empty_like, None)

    def test_filled_like(self):
        self.check_like_function(np.full_like, 0, True)
        self.check_like_function(np.full_like, 1, True)
        self.check_like_function(np.full_like, 1000, True)
        self.check_like_function(np.full_like, 123.456, True)
        self.check_like_function(np.full_like, np.inf, True)

    @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like,
                                          np.zeros_like, np.ones_like])
    @pytest.mark.parametrize('dtype', [str, bytes])
    def test_dtype_str_bytes(self, likefunc, dtype):
        # Regression test for gh-19860
        a = np.arange(16).reshape(2, 8)
        b = a[:, ::2]  # Ensure b is not contiguous.
        kwargs = {'fill_value': ''} if likefunc == np.full_like else {}
        result = likefunc(b, dtype=dtype, **kwargs)
        if dtype == str:
            assert result.strides == (16, 4)
        else:
            # dtype is bytes
            assert result.strides == (4, 1)
class TestCorrelate:
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
self.zs = np.array([-3., -14., -30., -48., -66., -84.,
-102., -54., -19.], dtype=dt)
def test_float(self):
self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
assert_array_almost_equal(z, self.z1_4)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
z = np.correlate(self.x[::-1], self.y, 'full')
assert_array_almost_equal(z, self.z1r)
z = np.correlate(self.y, self.x[::-1], 'full')
assert_array_almost_equal(z, self.z2r)
z = np.correlate(self.xs, self.y, 'full')
assert_array_almost_equal(z, self.zs)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.correlate(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=complex)
y = np.array([-1, -2j, 3+1j], dtype=complex)
r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
def test_zero_size(self):
with pytest.raises(ValueError):
np.correlate(np.array([]), np.ones(1000), mode='full')
with pytest.raises(ValueError):
np.correlate(np.ones(1000), np.array([]), mode='full')
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.correlate(d, k, mode='valid')
with assert_warns(DeprecationWarning):
valid_mode = np.correlate(d, k, mode='v')
assert_array_equal(valid_mode, default_mode)
# integer mode
with assert_raises(ValueError):
np.correlate(d, k, mode=-1)
assert_array_equal(np.correlate(d, k, mode=0), valid_mode)
# illegal arguments
with assert_raises(TypeError):
np.correlate(d, k, mode=None)
class TestConvolve:
    """Tests for np.convolve: object inputs, operand safety, modes."""

    def test_object(self):
        # list inputs: the interior of a ones*ones convolution is all 3s
        data = [1.] * 100
        kernel = [1.] * 3
        assert_array_almost_equal(np.convolve(data, kernel)[2:-2], np.full(98, 3))

    def test_no_overwrite(self):
        # convolve must not modify its operands
        data = np.ones(100)
        kernel = np.ones(3)
        np.convolve(data, kernel)
        assert_array_equal(data, np.ones(100))
        assert_array_equal(kernel, np.ones(3))

    def test_mode(self):
        data = np.ones(100)
        kernel = np.ones(3)
        default_mode = np.convolve(data, kernel, mode='full')
        # abbreviated mode strings are deprecated but still honoured
        with assert_warns(DeprecationWarning):
            full_mode = np.convolve(data, kernel, mode='f')
        assert_array_equal(full_mode, default_mode)
        # integer modes: negative is invalid, 2 means 'full'
        with assert_raises(ValueError):
            np.convolve(data, kernel, mode=-1)
        assert_array_equal(np.convolve(data, kernel, mode=2), full_mode)
        # unsupported argument types
        with assert_raises(TypeError):
            np.convolve(data, kernel, mode=None)
class TestArgwhere:
    """Tests for np.argwhere (indices of nonzero/True elements)."""

    @pytest.mark.parametrize('nd', [0, 1, 2])
    def test_nd(self, nd):
        # get an nd array with multiple elements in every dimension
        x = np.empty((2,)*nd, bool)
        # none True -> zero rows, one column per dimension
        x[...] = False
        assert_equal(np.argwhere(x).shape, (0, nd))
        # only one True
        x[...] = False
        x.flat[0] = True
        assert_equal(np.argwhere(x).shape, (1, nd))
        # all but one True
        x[...] = True
        x.flat[0] = False
        assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
        # all True
        x[...] = True
        assert_equal(np.argwhere(x).shape, (x.size, nd))

    def test_2D(self):
        # rows are (row, col) coordinates of the matching elements
        x = np.arange(6).reshape((2, 3))
        assert_array_equal(np.argwhere(x > 1),
                           [[0, 2],
                            [1, 0],
                            [1, 1],
                            [1, 2]])

    def test_list(self):
        # list input is converted; indices of nonzero entries are returned
        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction:
    """Tests for np.set_string_function overriding repr()/str() of arrays.

    NOTE(review): np.set_string_function was deprecated and removed from
    the main namespace in NumPy 2.0 -- confirm against the NumPy version
    this suite targets.
    """

    def test_set_string_function(self):
        a = np.array([1])
        # override repr, then restore the default by passing None
        np.set_string_function(lambda x: "FOO", repr=True)
        assert_equal(repr(a), "FOO")
        np.set_string_function(None, repr=True)
        assert_equal(repr(a), "array([1])")
        # override str, then restore the default
        np.set_string_function(lambda x: "FOO", repr=False)
        assert_equal(str(a), "FOO")
        np.set_string_function(None, repr=False)
        assert_equal(str(a), "[1]")
class TestRoll:
    """Tests for np.roll: 1-D, 2-D, multi-axis, repeated-axis, empty."""

    def test_roll1d(self):
        rolled = np.roll(np.arange(10), 2)
        assert_equal(rolled, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))

    def test_roll2d(self):
        arr = np.reshape(np.arange(10), (2, 5))
        # (shift, axis, expected) -- axis None flattens before rolling
        cases = [
            (1, None, [[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]),
            (1, 0, [[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]),
            (1, 1, [[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]),
            # roll several axes in a single call
            (1, (0, 1), [[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]),
            ((1, 0), (0, 1), [[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]),
            ((-1, 0), (0, 1), [[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]),
            ((0, 1), (0, 1), [[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]),
            ((0, -1), (0, 1), [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]),
            ((1, 1), (0, 1), [[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]),
            ((-1, -1), (0, 1), [[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]),
            # naming the same axis twice applies the shift twice
            (1, (0, 0), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
            (1, (1, 1), [[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]),
            # shifts beyond the axis length wrap around
            (6, 1, [[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]),
            (-4, 1, [[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]),
        ]
        for shift, axis, expected in cases:
            assert_equal(np.roll(arr, shift, axis=axis), np.array(expected))

    def test_roll_empty(self):
        # rolling an empty array is a no-op
        assert_equal(np.roll(np.array([]), 1), np.array([]))
class TestRollaxis:
    """Tests for np.rollaxis over every (axis, start) combination."""

    # expected shape indexed by (axis, start) for array of
    # shape (1, 2, 3, 4)
    tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
                (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
                (0, 4): (2, 3, 4, 1),
                (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
                (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
                (1, 4): (1, 3, 4, 2),
                (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
                (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
                (2, 4): (1, 2, 4, 3),
                (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
                (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
                (3, 4): (1, 2, 3, 4)}

    def test_exceptions(self):
        # out-of-bounds axis or start values must raise AxisError
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
        assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
        assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
        assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
        assert_raises(np.AxisError, np.rollaxis, a, 0, 5)

    def test_results(self):
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
        aind = np.indices(a.shape)
        assert_(a.flags['OWNDATA'])
        for (i, j) in self.tgtshape:
            # positive axis, positive start
            res = np.rollaxis(a, axis=i, start=j)
            # re-index res so it should reproduce a elementwise; the shape
            # is a permutation of (1, 2, 3, 4), so indexing aind by
            # res.shape - 1 recovers usable index arrays
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
            # rollaxis must return a view, never a copy
            assert_(not res.flags['OWNDATA'])
            # negative axis, positive start: -ip is equivalent to 4 - ip
            ip = i + 1
            res = np.rollaxis(a, axis=-ip, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, j)])
            assert_(not res.flags['OWNDATA'])
            # positive axis, negative start: -jp is equivalent to 4 - jp
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=i, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
            assert_(not res.flags['OWNDATA'])
            # negative axis, negative start
            ip = i + 1
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=-ip, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
            assert_(not res.flags['OWNDATA'])
class TestMoveaxis:
    """Tests for np.moveaxis.

    Bug fix: the original assertions called ``assert_(actual, expected)``,
    which passes ``expected`` as the *message* argument of
    ``numpy.testing.assert_`` and therefore only checks that ``actual`` is
    truthy -- a non-empty shape tuple always is, so the comparisons were
    never actually performed.  They are now real comparisons via
    ``assert_equal``.
    """

    def test_move_to_end(self):
        # moving any single axis to the last position
        x = np.random.randn(5, 6, 7)
        for source, expected in [(0, (6, 7, 5)),
                                 (1, (5, 7, 6)),
                                 (2, (5, 6, 7)),
                                 (-1, (5, 6, 7))]:
            actual = np.moveaxis(x, source, -1).shape
            assert_equal(actual, expected)

    def test_move_new_position(self):
        x = np.random.randn(1, 2, 3, 4)
        for source, destination, expected in [
                (0, 1, (2, 1, 3, 4)),
                (1, 2, (1, 3, 2, 4)),
                (1, -1, (1, 3, 4, 2)),
                ]:
            actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)

    def test_preserve_order(self):
        # no-op moves must leave the axis order untouched
        x = np.zeros((1, 2, 3, 4))
        for source, destination in [
                (0, 0),
                (3, -1),
                (-1, 3),
                ([0, -1], [0, -1]),
                ([2, 0], [2, 0]),
                (range(4), range(4)),
                ]:
            actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, (1, 2, 3, 4))

    def test_move_multiples(self):
        # moving several axes in one call
        x = np.zeros((0, 1, 2, 3))
        for source, destination, expected in [
                ([0, 1], [2, 3], (2, 3, 0, 1)),
                ([2, 3], [0, 1], (2, 3, 0, 1)),
                ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
                ([3, 0], [1, 0], (0, 3, 1, 2)),
                ([0, 3], [0, 1], (0, 3, 1, 2)),
                ]:
            actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)

    def test_errors(self):
        # invalid source/destination specifications
        x = np.random.randn(1, 2, 3)
        assert_raises_regex(np.AxisError, 'source.*out of bounds',
                            np.moveaxis, x, 3, 0)
        assert_raises_regex(np.AxisError, 'source.*out of bounds',
                            np.moveaxis, x, -4, 0)
        assert_raises_regex(np.AxisError, 'destination.*out of bounds',
                            np.moveaxis, x, 0, 5)
        assert_raises_regex(ValueError, 'repeated axis in `source`',
                            np.moveaxis, x, [0, 0], [0, 1])
        assert_raises_regex(ValueError, 'repeated axis in `destination`',
                            np.moveaxis, x, [0, 1], [1, 1])
        assert_raises_regex(ValueError, 'must have the same number',
                            np.moveaxis, x, 0, [0, 1])
        assert_raises_regex(ValueError, 'must have the same number',
                            np.moveaxis, x, [0, 1], [0])

    def test_array_likes(self):
        # ndarray subclasses are preserved
        x = np.ma.zeros((1, 2, 3))
        result = np.moveaxis(x, 0, 0)
        assert_equal(x.shape, result.shape)
        assert_(isinstance(result, np.ma.MaskedArray))
        # plain sequences are converted to ndarray
        x = [1, 2, 3]
        result = np.moveaxis(x, 0, 0)
        assert_equal(x, list(result))
        assert_(isinstance(result, np.ndarray))
class TestCross:
    """Tests for np.cross: 2-vector/3-vector mixes and broadcasting."""

    def _check_anticommutes(self, a, b, expected):
        # cross(a, b) == expected, and swapping operands negates the result
        assert_equal(np.cross(a, b), expected)
        assert_equal(np.cross(b, a), -expected)

    def test_2x2(self):
        # two 2-vectors: scalar z-component
        self._check_anticommutes([1, 2], [3, 4], -2)

    def test_2x3(self):
        # 2-vector with 3-vector: full 3-vector result
        self._check_anticommutes([1, 2], [3, 4, 5], np.array([10, -5, -2]))

    def test_3x3(self):
        self._check_anticommutes([1, 2, 3], [4, 5, 6], np.array([-3, 6, -3]))

    def test_broadcasting(self):
        # Ticket #2624 (Trac #2032)
        a = np.tile([1, 2], (11, 1))
        b = np.tile([3, 4], (11, 1))
        self._check_anticommutes(a, b, -2)
        assert_equal(np.cross(a, a), 0)

        a = np.tile([1, 2], (11, 1)).T
        b = np.tile([3, 4, 5], (11, 1))
        expected = np.tile([10, -5, -2], (11, 1))
        assert_equal(np.cross(a, b, axisa=0), expected)
        assert_equal(np.cross(b, a.T), -expected)
        assert_equal(np.cross(b, b), 0)

        a = np.tile([1, 2, 3], (11, 1)).T
        b = np.tile([3, 4], (11, 1)).T
        expected = np.tile([-12, 9, -2], (11, 1))
        assert_equal(np.cross(a, b, axisa=0, axisb=0), expected)
        assert_equal(np.cross(b.T, a.T), -expected)
        assert_equal(np.cross(a.T, a.T), 0)

        a = np.tile([1, 2, 3], (5, 1))
        b = np.tile([4, 5, 6], (5, 1)).T
        expected = np.tile([-3, 6, -3], (5, 1))
        assert_equal(np.cross(a, b, axisb=0), expected)
        assert_equal(np.cross(b.T, a), -expected)
        assert_equal(np.cross(a, a), 0)

    def test_broadcasting_shapes(self):
        # the non-vector dimensions broadcast normally
        a = np.ones((2, 1, 3))
        b = np.ones((5, 3))
        assert_equal(np.cross(a, b).shape, (2, 5, 3))
        a = np.ones((10, 3, 5))
        b = np.ones((2, 5))
        assert_equal(np.cross(a, b, axisa=1, axisb=0).shape, (10, 5, 3))
        assert_raises(np.AxisError, np.cross, a, b, axisa=1, axisb=2)
        assert_raises(np.AxisError, np.cross, a, b, axisa=3, axisb=0)
        a = np.ones((10, 3, 5, 7))
        b = np.ones((5, 7, 2))
        assert_equal(np.cross(a, b, axisa=1, axisc=2).shape, (10, 5, 3, 7))
        assert_raises(np.AxisError, np.cross, a, b, axisa=-5, axisb=2)
        assert_raises(np.AxisError, np.cross, a, b, axisa=1, axisb=-4)
        # gh-5885: negative axisc values must be accepted
        u = np.ones((3, 4, 2))
        for axisc in range(-2, 2):
            assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
def test_outer_out_param():
    """np.outer writes its result into the supplied ``out`` array and
    returns that same array."""
    ones_5 = np.ones((5,))
    ones_2 = np.ones((2,))
    lin = np.linspace(-2, 2, 5)
    buf_5x5 = np.ndarray(shape=(5, 5))
    buf_2x5 = np.ndarray(shape=(2, 5))
    returned = np.outer(ones_5, lin, buf_5x5)
    assert_equal(returned, buf_5x5)
    assert_equal(np.outer(ones_2, lin, buf_2x5), buf_2x5)
class TestIndices:
    """Tests for np.indices (dense and sparse index grids)."""

    def test_simple(self):
        # dense grid: one full coordinate array per dimension
        [x, y] = np.indices((4, 3))
        assert_array_equal(x, np.array([[0, 0, 0],
                                        [1, 1, 1],
                                        [2, 2, 2],
                                        [3, 3, 3]]))
        assert_array_equal(y, np.array([[0, 1, 2],
                                        [0, 1, 2],
                                        [0, 1, 2],
                                        [0, 1, 2]]))

    def test_single_input(self):
        # 1-D shape: dense and sparse results coincide
        [x] = np.indices((4,))
        assert_array_equal(x, np.array([0, 1, 2, 3]))
        [x] = np.indices((4,), sparse=True)
        assert_array_equal(x, np.array([0, 1, 2, 3]))

    def test_scalar_input(self):
        # 0-d and zero-length shapes yield empty grids
        assert_array_equal([], np.indices(()))
        assert_array_equal([], np.indices((), sparse=True))
        assert_array_equal([[]], np.indices((0,)))
        assert_array_equal([[]], np.indices((0,), sparse=True))

    def test_sparse(self):
        # sparse grid: each array keeps a single broadcastable axis
        [x, y] = np.indices((4,3), sparse=True)
        assert_array_equal(x, np.array([[0], [1], [2], [3]]))
        assert_array_equal(y, np.array([[0, 1, 2]]))

    @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
    @pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
    def test_return_type(self, dtype, dims):
        # the requested dtype must be honoured in dense and sparse form
        inds = np.indices(dims, dtype=dtype)
        assert_(inds.dtype == dtype)
        for arr in np.indices(dims, dtype=dtype, sparse=True):
            assert_(arr.dtype == dtype)
class TestRequire:
    """Tests for np.require flag handling."""

    # accepted spellings for each requirement flag
    flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
                  'F', 'F_CONTIGUOUS', 'FORTRAN',
                  'A', 'ALIGNED',
                  'W', 'WRITEABLE',
                  'O', 'OWNDATA']

    def generate_all_false(self, dtype):
        # Build an array whose C/F/O/W/A flags are all False: a read-only,
        # non-contiguous field view of a structured array.
        arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
        arr.setflags(write=False)
        a = arr['a']
        assert_(not a.flags['C'])
        assert_(not a.flags['F'])
        assert_(not a.flags['O'])
        assert_(not a.flags['W'])
        assert_(not a.flags['A'])
        return a

    def set_and_check_flag(self, flag, dtype, arr):
        # np.require must produce an array satisfying `flag` (and `dtype`,
        # if given); dtype None means "keep the input's dtype"
        if dtype is None:
            dtype = arr.dtype
        b = np.require(arr, dtype, [flag])
        assert_(b.flags[flag])
        assert_(b.dtype == dtype)
        # a further call to np.require ought to return the same array
        # unless OWNDATA is specified.
        c = np.require(b, None, [flag])
        if flag[0] != 'O':
            assert_(c is b)
        else:
            assert_(c.flags[flag])

    def test_require_each(self):
        # every (input dtype, requested dtype, flag) combination
        id = ['f8', 'i4']
        fd = [None, 'f8', 'c16']
        for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
            a = self.generate_all_false(idtype)
            self.set_and_check_flag(flag, fdtype, a)

    def test_unknown_requirement(self):
        # unrecognized flag strings must raise KeyError
        a = self.generate_all_false('f8')
        assert_raises(KeyError, np.require, a, None, 'Q')

    def test_non_array_input(self):
        # list input is converted and all requested flags are honoured
        a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
        assert_(a.flags['O'])
        assert_(a.flags['C'])
        assert_(a.flags['A'])
        assert_(a.dtype == 'i4')
        assert_equal(a, [1, 2, 3, 4])

    def test_C_and_F_simul(self):
        # C- and F-contiguity cannot be requested together
        a = self.generate_all_false('f8')
        assert_raises(ValueError, np.require, a, None, ['C', 'F'])

    def test_ensure_array(self):
        # 'E' (ENSUREARRAY) strips the subclass and returns a base ndarray
        class ArraySubclass(np.ndarray):
            pass

        a = ArraySubclass((2, 2))
        b = np.require(a, None, ['E'])
        assert_(type(b) is np.ndarray)

    def test_preserve_subtype(self):
        # without 'E', the subclass must survive every flag request
        class ArraySubclass(np.ndarray):
            pass

        for flag in self.flag_names:
            a = ArraySubclass((2, 2))
            self.set_and_check_flag(flag, None, a)
class TestBroadcast:
    """Tests for the np.broadcast iterator object."""

    def test_broadcast_in_args(self):
        # gh-5881: np.broadcast objects are accepted as arguments and
        # contribute their member iterators transparently
        arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
                np.empty((5, 1, 7))]
        mits = [np.broadcast(*arrs),
                np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
                np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
                np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
                np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
        for mit in mits:
            assert_equal(mit.shape, (5, 6, 7))
            assert_equal(mit.ndim, 3)
            # .nd is the legacy alias of .ndim
            assert_equal(mit.nd, 3)
            assert_equal(mit.numiter, 4)
            for a, ia in zip(arrs, mit.iters):
                assert_(a is ia.base)

    def test_broadcast_single_arg(self):
        # gh-6899: a single argument is allowed
        arrs = [np.empty((5, 6, 7))]
        mit = np.broadcast(*arrs)
        assert_equal(mit.shape, (5, 6, 7))
        assert_equal(mit.ndim, 3)
        assert_equal(mit.nd, 3)
        assert_equal(mit.numiter, 1)
        assert_(arrs[0] is mit.iters[0].base)

    def test_number_of_arguments(self):
        # more than 32 operands must be rejected
        arr = np.empty((5,))
        for j in range(35):
            arrs = [arr] * j
            if j > 32:
                assert_raises(ValueError, np.broadcast, *arrs)
            else:
                mit = np.broadcast(*arrs)
                assert_equal(mit.numiter, j)

    def test_broadcast_error_kwargs(self):
        # gh-13455: an empty **kwargs is accepted, unknown keywords raise
        arrs = [np.empty((5, 6, 7))]
        mit = np.broadcast(*arrs)
        mit2 = np.broadcast(*arrs, **{})
        assert_equal(mit.shape, mit2.shape)
        assert_equal(mit.ndim, mit2.ndim)
        assert_equal(mit.nd, mit2.nd)
        assert_equal(mit.numiter, mit2.numiter)
        assert_(mit.iters[0].base is mit2.iters[0].base)
        assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
class TestKeepdims:
    """np.sum must raise TypeError when it forwards ``keepdims`` to a
    subclass whose overriding ``sum`` does not accept that parameter."""

    class sub_array(np.ndarray):
        # deliberately omits the keepdims parameter from the signature
        def sum(self, axis=None, dtype=None, out=None):
            return np.ndarray.sum(self, axis, dtype, out, keepdims=True)

    def test_raise(self):
        data = np.arange(30).view(self.sub_array)
        assert_raises(TypeError, np.sum, data, keepdims=True)
class TestTensordot:
    """Corner-case tests for np.tensordot."""

    def test_zero_dimension(self):
        # gh-5663: contracting over a zero-length axis gives an empty
        # result consistent with dot and einsum
        left = np.ndarray((3, 0))
        right = np.ndarray((0, 4))
        product = np.tensordot(left, right, (1, 0))
        assert_array_equal(product, np.dot(left, right))
        assert_array_equal(product, np.einsum('ij,jk', left, right))

    def test_zero_dimensional(self):
        # gh-12130: contracting no axes of 0-d arrays is well defined
        scalar = np.array(1)
        result = np.tensordot(scalar, scalar, ([], []))
        assert_array_equal(result, scalar)
| simongibbons/numpy | numpy/core/tests/test_numeric.py | Python | bsd-3-clause | 134,624 |
###################################################
# header_scenes.py
# This file contains declarations for scenes
# DO NOT EDIT THIS FILE!
###################################################
# Field indices within a scene record tuple.
scene_name_pos = 0
passages_pos = 8

#flags
# Bitmask values combined into a scene's flags field.
sf_indoors = 0x00000001 #The scene shouldn't have a skybox and lighting by sun.
sf_force_skybox = 0x00000002 #Force adding a skybox even if indoors flag is set.
sf_generate = 0x00000100 #Generate terrain by terran-generator
sf_randomize = 0x00000200 #Randomize terrain generator key
sf_auto_entry_points = 0x00000400 #Automatically create entry points
sf_no_horses = 0x00000800 #Horses are not avaible
sf_muddy_water = 0x00001000 #Changes the shader of the river mesh
| Sw4T/Warband-Development | mb_warband_module_system_1166/Module_system 1.166/headers/header_scenes.py | Python | mit | 768 |
"""OtpProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
#from django.contrib.auth import views as auth_views
from . import views
# OTP endpoint routes; the `name` values are looked up via reverse()/{% url %},
# so keep them stable.
urlpatterns = [
    url(r'^register/$', views.otp_register, name='register'),
    url(r'^login/$', views.otp_login, name='login'),
    url(r'^verify/$', views.otp_verify, name='verify'),
    url(r'^status/$', views.otp_status, name='status'),
    url(r'^logout/$', views.otp_logout, name='logout'),
    url(r'^token/$', views.otp_token, name='token'),
]
| NandeeshHD/Django_OTP_Twilio | OtpProject/OtpApp/urls.py | Python | gpl-2.0 | 1,080 |
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class BasicTestClient(TestClient):
    """Test client whose brain is loaded from the ``.aiml`` files that sit
    in the same directory as this test module."""

    def __init__(self):
        TestClient.__init__(self)

    def load_configuration(self, arguments):
        # Run the base configuration first, then point the brain at this
        # directory's AIML files (extension ".aiml", third argument False --
        # NOTE(review): presumably disables recursive directory scanning;
        # confirm against BrainFileConfiguration).
        super(BasicTestClient, self).load_configuration(arguments)
        self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(os.path.dirname(__file__), ".aiml", False)
class UnderlineAIMLTests(unittest.TestCase):
    """End-to-end tests for the AIML ``_`` (underline) wildcard.

    Each test asks the bot a question and checks that the wildcard captured
    the expected words at the start, middle, or end of the pattern.

    Fix: ``setUp`` named its parameter ``cls`` although unittest invokes it
    on the instance; renamed to ``self`` for clarity (the call is
    positional, so behavior is unchanged).  The client is still stored as a
    class attribute, matching how every test reads it.
    """

    def setUp(self):
        UnderlineAIMLTests.test_client = BasicTestClient()

    def test_underline_first(self):
        # wildcard at the start of the pattern captures one word
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "SAY HEY")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'STAR IS SAY')

    def test_underline_first_multi_words(self):
        # wildcard at the start captures multiple words
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "THE MAN SAYS HEY")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'STAR IS THE MAN SAYS')

    def test_underline_last(self):
        # wildcard at the end captures one word
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "HELLO KEIFFBOT")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'HI KEIFFBOT')

    def test_underline_last_multi_words(self):
        # wildcard at the end captures multiple words
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "HELLO KEIFFBOT MATE")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'HI KEIFFBOT MATE')

    def test_multi_underline(self):
        # two wildcards in one pattern, one word each
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "WELL HI THERE")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'YOU SAID WELL AND THERE')

    def test_multi_underline_mulit_words(self):
        # two wildcards, multiple words each
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "WELL THEN HI THERE MATE")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'YOU SAID WELL THEN AND THERE MATE')

    def test_underline_middle(self):
        # wildcard in the middle of the pattern captures one word
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "GOODBYE KEIFF SEEYA")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'LATER KEIFF')

    def test_underline_middle_mulit_words(self):
        # wildcard in the middle captures multiple words
        response = UnderlineAIMLTests.test_client.bot.ask_question("test", "GOODBYE KEIFF MATE SEEYA")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'LATER KEIFF MATE')
#!/usr/bin/python
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
In-tree paste-based test server.
This uses the *Python Paste* WSGI server. For more info, see:
http://pythonpaste.org/
Unfortunately, SSL support is broken under Python 2.6 with paste 1.7.2, see:
http://trac.pythonpaste.org/pythonpaste/ticket/314
"""
from os import path, getcwd
import optparse
from paste import httpserver
import paste.gzipper
from paste.urlmap import URLMap
from ipalib import api
class KRBCheater(object):
    """WSGI middleware that injects the server's Kerberos credential cache.

    Wraps another WSGI application and sets KRB5CCNAME in the request
    environ so the wrapped app authenticates with the default ccache.
    """

    def __init__(self, app):
        # app -- the WSGI application to wrap; its mount URL is re-exported
        # so the URLMap in the launcher can mount the wrapper in its place.
        self.app = app
        self.url = app.url
        # Resolved once at startup from the krb backend.
        self.ccname = api.Backend.krb.default_ccname()

    def __call__(self, environ, start_response):
        # Standard WSGI entry point: set the ccache, then delegate.
        environ['KRB5CCNAME'] = self.ccname
        return self.app(environ, start_response)
class WebUIApp(object):
    """Minimal WSGI application serving the static web-UI files from
    ``<cwd>/install/ui`` for the lite development server.

    Only files whose extension appears in EXTENSION_TO_MIME_MAP are served;
    everything else -- including paths that escape the UI directory --
    yields a plain-text 404.

    Fixes:
    - files are now opened in binary mode; text mode corrupted binary
      assets such as PNGs on platforms with newline translation and did
      not match the declared Content-Type;
    - PATH_INFO is untrusted input, so requests containing ``..`` (or any
      path resolving outside the UI root) are rejected instead of reading
      arbitrary files with an allowed extension.
    """

    INDEX_FILE = 'index.html'

    EXTENSION_TO_MIME_MAP = {
        'xhtml': 'text/html',
        'html': 'text/html',
        'js': 'text/javascript',
        'inc': 'text/html',
        'css': 'text/css',
        'png': 'image/png',
        'json': 'text/javascript',
    }

    def __init__(self):
        # Mount point used by the URLMap in the launcher below.
        self.url = '/ipa/ui'

    def __call__(self, environ, start_response):
        path_info = environ['PATH_INFO'].lstrip('/')
        if path_info == '':
            path_info = self.INDEX_FILE

        ui_root = path.join(getcwd(), 'install', 'ui')
        requested_file = path.normpath(path.join(ui_root, path_info))

        # Security: refuse anything that resolves outside the UI directory
        # (e.g. '../../etc/passwd.css' style traversal).
        if not requested_file.startswith(ui_root + path.sep):
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['NOT FOUND']

        extension = requested_file.rsplit('.', 1)[-1]
        if extension not in self.EXTENSION_TO_MIME_MAP:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['NOT FOUND']
        mime_type = self.EXTENSION_TO_MIME_MAP[extension]

        f = None
        try:
            # Binary mode: the body is served verbatim as bytes.
            f = open(requested_file, 'rb')
            api.log.info('Request file %s' % requested_file)
            start_response('200 OK', [('Content-Type', mime_type)])
            return [f.read()]
        except IOError:
            # missing/unreadable file -> 404 rather than a traceback
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['NOT FOUND']
        finally:
            if f is not None:
                f.close()
            api.log.info('Request done')
if __name__ == '__main__':
    # Command-line interface for the lite development server.
    parser = optparse.OptionParser()
    parser.add_option('--dev',
        help='Run WebUI in development mode (requires FireBug)',
        default=True,
        action='store_false',
        dest='prod',
    )
    parser.add_option('--host',
        help='Listen on address HOST (default 127.0.0.1)',
        default='127.0.0.1',
    )
    parser.add_option('--port',
        help='Listen on PORT (default 8888)',
        default=8888,
        type='int',
    )

    # Run in-process as a server with full startup tracebacks for debugging.
    api.env.in_server = True
    api.env.startup_traceback = True
    (options, args) = api.bootstrap_with_global_options(parser, context='lite')
    api.env._merge(
        lite_port=options.port,
        lite_host=options.host,
        webui_prod=options.prod,
        lite_pem=api.env._join('dot_ipa', 'lite.pem'),
    )
    api.finalize()

    # Mount the RPC backend and the static web UI on a single URL map,
    # each wrapped so requests carry the server's Kerberos ccache.
    urlmap = URLMap()
    apps = [
        ('IPA', KRBCheater(api.Backend.wsgi_dispatch)),
        ('webUI', KRBCheater(WebUIApp())),
    ]
    for (name, app) in apps:
        urlmap[app.url] = app
        api.log.info('Mounting %s at %s', name, app.url)

    # SSL is enabled only if the PEM file already exists on disk.
    if path.isfile(api.env.lite_pem):
        pem = api.env.lite_pem
    else:
        api.log.info('To enable SSL, place PEM file at %r', api.env.lite_pem)
        pem = None

    # Serve gzip-compressed responses until interrupted.
    httpserver.serve(paste.gzipper.middleware(urlmap),
        host=api.env.lite_host,
        port=api.env.lite_port,
        ssl_pem=pem,
    )
| hatchetation/freeipa | lite-server.py | Python | gpl-3.0 | 4,299 |
# POVME Pocket ID 1.0 is released under the GNU General Public License
# (see http://www.gnu.org/licenses/gpl.html).
# If you have any questions, comments, or suggestions, please don't hesitate to contact me,
# Jacob Durrant, at jdurrant [at] ucsd [dot] edu.
import sys
import numpy
from scipy import spatial
from scipy.cluster.vq import kmeans2
from scipy.spatial.distance import cdist
import textwrap
import getopt
from numpy.lib.recfunctions import append_fields
import multiprocessing
import warnings
# POVME Pocket ID 1.0 is a program for identifying protein pockets and generating
# appropriate pocket-encompassing inclusion spheres. These spheres, modified as required,
# can then be used as POVME input.
# Supress errors
numpy.seterr(all='ignore')
warnings.simplefilter("ignore") # no "One of the clusters is empty." warnings
# Some classes are required to support the loading and manipulation of 3D molecular information
class Information():
    """A class for storing and accessing information about the elements of a Molecule object"""

    def __init__(self, parent_molecule_object):
        """Initializes the Information class.

        Arguments:
        parent_molecule_object -- The Molecule object associated with this class.

        """
        self.__parent_molecule = parent_molecule_object
        # Names of the structured-array fields that must be parsed as
        # 8-byte ints and floats, respectively (used by FileIO).
        self.__constants = {}
        self.__constants['i8_fields'] = ['serial','resseq']
        self.__constants['f8_fields']= ['x','y','z','occupancy','tempfactor']
        self.__atom_information = None
        self.__coordinates = None

    # Simple accessors for the stored per-atom data.
    def get_atom_information(self): return self.__atom_information
    def get_coordinates(self): return self.__coordinates
    def get_constants(self): return self.__constants
    def set_atom_information(self,atom_information): self.__atom_information = atom_information
    def set_coordinates(self,coordinates): self.__coordinates = coordinates

    def get_bounding_box(self, selection = None, padding=0.0):
        """Calculates a box that bounds (encompasses) a set of atoms.

        Arguments:
        selection -- An optional numpy.array containing the indices of the atoms to consider. If ommitted, all atoms of the Molecule object will be considered.
        padding -- An optional float. The bounding box will extend this many angstroms beyond the atoms being considered.

        Returns:
        A numpy array representing two 3D points, (min_x, min_y, min_z) and (max_x, max_y, max_z), that bound the molecule.

        """
        if selection is None: selection = self.__parent_molecule.select_all()
        # Bug fix: the padding argument was previously accepted but
        # silently ignored; it now expands the box as documented.
        mins = numpy.min(self.__coordinates[selection], 0) - padding
        maxs = numpy.max(self.__coordinates[selection], 0) + padding
        return numpy.vstack((mins, maxs))
class FileIO():
    """A class for saving and loading molecular data into a Molecule object"""

    def __init__(self, parent_molecule_object):
        """Initializes the FileIO class.

        Arguments:
        parent_molecule_object -- The Molecule object associated with this class.

        """
        self.__parent_molecule = parent_molecule_object

    def load_pdb_into(self, filename):
        """Loads the molecular data contained in a pdb file into the current Molecule object.

        Arguments:
        filename -- A string, the filename of the pdb file.

        """
        # Fix: use a context manager so the handle is closed even if
        # parsing raises (the old open/close pair leaked on error).
        with open(filename, "r") as afile:
            self.load_pdb_into_using_file_object(afile)

    def load_pdb_into_using_file_object(self, file_obj):
        """Loads molecular data from a python file object (pdb formatted) into the current Molecule object. Note that most users will want to use the load_pdb_into() function instead, which is identical except that it accepts a filename string instead of a python file object.

        Arguments:
        file_obj -- A python file object, containing pdb-formatted data.

        """
        # Parse the fixed-width PDB columns into a structured array.
        #source_data = numpy.genfromtxt(file_obj, dtype="S6,S5,S5,S4,S2,S4,S4,S8,S8,S8,S6,S6,S10,S2,S2", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 4, 2, 4, 4, 8, 8, 8, 6, 6, 10, 2, 2])
        source_data = numpy.genfromtxt(file_obj, dtype="S6,S5,S5,S5,S1,S4,S4,S8,S8,S8,S6,S6,S10,S2,S3", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 5, 1, 4, 4, 8, 8, 8, 6, 6, 10, 2, 3])

        if source_data.ndim == 0: source_data = source_data.reshape(1, -1) # in case the pdb file has only one line

        # Keep only coordinate records (ATOM/HETATM lines).
        or_matrix = numpy.logical_or((source_data['record_name'] == "ATOM "), (source_data['record_name'] == "HETATM"))
        indices_of_atom_or_hetatom = numpy.nonzero(or_matrix)[0]
        self.__parent_molecule.set_atom_information(source_data[indices_of_atom_or_hetatom])

        # now, some of the data needs to change types
        # first, fields that should be numbers cannot be empty strings
        for field in self.__parent_molecule.get_constants()['i8_fields'] + self.__parent_molecule.get_constants()['f8_fields']:
            check_fields = self.__parent_molecule.get_atom_information()[field]
            check_fields = numpy.core.defchararray.strip(check_fields)
            indices_of_empty = numpy.nonzero(check_fields == '')[0]
            self.__parent_molecule.get_atom_information()[field][indices_of_empty] = '0'

        # now actually change the type: rebuild the dtype descriptor with
        # i8/f8 for the numeric fields, then cast the whole array once
        old_types = self.__parent_molecule.get_atom_information().dtype
        descr = old_types.descr
        for field in self.__parent_molecule.get_constants()['i8_fields']:
            index = self.__parent_molecule.get_atom_information().dtype.names.index(field)
            descr[index] = (descr[index][0], 'i8')
        for field in self.__parent_molecule.get_constants()['f8_fields']:
            index = self.__parent_molecule.get_atom_information().dtype.names.index(field)
            descr[index] = (descr[index][0], 'f8')
        new_types = numpy.dtype(descr)
        self.__parent_molecule.set_atom_information(self.__parent_molecule.get_atom_information().astype(new_types))

        # remove some of the fields that just contain empty data
        self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['empty', 'empty2']))

        # the coordinates need to be placed in their own special numpy array to facilitate later manipulation
        self.__parent_molecule.set_coordinates(numpy.vstack([self.__parent_molecule.get_atom_information()['x'], self.__parent_molecule.get_atom_information()['y'], self.__parent_molecule.get_atom_information()['z']]).T)
        self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['x', 'y', 'z'])) # now remove the coordinates from the atom_information object to save memory

        # string values in self.__parent_molecule.information.get_atom_information() should also be provided in stripped format for easier comparison
        fields_to_strip = ['name', 'resname', 'chainid', 'element']
        for f in fields_to_strip: self.__parent_molecule.set_atom_information(append_fields(self.__parent_molecule.get_atom_information(), f + '_stripped', data=numpy.core.defchararray.strip(self.__parent_molecule.get_atom_information()[f])))
class Selections():
    """A class for selecting atoms from a parent Molecule object."""

    ######## selections ########

    def __init__(self, parent_molecule_object):
        """Initializes the Selections class.

        Arguments:
        parent_molecule_object -- The Molecule object associated with this class.
        """
        self.__parent_molecule = parent_molecule_object

    def select_atoms(self, selection_criteria):
        """Select a set of atoms based on user-specified criteria.

        Arguments:
        selection_criteria -- A dictionary, where the keys correspond to keys in the
            self.__parent_molecule.information.get_atom_information() structured numpy
            array, and the values are lists of acceptable matches. The selection is a
            logical "AND" between dictionary entries, but "OR" within the value lists
            themselves. For example: {'atom':['CA','O'], 'chain':'A', 'resname':'PRO'}
            would select all atoms with the names CA or O that are located in the PRO
            residues of chain A.

        Returns:
        A numpy.array containing the indices of the atoms of the selection.
        """
        try:
            atom_info = self.__parent_molecule.get_atom_information()
            constants = self.__parent_molecule.get_constants()
            # start by assuming every atom is selected
            selection = numpy.ones(len(atom_info), dtype=bool)
            for key, vals in selection_criteria.items():
                # allow a bare scalar as shorthand for a one-element list
                if not isinstance(vals, (list, tuple)): vals = [vals]
                # coerce the values to the field's storage type so the
                # elementwise equality test below actually matches
                if key in constants['f8_fields']: vals = [float(v) for v in vals]
                elif key in constants['i8_fields']: vals = [int(v) for v in vals]
                else: vals = [v.strip() for v in vals]
                # "or" all the vals together
                subselection = numpy.zeros(len(atom_info), dtype=bool)
                for val in vals:
                    subselection = numpy.logical_or(subselection, (atom_info[key] == val))
                # now "and" that with everything else
                selection = numpy.logical_and(selection, subselection)
            # return the indices of the surviving atoms
            return numpy.nonzero(selection)[0]
        except Exception:
            # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate; typically triggered by an unknown field name
            print("ERROR: Could not make the selection. Existing fields:")
            print("\t" + ", ".join(self.__parent_molecule.get_atom_information().dtype.names))
            sys.exit(1)  # bug fix: exit non-zero on error (was 0, signalling success)

    def invert_selection(self, selection):
        """Inverts a user-defined selection (i.e., identifies all atoms that are not in the selection).

        Arguments:
        selection -- A numpy.array containing the indices of the user-defined selection.

        Returns:
        A numpy.array containing the indices of all atoms that are not in the user-defined selection.
        """
        # selection is a list of atom indices
        all_atoms = numpy.arange(0, len(self.__parent_molecule.get_atom_information()), 1, dtype=int)
        remaining_indicies = numpy.delete(all_atoms, selection)
        return remaining_indicies

    def select_all(self):
        """Selects all the atoms in a Molecule object.

        Returns:
        A numpy.array containing the indices of all atoms in the Molecule object.
        """
        return self.select_atoms({})

    def get_molecule_from_selection(self, selection):
        """Creates a Molecule from a user-defined atom selection.

        Arguments:
        selection -- A numpy.array containing the indices of the atoms in the user-defined selection.

        Returns:
        A Molecule object containing the atoms of the user-defined selection.
        Note that the hierarchy will have to be recalculated.
        """
        new_mol = Molecule()
        new_mol.set_coordinates(self.__parent_molecule.get_coordinates()[selection])
        new_mol.set_atom_information(self.__parent_molecule.get_atom_information()[selection])
        return new_mol
# here's the actual Molecule class
class Molecule:
    """The main pymolecule class: loads, saves, and manipulates molecular models.

    Almost every method is a thin delegation to one of the three helper
    objects (FileIO, Selections, Information) created in __init__.
    """

    def __init__(self):
        """Create the helper objects that do the actual work."""
        self.fileio = FileIO(self)
        self.selections = Selections(self)
        self.information = Information(self)

    # --- Information delegation ---

    def get_coordinates(self):
        """Return the nx3 coordinates array."""
        return self.information.get_coordinates()

    def get_atom_information(self):
        """Return the structured array of per-atom fields."""
        return self.information.get_atom_information()

    def get_constants(self):
        """Return the constants dictionary (field-type lists, etc.)."""
        return self.information.get_constants()

    def get_bounding_box(self, selection=None, padding=0.0):
        """Return the bounding box of a selection (all atoms by default)."""
        return self.information.get_bounding_box(selection, padding)

    def set_atom_information(self, atom_information):
        """Replace the structured array of per-atom fields."""
        self.information.set_atom_information(atom_information)

    def set_coordinates(self, coordinates):
        """Replace the nx3 coordinates array."""
        self.information.set_coordinates(coordinates)

    # --- File I/O delegation ---

    def load_pdb_into(self, filename):
        """Load a PDB file by name."""
        self.fileio.load_pdb_into(filename)

    def load_pdb_into_using_file_object(self, file_obj):
        """Load a PDB from an already-open file object."""
        self.fileio.load_pdb_into_using_file_object(file_obj)

    # --- Selections delegation ---

    def get_molecule_from_selection(self, selection):
        """Build a new Molecule from the atom indices in selection."""
        return self.selections.get_molecule_from_selection(selection)

    def select_atoms(self, selection_criteria):
        """Return atom indices matching the criteria dictionary."""
        return self.selections.select_atoms(selection_criteria)

    def invert_selection(self, selection):
        """Return indices of all atoms NOT in selection."""
        return self.selections.invert_selection(selection)

    def select_all(self):
        """Return indices of every atom."""
        return self.selections.select_all()

    ######## supporting functions ########

    def numpy_structured_array_remove_field(self, narray, field_names):
        """Remove specific field names from a structured numpy array.

        (Surprisingly, numpy has no built-in for this.)

        Arguments:
        narray -- A structured numpy array.
        field_names -- A list of strings, each one of narray's field names.

        Returns:
        A structured numpy array identical to narray, but without the
        fields listed in field_names.
        """
        surviving_fields = list(narray.dtype.names)
        for unwanted in field_names:
            surviving_fields.remove(unwanted)
        return narray[surviving_fields]
# Some classes are required for calculating convex hulls
class ConvexHull():
    """A class to handle convex-hull calculations.

    The hull is stored in self.hull as a list of triangles; each triangle is
    a tuple of three numpy arrays (the corners), ordered so that the cross
    product of its edges points outward from the hull.
    """

    def __init__(self, pts):
        """Initializes the ConvexHull class.

        Arguments:
        pts -- An nx3 array of x,y,z points to wrap in a convex hull.
        """
        akl_toussaint_pts = self.akl_toussaint(pts) # quickly reduces input size
        self.hull = self.gift_wrapping_3d(akl_toussaint_pts) # calculate convex hull using gift wrapping algorithm

    def inside_hull(self, our_point):
        """Determines if a point is inside the hull

        Arguments:
        our_point -- An x,y,z array

        Returns:
        A boolean, True if the point is inside the hull, False otherwise
        """
        return not self.outside_hull(our_point, self.hull)

    def outside_hull(self, our_point, triangles, epsilon=1.0e-5): # this one used internally
        """Given the hull as defined by a list of triangles, this definition will return whether a point is within these or not.

        Arguments:
        our_point -- an x,y,z array
        triangles -- a list of 3x3 triangles (corners ordered so the edge
            cross product points outward), as produced by gift_wrapping_3d
        epsilon -- needed for imprecisions in the floating-point operations.

        Returns:
        True if our_point exists outside of the hull, False otherwise
        """
        our_point = numpy.array(our_point) # convert it to an numpy.array
        for triangle in triangles:
            rel_point = our_point - triangle[0] # vector from triangle corner 0 to point
            vec1 = triangle[1] - triangle[0] # vector from triangle corner 0 to corner 1
            vec2 = triangle[2] - triangle[1] # vector from triangle corner 1 to corner 2
            our_cross = numpy.cross(vec1, vec2) # cross product between vec1 and vec2
            # NOTE(review): our_cross is recomputed into the dot product twice;
            # our_dot below is otherwise unused
            our_dot = numpy.dot(rel_point,our_cross) # dot product to determine whether cross is point inward or outward
            if numpy.dot(rel_point,our_cross) > epsilon: # if the dot is greater than 0, then its outside
                return True
        return False

    def get_seg_dict_num(self, seg_dict, seg_index):
        """seg_dict is a dictionary object that contains information about segments within the convex hull. The keys are 2x3 tuples, which represent two ends of a segment in space. The values of seg_dict are the number of times a segment has been part of a triangle, either 1 or 2. (Zero times would mean that the segment doesn't exist in the dictionary yet). This function looks up and returns the value of a seg_index from seg_dict

        Arguments:
        seg_dict -- the dictionary of segment 2x3 tuples as keys, integers as values
        seg_index -- the key of the dictionary member we are going to retrieve

        Returns:
        if seg_index exists in the keys of seg_dict, return the value. Otherwise, return 0
        """
        # canonicalize the key ordering: we always store the endpoint with the
        # greater x-value first, so (A,B) and (B,A) map to the same entry
        if seg_index[0][0] > seg_index[1][0]: # we want the index with the greater x-value, so we don't get identical segments in the dictionary more than once
            index = seg_index
        else:
            index = seg_index[::-1]
        if index in seg_dict:
            return seg_dict[index]
        else:
            return 0

    def increment_seg_dict(self, seg_dict, seg_index):
        """seg_dict is a dictionary object that contains information about segments within the convex hull. The keys are 2x3 tuples, which represent two ends of a segment in space. The values of seg_dict are the number of times a segment has been part of a triangle, either 1 or 2. (Zero times would mean that the segment doesn't exist in the dictionary yet). This function increments the values within seg_dict, or initiates them if they dont exist yet.

        Arguments:
        seg_dict -- the dictionary of segment 2x3 tuples as keys, integers as values
        seg_index -- the key of the dictionary member we are going to increment
        """
        # same canonical key ordering as get_seg_dict_num
        if seg_index[0][0] > seg_index[1][0]: # we want the index with the greater x-value, so we don't get identical segments in the dictionary more than once
            index = seg_index
        else:
            index = seg_index[::-1]
        #"putting index:", index, "into seg_dict because", index[0][0], ">", index[1][0]
        if index in seg_dict: # if the entry already exists in seg_dict
            seg_dict[index] += 1 # increment
        else:
            seg_dict[index] = 1 # initiate with a value of 1 because it now exists on a triangle
        return

    def gift_wrapping_3d(self, raw_points):
        """Gift wrapping for 3d convex hull

        Arguments:
        raw_points -- A nx3 array of points, where each row corresponds to an x,y,z point coordinate

        Returns:
        A convex hull represented by a list of triangles. Each triangle is a 3x3 array, where each row is an x,y,z coordinate in space. The 3 rows describe the location of the 3 corners of the triangle. Each of the 3 points are arranged so that a cross product will point outwards from the hull
        """
        n = numpy.shape(raw_points)[0] # number of points
        point1 = raw_points[0] # take the first point
        xaxis = numpy.array([1,0,0]) # create a ref vector pointing along x axis
        maxx = raw_points[0][0] # initiate highest x value
        points = [] # a list of tuples for easy dictionary lookup
        seg_dict = {} # a dictionary that contains the number of triangles a seg is in
        for i in range(n): # find the n with the largest x value
            point = tuple(raw_points[i])
            points.append(point)
            if point[0] > maxx:
                maxx = point[0]
                point1 = raw_points[i]
        best_dot = -1.0 # initiate dot relative to x-axis
        # NOTE(review): raw_points[1] raises IndexError when only one point
        # is supplied — presumably inputs always have >= 2 points; verify.
        point2 = numpy.array(raw_points[1]) # initiate best segment
        # find first/best segment: the edge from point1 whose unit vector is
        # most aligned with the +x axis is guaranteed to lie on the hull
        for i in range(n):
            pointi = raw_points[i]
            if numpy.array_equal(pointi, point1): continue
            diff_vec = pointi - point1
            diff_len = numpy.linalg.norm(diff_vec)
            test_dot = numpy.dot(diff_vec/diff_len,xaxis)
            if test_dot > best_dot:
                best_dot = test_dot
                point2 = pointi
        point1 = tuple(point1)
        point2 = tuple(point2)
        ref_vec = xaxis
        # now find the best triangle
        triangles = []
        seg_list = set([(point1, point2),]) # frontier of hull edges still needing a second triangle
        norm_dict = {(point1,point2):xaxis} # outward normal associated with each stored edge
        self.increment_seg_dict( seg_dict, (point1,point2) )
        counter = 0
        first_time = True
        # NOTE(review): counter and section1/2/3 appear to be leftover
        # bookkeeping/profiling variables; nothing below reads them
        section1 = 0.0
        section2 = 0.0
        section3 = 0.0
        while seg_list: # as long as there are unexplored edges of triangles in the hull...
            counter += 1
            seg = seg_list.pop() # take a segment out of the seg_list
            tuple1 = seg[0] # the two ends of the segment
            tuple2 = seg[1]
            point1 = numpy.array(seg[0])
            point2 = numpy.array(seg[1])
            result = self.get_seg_dict_num( seg_dict, (seg[0],seg[1]) )
            if result >= 2: # then we already have 2 triangles on this segment
                continue # forget about drawing a triangle for this seg
            ref_vec = norm_dict[(seg[0],seg[1])] # get the norm for a triangle that the segment is part of
            best_dot_cross = -1.0
            best_point = None
            for i in range(n): # look at each point
                pointi = raw_points[i]
                diff_vec1 = point2 - point1
                diff_vec2 = pointi - point2
                # inlined cross product (same result as numpy.cross, written out
                # component-by-component)
                test_cross = numpy.array([diff_vec1[1]*diff_vec2[2]-diff_vec1[2]*diff_vec2[1], diff_vec1[2]*diff_vec2[0]-diff_vec1[0]*diff_vec2[2], diff_vec1[0]*diff_vec2[1]-diff_vec1[1]*diff_vec2[0]]) # cross product
                test_cross_len = numpy.sqrt(test_cross[0]*test_cross[0] + test_cross[1]*test_cross[1] + test_cross[2]*test_cross[2]) #numpy.linalg.norm(test_cross) # get the norm of the cross product
                if test_cross_len <= 0.0: continue # degenerate (collinear) candidate
                test_cross = test_cross / test_cross_len
                dot_cross = numpy.dot(test_cross, ref_vec)
                if dot_cross > best_dot_cross:
                    best_cross = test_cross
                    best_dot_cross = dot_cross
                    best_point = pointi
                    tuple3 = points[i]
            # NOTE(review): if no candidate ever passed the tests above,
            # best_cross/tuple3 are stale or undefined here — presumably
            # unreachable for valid 3D inputs; verify.
            point3 = best_point
            if self.get_seg_dict_num( seg_dict, (tuple2,tuple1) ) > 2: continue
            if self.get_seg_dict_num( seg_dict, (tuple3,tuple2) ) > 2: continue
            if self.get_seg_dict_num( seg_dict, (tuple1,tuple3) ) > 2: continue
            # now we have a triangle from point1 -> point2 -> point3
            # must test each edge
            if first_time:
                # only the very first triangle registers its seed edge here;
                # later edges were already counted when they were created
                self.increment_seg_dict( seg_dict, (tuple2,tuple1) )
                seg_list.add((tuple2, tuple1))
                norm_dict[(tuple2,tuple1)] = best_cross
            self.increment_seg_dict( seg_dict, (tuple3,tuple2) )
            seg_list.add((tuple3, tuple2))
            norm_dict[(tuple3,tuple2)] = best_cross
            self.increment_seg_dict( seg_dict, (tuple1,tuple3) )
            seg_list.add((tuple1, tuple3))
            norm_dict[(tuple1,tuple3)] = best_cross
            triangles.append((numpy.array(tuple1),numpy.array(tuple2),numpy.array(tuple3)))
            first_time = False
        return triangles

    def akl_toussaint(self, points):
        """The Akl-Toussaint Heuristic. Given a set of points, this definition will create an octahedron whose corners are the extremes in x, y, and z directions. Every point within this octahedron will be removed because they are not part of the convex hull. This causes any expected running time for a convex hull algorithm to be reduced to linear time.

        Arguments:
        points -- An nx3 array of x,y,z coordinates

        Returns:
        All members of original set of points that fall outside the Akl-Toussaint octahedron
        """
        # sentinels chosen so any real coordinate replaces them immediately
        x_high = (-1e99,0,0)
        x_low = (1e99,0,0)
        y_high = (0,-1e99,0)
        y_low = (0,1e99,0)
        z_high = (0,0,-1e99)
        z_low = (0,0,1e99)
        for point in points: # find the corners of the octahedron
            if point[0] > x_high[0]: x_high = point
            if point[0] < x_low[0]: x_low = point
            if point[1] > y_high[1]: y_high = point
            if point[1] < y_low[1]: y_low = point
            if point[2] > z_high[2]: z_high = point
            if point[2] < z_low[2]: z_low = point
        octahedron = [ # define the triangles of the surfaces of the octahedron
            numpy.array((x_high,y_high,z_high)),
            numpy.array((x_high,z_low,y_high)),
            numpy.array((x_high,y_low,z_low)),
            numpy.array((x_high,z_high,y_low)),
            numpy.array((x_low,y_low,z_high)),
            numpy.array((x_low,z_low,y_low)),
            numpy.array((x_low,y_high,z_low)),
            numpy.array((x_low,z_high,y_high)),
        ]
        new_points = [] # everything outside of the octahedron
        # negative epsilon keeps borderline points (the octahedron corners
        # themselves) in the output, since they may be on the hull
        for point in points: # now check to see if a point is inside or outside the octahedron
            outside = self.outside_hull(point, octahedron, epsilon=-1.0e-5)
            if outside:
                new_points.append(point)
        return numpy.array(new_points) # convert back to an array
# Some classes are required for multiprocessing
class MultiThreading():
    """A class for multi-processor support.

    Despite the name, this uses multiprocessing (separate processes), not
    threads. Construction is synchronous: __init__ returns only after all
    worker processes have finished, with the combined output in self.results.
    """

    results = []  # class-level default; __init__ rebinds an instance list

    def __init__(self, inputs, num_processors, task_class_name):
        """Initializes the MultiThreading class.

        Arguments:
        inputs -- A list of work items; each worker receives a round-robin slice.
        num_processors -- Number of processes to launch; <= 0 means use
            multiprocessing.cpu_count().
        task_class_name -- A GeneralTask subclass (the class itself, not an
            instance); its runit() is the process target.
        """
        self.results = []
        # first, if num_processors <= 0, determine the number of processors to use programatically
        if num_processors <= 0: num_processors = multiprocessing.cpu_count()
        # reduce the number of processors if too many have been specified
        if len(inputs) < num_processors: num_processors = len(inputs)
        # now, divide the inputs into the appropriate number of processors
        # (round-robin: item k goes to worker k % num_processors)
        inputs_divided = {}
        for t in range(num_processors): inputs_divided[t] = []
        for t in range(0, len(inputs), num_processors):
            for t2 in range(num_processors):
                index = t + t2
                if index < len(inputs): inputs_divided[t2].append(inputs[index])
        # now, run each division on its own processor
        running = multiprocessing.Value('i', num_processors)  # count of still-active workers
        mutex = multiprocessing.Lock()
        # NOTE(review): arrays is populated but never read afterwards —
        # appears to be dead code
        arrays = []
        threads = []
        for i in range(num_processors):
            threads.append(task_class_name())
            arrays.append(multiprocessing.Array('i',[0, 1]))
        results_queue = multiprocessing.Queue() # to keep track of the results
        processes = []
        for i in range(num_processors):
            p = multiprocessing.Process(target=threads[i].runit, args=(running, mutex, results_queue, inputs_divided[i]))
            p.start()
            #p.join()
            processes.append(p)
        # NOTE(review): this is a busy-wait spin on the shared counter
        # (burns a core); joining the processes would be the cheaper wait
        while running.value > 0: is_running = 0 # wait for everything to finish
        # compile all results: one queue entry (a list) per worker, in
        # whichever order the workers happened to finish
        for thread in threads:
            chunk = results_queue.get()
            self.results.extend(chunk)
class GeneralTask:
    """Determines the specific calculations performed when multi-processor
    support is used. More specific task classes inherit from this one and
    override value_func().
    """

    # kept for backward compatibility with any code reading the class attribute
    results = []

    def __init__(self):
        # bug fix: give each task instance its own results list. The original
        # relied on the mutable class-level list, which is shared by every
        # instance created in the same process.
        self.results = []

    def runit(self, running, mutex, results_queue, items):
        """Process every item, then signal completion and publish results.

        Arguments:
        running -- A shared counter (multiprocessing.Value-like) of active workers.
        mutex -- A lock guarding the decrement of running.
        results_queue -- A queue (anything with put()) receiving this task's results list.
        items -- The list of work items assigned to this worker.
        """
        for item in items:
            self.value_func(item, results_queue)
        # decrement the shared active-worker counter under the lock
        mutex.acquire()
        running.value -= 1
        mutex.release()
        results_queue.put(self.results)

    def value_func(self, item, results_queue):  # this is the function that changes through inheritance
        """Default per-item work; subclasses override this."""
        # parenthesized print works identically in Python 2 and 3 for a
        # single argument (the original "print item" was Python-2-only)
        print(item)  # here's where you do something
        self.results.append(item)  # here save the results for later compilation
# You'll also need a class representing a box of points, with associated definitions
class BoxOfPoints():
    """A class representing a box of equidistant points.

    The grid lives in self.points (an nx3 array). Several methods rely on the
    module-level params dictionary ('processors') and on the MultiThreading /
    GeneralTask machinery defined elsewhere in this file.
    """

    def __init__(self, box, reso):
        """Initialize the class.

        Arguments:
        box -- A numpy array representing two 3D points, (min_x, min_y, min_z) and (max_x, max_y, max_z), that define a box.
        reso -- The space between the points of the box, in the X, Y, and Z direction.
        """
        self.write_pdbs = write_pdbs()  # helper used by to_pdb()
        # snap the lower corner onto the grid defined by reso
        min_x = self.__snap_float(box[0][0], reso)
        min_y = self.__snap_float(box[0][1], reso)
        min_z = self.__snap_float(box[0][2], reso)
        # pad the upper corner by slightly more than one spacing so the
        # snapped maximum is included in the half-open mgrid ranges below
        max_x = self.__snap_float(box[1][0], reso) + 1.1 * reso
        max_y = self.__snap_float(box[1][1], reso) + 1.1 * reso
        max_z = self.__snap_float(box[1][2], reso) + 1.1 * reso
        x, y, z = numpy.mgrid[min_x:max_x:reso, min_y:max_y:reso, min_z:max_z:reso]
        # NOTE(review): numpy.array(zip(...)) only works under Python 2;
        # Python 3 would need list(zip(...)) or numpy.column_stack
        self.points = numpy.array(zip(x.ravel(), y.ravel(), z.ravel()))

    def __snap_float(self, val, reso):
        """Snaps an arbitrary point to the nearest grid point.

        Arguments:
        val -- A numpy array corresponding to a 3D point (or a scalar coordinate).
        reso -- The resolution (distance in the X, Y, and Z directions between adjacent points) of the grid.

        Returns:
        A numpy array corresponding to a 3D point near val that is on a nearby grid point.
        """
        # floor-snap: always rounds toward the grid point at or below val
        return numpy.floor(val / reso) * reso

    def remove_points_outside_convex_hull(self, hull):
        """Removes box points that are outside a convex hull.

        Arguments:
        hull -- The convex hull (a ConvexHull instance).
        """
        # split the point array into one chunk per processor; each chunk
        # carries a reference to the hull
        chunks = [(hull, t) for t in numpy.array_split(self.points, params['processors'])]
        tmp = MultiThreading(chunks, params['processors'], self.__MultiIdHullPts)
        self.points = numpy.vstack(tmp.results)

    class __MultiIdHullPts(GeneralTask):
        """A class to remove points outside a convex hull using multiple processors."""

        def value_func(self, items, results_queue): # so overwriting this function
            """The calculations that will run on a single processor to remove points outside a convex hull."""
            hull = items[0]
            some_points = items[1]
            # Note this would be much faster if it were matrix-based intead of point-by-point based.
            new_pts = [] # Can preallocate numpy array size because I don't know beforehand how many points will be in the hull
            for pt in some_points:
                if hull.inside_hull(pt) == True: new_pts.append(pt)
            # only publish non-empty chunks; vstack in the caller would
            # choke on empty arrays
            if len(new_pts) == 0: pass # here save the results for later compilation
            else: self.results.append(numpy.array(new_pts))

    def remove_all_points_close_to_other_points(self, other_points, dist_cutoff):
        """Removes all points in this box that come within the points specified in a numpy array

        Arguments:
        other_points -- A numpy array containing the other points.
        dist_cutoff -- A float, the cutoff distance to use in determining whether or not box points will be removed.
        """
        box_of_pts_distance_tree = spatial.KDTree(self.points) # note, in newer versions of scipy use cKDTree
        chunks = [(box_of_pts_distance_tree, dist_cutoff, t) for t in numpy.array_split(other_points, params['processors'])]
        tmp = MultiThreading(chunks, params['processors'], self.__MultiGetClosePoints)
        indicies_of_box_pts_close_to_molecule_points = numpy.unique(numpy.hstack(tmp.results))
        self.points = numpy.delete(self.points, indicies_of_box_pts_close_to_molecule_points, axis=0) # remove the ones that are too close to molecule atoms

    class __MultiGetClosePoints(GeneralTask):
        """A class to remove box points that are near other, user-specified points, using multiple processors."""

        def value_func(self, items, results_queue): # so overwriting this function
            """The calculations that will run on a single processor."""
            box_of_pts_distance_tree = items[0]
            dist_cutoff = items[1]
            other_points = items[2]
            other_points_distance_tree = spatial.KDTree(other_points) # note, in newer versions of scipy use cKDTree
            sparce_distance_matrix = other_points_distance_tree.sparse_distance_matrix(box_of_pts_distance_tree, dist_cutoff)
            # the CSR column indices are the box-point indices within dist_cutoff
            indicies_of_box_pts_close_to_molecule_points = numpy.unique(sparce_distance_matrix.tocsr().indices) #tocsr()
            self.results.append(indicies_of_box_pts_close_to_molecule_points)

    def to_pdb(self, let="X"):
        """Converts the points in this box into a PDB representation.

        Arguments:
        let -- An optional string, the chain ID to use. "X" by default.

        Returns:
        A PDB-formatted string.
        """
        return self.write_pdbs.numpy_to_pdb(self.points, let)

    def expand_around_existing_points(self, num_pts, reso):
        """Add points to the current box that surround existing points, essentially increasing the resolution of the box.

        Arguments:
        num_pts -- An int, the number of points to place on each side of the existing points, in the X, Y, and Z directions.
        reso -- The distance between adjacent added points.
        """
        new_pts = []
        # offsets from -num_pts*reso to +num_pts*reso inclusive (the 0.01
        # fudge keeps the endpoint despite float rounding)
        i = numpy.arange(-num_pts * reso, num_pts * reso + reso*0.01, reso)
        for xi in i:
            for yi in i:
                for zi in i:
                    vec = numpy.array([xi, yi, zi])
                    new_pts.append(self.points + vec)
        self.points = numpy.vstack(new_pts)
        self.__unique_points()  # shifted copies overlap; deduplicate

    def __unique_points(self):
        """Removes duplicate points (rows) from self.points, in place.

        Works by viewing each row as a single opaque void scalar so that
        numpy.unique can compare whole rows at once.
        """
        b = numpy.ascontiguousarray(self.points).view(numpy.dtype((numpy.void, self.points.dtype.itemsize * self.points.shape[1])))
        unique_points = numpy.unique(b).view(self.points.dtype).reshape(-1, self.points.shape[1])
        self.points = unique_points

    def filter_isolated_points_until_no_change(self, reso, number_of_neighbors):
        """Keep removing points that don't have enough neighbors, until no such points exist.

        Arguments:
        reso -- The distance between adjacent points.
        number_of_neighbors -- The minimum number of permissible neighbors.
        """
        # calculate the pairwise distances between all box points
        box_of_pts_distance_tree = spatial.KDTree(self.points) # note, in newer versions of scipy use cKDTree
        # debug output (Python 2 print statement)
        print self.points
        self.dist_matrix = box_of_pts_distance_tree.sparse_distance_matrix(box_of_pts_distance_tree, reso * numpy.sqrt(3.0) * 1.1).todense() # so kiddy-corner counted as a neighbor
        # note that the diagnol of self.dist_matrix is zero, as expected, but ones with dist > reso * numpy.sqrt(3.0) * 1.1 are also 0. Pretty convenient.
        num_pts = 0
        while num_pts != len(self.points): # keep running the pass until there are no changes (points are stable)
            num_pts = len(self.points)
            # identify the points that have enough neighbors
            columns_nonzero_count = numpy.array((self.dist_matrix != 0).sum(0))[0]
            columns_nonzero_count_match_criteria = (columns_nonzero_count >= number_of_neighbors)
            columns_nonzero_count_match_criteria_index = numpy.nonzero(columns_nonzero_count_match_criteria)
            self.__keep_limited_points(columns_nonzero_count_match_criteria_index)

    def __keep_limited_points(self, pt_indices):
        """A support function: keep only the points at pt_indices and shrink
        the cached distance matrix to match (rows, then columns)."""
        # keep only those points
        self.points = self.points[pt_indices]
        # update the distance matrix so it doesn't need to be recalculated
        self.dist_matrix = self.dist_matrix[pt_indices,:][0]
        self.dist_matrix = self.dist_matrix.T
        self.dist_matrix = self.dist_matrix[pt_indices,:][0]
        #self.dist_matrix = self.dist_matrix.T # not necessary because it's a symetrical matrix

    def separate_out_pockets(self):
        """Separate the points according to the pocket they belong to. Determined by looking at patches of contiguous points.

        Returns:
        A list of point arrays, each array corresponding to the points of a separate pocket, sorted largest first.
        """
        all_pockets = []
        while len(self.points) != 0:
            # flood-fill from point 0: repeatedly absorb every point adjacent
            # (nonzero dist_matrix entry) to the current pocket until stable
            pocket_indexes = numpy.array([0])
            num_pts_in_pocket = 0
            while num_pts_in_pocket != len(pocket_indexes):
                num_pts_in_pocket = len(pocket_indexes)
                # get all the adjacent points
                pocket_indexes = numpy.hstack((pocket_indexes,numpy.array(numpy.nonzero(self.dist_matrix[pocket_indexes, :])[1])[0]))
                pocket_indexes = numpy.unique(pocket_indexes)
            pocket = self.points[pocket_indexes,:]
            all_pockets.append(pocket)
            self.__delete_limited_points(pocket_indexes)
        # sort the pockets by size
        all_pockets = sorted(all_pockets, key=lambda pts: -len(pts))
        return all_pockets

    def __delete_limited_points(self, pt_indices):
        """A support function: remove the points at pt_indices and shrink the
        cached distance matrix to match (rows, then columns)."""
        # keep only those points
        self.points = numpy.delete(self.points, pt_indices, axis=0)
        # update the distance matrix so it doesn't need to be recalculated
        self.dist_matrix = numpy.delete(self.dist_matrix,pt_indices, axis=0)
        self.dist_matrix = self.dist_matrix.T
        self.dist_matrix = numpy.delete(self.dist_matrix,pt_indices, axis=0)
# Also, you need a class to save numpy arrays as PDB files
class write_pdbs():
    """Converts numpy arrays of 3D points into PDB-formatted strings."""

    def __create_pdb_line(self, numpy_array, index, resname, letter):
        """Format a single 3D point as one PDB ATOM record.

        Arguments:
        numpy_array -- A 1x3 numpy.array representing a 3D point.
        index -- An integer, the atom index to use in the string.
        resname -- A string, the RESNAME to use.
        letter -- A string, the atom name/chain/etc to use for the output.

        Returns:
        A string, formatted according to the PDB standard.
        """
        # pad 2D points with z = 0.0
        if len(numpy_array) == 2:
            numpy_array = numpy.array([numpy_array[0], numpy_array[1], 0.0])
        # flatten a 1x3 row matrix down to a plain vector
        if numpy_array.shape == (1, 3):
            numpy_array = numpy_array[0]
        # assemble the fixed-width columns; indices wrap so they always fit
        fields = [
            "ATOM ",
            str(index % 999999).rjust(6),
            letter.rjust(5),
            resname.rjust(4),
            letter.rjust(2),
            str(index % 9999).rjust(4),
            ("%.3f" % numpy_array[0]).rjust(12),
            ("%.3f" % numpy_array[1]).rjust(8),
            ("%.3f" % numpy_array[2]).rjust(8),
            letter.rjust(24),
        ]
        return "".join(fields)

    def numpy_to_pdb(self, narray, letter, resname=""):
        """Convert an array of points to PDB-formatted text.

        Arguments:
        narray -- A nx3 numpy.array representing 3D points.
        letter -- A string, the atom name/chain/etc to use for the output.
        resname -- An optional string, the RESNAME to use for the output.

        Returns:
        A string, formatted according to the PDB standard.
        """
        # a single point gets a single line with the fixed resname AAA
        if len(narray.flatten()) == 3:
            return self.__create_pdb_line(narray, 1, "AAA", letter) + "\n"
        if resname == "":
            # cycle through every three-letter residue name AAA..ZZZ,
            # except XXX, which is reserved for empty atoms
            alphabet = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
            resnames = [a + b + c for a in alphabet for b in alphabet for c in alphabet]
            resnames.remove("XXX")
        else:
            resnames = [resname]
        lines = []
        for idx, point in enumerate(narray):
            lines.append(self.__create_pdb_line(point, idx + 1, resnames[idx % len(resnames)], letter) + "\n")
        return "".join(lines)
####### Now the meat of the program ########

# First, show a brief help file describing the command-line arguments.
help_lines = []
help_lines.append('')
help_lines.append('POVME Pocket ID 1.0')
help_lines.append('===================')
help_lines.append('')
help_lines.append('Required command-line parameters:')
help_lines.append('')
help_lines.append('--filename: The PDB filename to be analyzed.')
help_lines.append('')
help_lines.append('Optional command-line parameters:')
help_lines.append('')
help_lines.append('--pocket_detection_resolution: The distance between probe points used to initially find the pockets (4.0 by default).')
help_lines.append('--pocket_measuring_resolution: The distance between probe points used to measure identified pockets in greater detail. Should divide --pocket_detection_resolution evenly. (1.0 by default).')
help_lines.append('--clashing_cutoff: In measuring the pockets, any points closer than this cutoff to receptor atoms will be removed. (3.0 by default).')
help_lines.append('--number_of_neighbors: In measuring the pockets, any points with fewer than this number of neighbors will be deleted. These are usually just stray points that don\'t belong to any real pocket. (4 by default).')
help_lines.append('--processors: The number of processors to use. (1 by default).')
help_lines.append('--number_of_spheres: The number of inclusion spheres to generate for each pocket. (5 by default).')
help_lines.append('--sphere_padding: How much larger the radius of the inclusion spheres should be, beyond what is required to encompass the identified pockets. (5.0 by default).')
help_lines.append('')
help_lines.append('Example:')
help_lines.append('')
help_lines.append('python pocket_id.py --filename rel1_example.pdb --pocket_detection_resolution 4.0 --pocket_measuring_resolution 1.0 --clashing_cutoff 3.0 --number_of_neighbors 4 --processors 1 --number_of_spheres 5 --sphere_padding 5.0 ')
help_lines.append('')

# print a line of text wrapped to the terminal width (Python 2 print statement)
def printit(text): print textwrap.fill(text, initial_indent='', subsequent_indent=' ')

for line in help_lines: printit(line)

# with no command-line arguments, just show the help text and quit
if len(sys.argv[1:]) == 0: sys.exit(0)

# Now, parse the command-line arguments; these are the defaults
params = {
    'filename': '',
    'pocket_detection_resolution': 4.0,
    'pocket_measuring_resolution': 1.0,
    'clashing_cutoff': 3.0,
    'number_of_neighbors': 4,
    'processors': 1,
    'number_of_spheres': 5,
    'sphere_padding': 5.0
}
# getopt returns (opt, value) pairs; strip the leading "--" to get the params key
for item in getopt.getopt(sys.argv[1:], '', [ 'filename=', 'pocket_detection_resolution=', 'pocket_measuring_resolution=', 'clashing_cutoff=', 'number_of_neighbors=', 'processors=', 'number_of_spheres=', 'sphere_padding=' ])[0]: params[item[0].replace('--','')] = item[1]
if params['filename'] == '':
    print "ERROR: Must specify the --filename parameter!"
    print
    sys.exit(0)
# getopt delivered everything as strings; coerce the numeric parameters
for key in ['number_of_neighbors', 'processors', 'number_of_spheres']: params[key] = int(params[key])
for key in ['pocket_detection_resolution', 'pocket_measuring_resolution', 'clashing_cutoff', 'sphere_padding']: params[key] = float(params[key])
print 'Specified command-line arguments:'
print
for key in params: print " --" + key + ': ' + str(params[key])
print

# Step 1: Load in the protein
printit("Step 1. Loading the PDB file " + params['filename'] + "...")
molecule = Molecule()
molecule.load_pdb_into(params['filename'])

# Step 2: Get rid of hydrogen atoms. They just slow stuff down.
print "Step 2. Removing hydrogen atoms..."
# select the hydrogens, invert to get everything else, and rebuild the molecule
sel = molecule.selections.select_atoms({'element_stripped':'H'})
sel = molecule.selections.invert_selection(sel)
molecule = molecule.selections.get_molecule_from_selection(sel)

# Step 3: Calculate the convex hull of the protein alpha carbons.
print "Step 3. Calculating the convex hull of the PDB file..."
molecule_alpha_carbons = molecule.selections.get_molecule_from_selection(molecule.selections.select_atoms({'name_stripped':'CA'})) # Get a version of the protein with just the alpha carbons. In my experience, that's better for convex hull identification. Otherwise the program identifies shallow contours in the protein surface as pockets.
molecule_alpha_carbons = molecule.selections.get_molecule_from_selection(molecule.selections.select_atoms({'name_stripped':'CA'})) # Get a version of the protein with just the alpha carbons. In my experience, that's better for convex hull identification. Otherwise the program identifies shallow contors in the protein surface as pockets.
convex_hull_3d = ConvexHull(molecule_alpha_carbons.get_coordinates())
# Step 4. Get a box of equispaced points that surround the protein, snapped to reso. I'm putting a whole bunch of other functions in this class as well to manipulate the points of this box.
printit("Step 4. Making a box of points spaced " + str(params['pocket_detection_resolution']) + " A apart that entirely encompasses the protein...")
box_pts = BoxOfPoints(molecule.get_bounding_box(), params['pocket_detection_resolution'] * 4) # note that the initial box is low resolution (* 4) so convex hull will be very fast
# Step 5. Remove points outside the convex hull. Gradually fill in protein-occupying region with denser point fields. Faster this way, I think.
printit("Step 5. Removing points that fall outside the protein's convex hull...")
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
box_pts.expand_around_existing_points(2, params['pocket_detection_resolution'] * 2)
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
box_pts.expand_around_existing_points(2, params['pocket_detection_resolution'])
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
# Step 6. Remove the points in this box that are too close to protein atoms.
# For simplicity's sake, don't worry about atomic radii. Just a simple cutoff.
printit("Step 6. Removing points that come within " + str(params['clashing_cutoff']) + " A of any protein atom...")
box_pts.remove_all_points_close_to_other_points(molecule.get_coordinates(), params['clashing_cutoff'])
# Step 7. Now surround each of these points with higher density points that in the same regions. This is for getting a more detailed view of the identified pockets.
if params['pocket_measuring_resolution'] != params['pocket_detection_resolution']:
printit("Step 7. Flooding the identified pockets with points spaced " + str(params['pocket_measuring_resolution']) + " A apart for a more detailed measurement of the pocket volume...")
print "\tAdding points..."
box_pts.expand_around_existing_points(params['pocket_detection_resolution']/params['pocket_measuring_resolution'], params['pocket_measuring_resolution'])
printit("\tRemoving points that fall outside the convex hull...")
box_pts.remove_points_outside_convex_hull(convex_hull_3d)
printit("\tRemoving points within " + str(params['clashing_cutoff']) + " A of any protein atom...")
box_pts.remove_all_points_close_to_other_points(molecule.get_coordinates(), params['clashing_cutoff'])
# Step 8. Now start doing a repeated pass filter (keep repeating until no change). Don't know if this is a high pass or low pass filter. I've heard these terms, though, and they sound cool.
printit("Step 8. Removing points until all points have at least " + str(params['number_of_neighbors']) + " neighbors...")
box_pts.filter_isolated_points_until_no_change(params['pocket_measuring_resolution'], params['number_of_neighbors'])
# Step 9. Separate out the pockets so they can be considered in isolation.
printit("Step 9. Partitioning the remaining points by pocket...")
all_pockets = box_pts.separate_out_pockets()
# Step 10. Get povme spheres that encompass each pocket, write pockets to seprate pdb files
printit("Step 10. Saving the points of each pocket...")
let_ids = ['A','B','C','D','E','F','G','H','I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
write_some_pdbs = write_pdbs()
for i,pts in enumerate(all_pockets):
filename = 'pocket' + str(i+1) + '.pdb'
printit("\tSaving " + filename + "...")
f = open(filename,'w')
f.write("REMARK Pocket #" + str(i+1) + "\n")
# do I need to whiten stuff here? not sure what whitening is.
centroids, idx = kmeans2(pts, params['number_of_spheres'])
pts_string = ""
for cluster_num in range(params['number_of_spheres']):
indexes_for_this_cluster = numpy.nonzero(idx == cluster_num)[0]
cluster_pts = pts[indexes_for_this_cluster]
cluster_center = numpy.mean(cluster_pts, axis=0)
try:
cluster_radius = numpy.max(cdist(numpy.array([cluster_center]), cluster_pts))
f.write("REMARK CHAIN " + let_ids[cluster_num] + ": PointsInclusionSphere " + str(numpy.round(cluster_center[0],2)) + ' ' + str(numpy.round(cluster_center[1],2)) + ' ' + str(numpy.round(cluster_center[2],2)) + ' ' + str(numpy.round(cluster_radius + params['sphere_padding'],2)) + "\n")
pts_string = pts_string + write_some_pdbs.numpy_to_pdb(cluster_pts, let_ids[cluster_num])
except:
print
printit("There was an error, but I don't think it was catastrophic. Could be that one of the pocket clusters was empty.")
print
f.write(pts_string)
f.close()
print
printit("Done. See the pocket{n}.pdb files. Using a visualization program like VMD, identify which of these files includes the pocket you wish to measure. POVME Pocket ID has divided each pocket volume into " + str(params['number_of_spheres']) + " sections (i.e., PDB chains). In some cases, the pocket you're interested in might be included in a larger identified pocket, so feel free to use only certain sections of a given pocket as well.")
printit("The POVME PointsInclusionSphere commands are located in the header of each pocket{n}.pdb file. A text editor can be used to copy and paste these commands into a POVME input file.")
print
| POVME/POVME | POVME/POVME_pocket_id.py | Python | mit | 51,539 |
# $Id: __init__.py 4802 2006-11-12 18:02:17Z goodger $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.
Usage
=====
1. Create a parser::
parser = docutils.parsers.rst.Parser()
Several optional arguments may be passed to modify the parser's behavior.
Please see `Customizing the Parser`_ below for details.
2. Gather input (a multi-line string), by reading a file or the standard
input::
input = sys.stdin.read()
3. Create a new empty `docutils.nodes.document` tree::
document = docutils.utils.new_document(source, settings)
See `docutils.utils.new_document()` for parameter details.
4. Run the parser, populating the document tree::
parser.parse(input, document)
Parser Overview
===============
The reStructuredText parser is implemented as a state machine, examining its
input one line at a time. To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.
Customizing the Parser
----------------------
Anything that isn't already customizable is that way simply because that type
of customizability hasn't been implemented yet. Patches welcome!
When instantiating an object of the `Parser` class, two parameters may be
passed: ``rfc2822`` and ``inliner``. Pass ``rfc2822=1`` to enable an initial
RFC-2822 style header block, parsed as a "field_list" element (with "class"
attribute set to "rfc2822"). Currently this is the only body-level element
which is customizable without subclassing. (Tip: subclass `Parser` and change
its "state_classes" and "initial_state" attributes to refer to new classes.
Contact the author if you need more details.)
The ``inliner`` parameter takes an instance of `states.Inliner` or a subclass.
It handles inline markup recognition. A common extension is the addition of
further implicit hyperlinks, like "RFC 2822". This can be done by subclassing
`states.Inliner`, adding a new method for the implicit markup, and adding a
``(pattern, method)`` pair to the "implicit_dispatch" attribute of the
subclass. See `states.Inliner.implicit_inline()` for details. Explicit
inline markup can be customized in a `states.Inliner` subclass via the
``patterns.initial`` and ``dispatch`` attributes (and new methods as
appropriate).
"""
__docformat__ = 'reStructuredText'
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
from docutils import frontend, nodes
class Parser(docutils.parsers.Parser):
    """The reStructuredText parser."""
    supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
    """Aliases this parser supports."""
    # Option-parser specification consumed by docutils.frontend:
    # (group title, group description, sequence of (help, option strings,
    # optparse-style keyword dict) triples).
    settings_spec = (
        'reStructuredText Parser Options',
        None,
        (('Recognize and link to standalone PEP references (like "PEP 258").',
          ['--pep-references'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Base URL for PEP references '
          '(default "http://www.python.org/dev/peps/").',
          ['--pep-base-url'],
          {'metavar': '<URL>', 'default': 'http://www.python.org/dev/peps/',
           'validator': frontend.validate_url_trailing_slash}),
         ('Template for PEP file part of URL. (default "pep-%04d")',
          ['--pep-file-url-template'],
          {'metavar': '<URL>', 'default': 'pep-%04d'}),
         ('Recognize and link to standalone RFC references (like "RFC 822").',
          ['--rfc-references'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Base URL for RFC references (default "http://www.faqs.org/rfcs/").',
          ['--rfc-base-url'],
          {'metavar': '<URL>', 'default': 'http://www.faqs.org/rfcs/',
           'validator': frontend.validate_url_trailing_slash}),
         ('Set number of spaces for tab expansion (default 8).',
          ['--tab-width'],
          {'metavar': '<width>', 'type': 'int', 'default': 8,
           'validator': frontend.validate_nonnegative_int}),
         ('Remove spaces before footnote references.',
          ['--trim-footnote-reference-space'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Leave spaces before footnote references.',
          ['--leave-footnote-reference-space'],
          {'action': 'store_false', 'dest': 'trim_footnote_reference_space'}),
         ('Disable directives that insert the contents of external file '
          '("include" & "raw"); replaced with a "warning" system message.',
          ['--no-file-insertion'],
          {'action': 'store_false', 'default': 1,
           'dest': 'file_insertion_enabled',
           'validator': frontend.validate_boolean}),
         ('Enable directives that insert the contents of external file '
          '("include" & "raw"). Enabled by default.',
          ['--file-insertion-enabled'],
          {'action': 'store_true'}),
         ('Disable the "raw" directives; replaced with a "warning" '
          'system message.',
          ['--no-raw'],
          {'action': 'store_false', 'default': 1, 'dest': 'raw_enabled',
           'validator': frontend.validate_boolean}),
         ('Enable the "raw" directive. Enabled by default.',
          ['--raw-enabled'],
          {'action': 'store_true'}),))
    config_section = 'restructuredtext parser'
    config_section_dependencies = ('parsers',)
    def __init__(self, rfc2822=None, inliner=None):
        """
        Set up parser configuration.

        `rfc2822`: if true, start parsing in the 'RFC2822Body' state so an
        initial RFC-2822-style header block is recognized.
        `inliner`: optional `states.Inliner` (sub)class instance handling
        inline markup; ``None`` means the state machine's default.
        """
        if rfc2822:
            self.initial_state = 'RFC2822Body'
        else:
            self.initial_state = 'Body'
        self.state_classes = states.state_classes
        self.inliner = inliner
    def parse(self, inputstring, document):
        """Parse `inputstring` and populate `document`, a document tree."""
        self.setup_parse(inputstring, document)
        # A fresh state machine per parse; debug output follows the
        # document's reporter setting.
        self.statemachine = states.RSTStateMachine(
              state_classes=self.state_classes,
              initial_state=self.initial_state,
              debug=document.reporter.debug_flag)
        # Split into lines, expanding tabs and normalizing whitespace.
        inputlines = docutils.statemachine.string2lines(
              inputstring, tab_width=document.settings.tab_width,
              convert_whitespace=1)
        self.statemachine.run(inputlines, document, inliner=self.inliner)
        self.finish_parse()
class DirectiveError(Exception):
    """
    Exception carrying a directive system message and its severity level.

    Raised from within directive implementations. Do not instantiate
    directly -- obtain instances via `Directive.directive_error()`.
    """
    def __init__(self, level, message):
        """Record the message text and its system-message severity level."""
        Exception.__init__(self)
        self.message = message
        self.level = level
class Directive:
    """
    Base class for reStructuredText directives.
    The following attributes may be set by subclasses.  They are
    interpreted by the directive parser (which runs the directive
    class):
    - `required_arguments`: The number of required arguments (default:
      0).
    - `optional_arguments`: The number of optional arguments (default:
      0).
    - `final_argument_whitespace`: A boolean, indicating if the final
      argument may contain whitespace (default: False).
    - `option_spec`: A dictionary, mapping known option names to
      conversion functions such as `int` or `float` (default: {}, no
      options).  Several conversion functions are defined in the
      directives/__init__.py module.
      Option conversion functions take a single parameter, the option
      argument (a string or ``None``), validate it and/or convert it
      to the appropriate form.  Conversion functions may raise
      `ValueError` and `TypeError` exceptions.
    - `has_content`: A boolean; True if content is allowed.  Client
      code must handle the case where content is required but not
      supplied (an empty content list will be supplied).
    Arguments are normally single whitespace-separated words.  The
    final argument may contain whitespace and/or newlines if
    `final_argument_whitespace` is True.
    If the form of the arguments is more complex, specify only one
    argument (either required or optional) and set
    `final_argument_whitespace` to True; the client code must do any
    context-sensitive parsing.
    When a directive implementation is being run, the directive class
    is instantiated, and the `run()` method is executed.  During
    instantiation, the following instance variables are set:
    - ``name`` is the directive type or name (string).
    - ``arguments`` is the list of positional arguments (strings).
    - ``options`` is a dictionary mapping option names (strings) to
      values (type depends on option conversion functions; see
      `option_spec` above).
    - ``content`` is a list of strings, the directive content line by line.
    - ``lineno`` is the line number of the first line of the directive.
    - ``content_offset`` is the line offset of the first line of the content from
      the beginning of the current input.  Used when initiating a nested parse.
    - ``block_text`` is a string containing the entire directive.
    - ``state`` is the state which called the directive function.
    - ``state_machine`` is the state machine which controls the state which called
      the directive function.
    Directive functions return a list of nodes which will be inserted
    into the document tree at the point where the directive was
    encountered.  This can be an empty list if there is nothing to
    insert.
    For ordinary directives, the list must contain body elements or
    structural elements.  Some directives are intended specifically
    for substitution definitions, and must return a list of `Text`
    nodes and/or inline elements (suitable for inline insertion, in
    place of the substitution reference).  Such directives must verify
    substitution definition context, typically using code like this::
        if not isinstance(state, states.SubstitutionDef):
            error = state_machine.reporter.error(
                'Invalid context: the "%s" directive can only be used '
                'within a substitution definition.' % (name),
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
    """
    # There is a "Creating reStructuredText Directives" how-to at
    # <http://docutils.sf.net/docs/howto/rst-directives.html>. If you
    # update this docstring, please update the how-to as well.
    required_arguments = 0
    """Number of required directive arguments."""
    optional_arguments = 0
    """Number of optional arguments after the required arguments."""
    final_argument_whitespace = False
    """May the final argument contain whitespace?"""
    option_spec = None
    """Mapping of option names to validator functions."""
    has_content = False
    """May the directive have content?"""
    def __init__(self, name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine):
        # Store the full parsing context verbatim; see the class docstring
        # for the meaning of each attribute.
        self.name = name
        self.arguments = arguments
        self.options = options
        self.content = content
        self.lineno = lineno
        self.content_offset = content_offset
        self.block_text = block_text
        self.state = state
        self.state_machine = state_machine
    def run(self):
        # Subclasses must implement the directive's behavior and return a
        # list of nodes.  (Fixed message typo: "is subclass" -> "in subclass".)
        raise NotImplementedError('Must override run() in subclass.')
    # Directive errors:
    def directive_error(self, level, message):
        """
        Return a DirectiveError suitable for being thrown as an exception.
        Call "raise self.directive_error(level, message)" from within
        a directive implementation to return one single system message
        at level `level`, which automatically gets the directive block
        and the line number added.
        You'd often use self.error(message) instead, which will
        generate an ERROR-level directive error.
        """
        return DirectiveError(level, message)
    # Convenience wrappers, one per docutils system-message level
    # (0=DEBUG through 4=SEVERE):
    def debug(self, message):
        return self.directive_error(0, message)
    def info(self, message):
        return self.directive_error(1, message)
    def warning(self, message):
        return self.directive_error(2, message)
    def error(self, message):
        return self.directive_error(3, message)
    def severe(self, message):
        return self.directive_error(4, message)
    # Convenience methods:
    def assert_has_content(self):
        """
        Throw an ERROR-level DirectiveError if the directive doesn't
        have contents.
        """
        if not self.content:
            raise self.error('Content block expected for the "%s" directive; '
                             'none found.' % self.name)
def convert_directive_function(directive_fn):
    """
    Wrap an old-style, functional-interface directive in a `Directive`
    subclass and return that class.
    """
    class FunctionalDirective(Directive):
        # Mirror the wrapped function's optional metadata attributes.
        option_spec = getattr(directive_fn, 'options', None)
        has_content = getattr(directive_fn, 'content', False)
        _argument_spec = getattr(directive_fn, 'arguments', (0, 0, False))
        (required_arguments,
         optional_arguments,
         final_argument_whitespace) = _argument_spec
        def run(self):
            # Delegate to the wrapped function using the classic
            # nine-argument functional signature.
            return directive_fn(
                self.name, self.arguments, self.options, self.content,
                self.lineno, self.content_offset, self.block_text,
                self.state, self.state_machine)
    return FunctionalDirective
| spreeker/democracygame | external_apps/docutils-snapshot/docutils/parsers/rst/__init__.py | Python | bsd-3-clause | 13,825 |
from unittest import TestCase
import numpy as np
import theano
from theano import tensor as T
from teafacto.blocks.seq.rnu import GRU
class TestGRUBasic(TestCase):
    # NOTE(review): these are integration tests -- they require the
    # teafacto GRU block and a working theano install, and use Python 2
    # print statements.
    def test_output_shape(self):
        """Predicting on (batsize, seqlen, indim) input must yield (batsize, seqlen, innerdim)."""
        indim = 20
        innerdim = 50
        batsize = 200
        seqlen = 5
        data = np.random.random((batsize, seqlen, indim)).astype("float32")
        gru = GRU(innerdim=innerdim, dim=indim)
        grupred = gru.predict(data)
        self.assertEqual(grupred.shape, (batsize, seqlen, innerdim))
    def test_get_params(self):
        """get_params() must expose exactly the nine GRU weight/bias parameters."""
        gru = GRU(innerdim=100, dim=20)
        params = {gru.um, gru.wm, gru.uhf, gru.whf, gru.u, gru.w, gru.bm, gru.bhf, gru.b}
        self.assertEqual(params, gru.get_params())
    def test_gru_with_mask(self):
        """Masked positions must not produce activations; unmasked ones must."""
        indim = 2
        innerdim = 5
        batsize = 4
        seqlen = 3
        data = np.random.random((batsize, seqlen, indim)).astype("float32")
        # Mask keeps the first time step of every sample and all steps of
        # the first sample.
        mask = np.zeros((batsize, seqlen)).astype("float32")
        mask[:, 0] = 1.
        mask[0, :] = 1.
        gru = GRU(innerdim=innerdim, dim=indim)
        grupred = gru.predict(data, mask)
        print grupred
        self.assertEqual(grupred.shape, (batsize, seqlen, innerdim))
        #self.assertTrue(np.allclose(grupred[1:, 1:, :], np.zeros_like(grupred[1:, 1:, :])))
        self.assertTrue(np.all(abs(grupred[0, ...]) > 0))
        self.assertTrue(np.all(abs(grupred[:, 0, :]) > 0))
    def test_gru_noinput(self):
        # Construction only: a zero-dim, no-input GRU must not raise.
        gru = GRU(innerdim=50, dim=0, noinput=True)
class TestGRU(TestCase):
    def test_if_prediction_is_equivalent_to_manually_constructed_theano_graph(self):
        """The GRU block's prediction must match a hand-built theano graph sharing its weights."""
        indim = 20
        innerdim = 50
        batsize = 200
        seqlen = 5
        data = np.random.random((batsize, seqlen, indim)).astype("float32")
        gru = GRU(innerdim=innerdim, dim=indim)
        # Compare only the last time step, matching what apply() returns below.
        grupred = gru.predict(data)[:, -1, :]
        tgru_in, tgru_out = self.build_theano_gru(innerdim, indim, batsize, gru)
        tgrupred = tgru_out.eval({tgru_in: data.astype("float32")})
        print np.sum(np.abs(tgrupred-grupred))
        self.assertTrue(np.allclose(grupred, tgrupred))
    def build_theano_gru(self, innerdim, indim, batsize, gru):
        """Build a reference GRU graph in raw theano from the block's current weight values."""
        # Copy every parameter value out of the GRU block into plain
        # theano shared variables.
        u = theano.shared(gru.u.d.get_value())
        w = theano.shared(gru.w.d.get_value())
        um = theano.shared(gru.um.d.get_value())
        wm = theano.shared(gru.wm.d.get_value())
        uhf = theano.shared(gru.uhf.d.get_value())
        whf = theano.shared(gru.whf.d.get_value())
        b = theano.shared(gru.b.d.get_value())
        bm = theano.shared(gru.bm.d.get_value())
        bhf = theano.shared(gru.bhf.d.get_value())
        def rec(x_t, h_tm1):
            # One GRU step: memory gate, hidden-filter gate, candidate
            # state, then convex combination of old and candidate state.
            mgate = T.nnet.sigmoid(T.dot(h_tm1, um) + T.dot(x_t, wm) + bm)
            hfgate = T.nnet.sigmoid(T.dot(h_tm1, uhf) + T.dot(x_t, whf) + bhf)
            canh = T.tanh(T.dot(h_tm1 * hfgate, u) + T.dot(x_t, w) + b)
            h = mgate * h_tm1 + (1-mgate) * canh
            return [h, h]
        def apply(x):
            inputs = x.dimshuffle(1, 0, 2) # inputs is (seq_len, batsize, dim)
            init_h = T.zeros((batsize, innerdim))
            outputs, _ = theano.scan(fn=rec,
                                     sequences=inputs,
                                     outputs_info=[None, init_h])
            output = outputs[0]
            return output[-1, :, :] #.dimshuffle(1, 0, 2) # return is (batsize, seqlen, dim)
        inp = T.ftensor3()
        return inp, apply(inp)
| lukovnikov/teafacto | test/test_gru.py | Python | mit | 3,451 |
from test_plus.test import TestCase
from bootcamp.news.models import News
from bootcamp.notifications.models import Notification, notification_handler
class NotificationsModelsTest(TestCase):
    """
    Django ORM tests for the Notification model, its manager and the
    notification_handler dispatch function.

    The single-letter ``verb`` codes map to actions; the __str__
    expectations below show e.g. L=liked, C=commented, A=answered.
    """
    def setUp(self):
        # Two users, a news item with one reply, and four notifications
        # (two per direction between the users).
        self.user = self.make_user("test_user")
        self.other_user = self.make_user("other_test_user")
        self.first_news = News.objects.create(
            user=self.user, content="This is a short content."
        )
        self.second_news = News.objects.create(
            user=self.other_user,
            content="This is an answer to the first news.",
            reply=True,
            parent=self.first_news,
        )
        self.first_notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="L"
        )
        self.second_notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="C"
        )
        self.third_notification = Notification.objects.create(
            actor=self.other_user, recipient=self.user, verb="A"
        )
        self.fourth_notification = Notification.objects.create(
            actor=self.other_user,
            recipient=self.user,
            action_object=self.first_news,
            verb="A",
        )
    def test_return_values(self):
        # __str__ renders "<actor> <verb text> [<action object>] <age>".
        assert isinstance(self.first_notification, Notification)
        assert isinstance(self.second_notification, Notification)
        assert isinstance(self.third_notification, Notification)
        assert isinstance(self.fourth_notification, Notification)
        assert str(self.first_notification) == "test_user liked 0 minutes ago"
        assert str(self.second_notification) == "test_user commented 0 minutes ago"
        assert str(self.third_notification) == "other_test_user answered 0 minutes ago"
        assert (
            str(self.fourth_notification)
            == "other_test_user answered This is a short content. 0 minutes ago"
        )
    def test_return_unread(self):
        # All notifications start unread.
        assert Notification.objects.unread().count() == 4
        assert self.first_notification in Notification.objects.unread()
    def test_mark_as_read_and_return(self):
        self.first_notification.mark_as_read()
        assert Notification.objects.read().count() == 1
        assert self.first_notification in Notification.objects.read()
        self.first_notification.mark_as_unread()
        assert Notification.objects.read().count() == 0
    def test_mark_all_as_read(self):
        # Without a recipient argument the whole table is affected; with
        # one, only that user's notifications (2 of the 4 here).
        Notification.objects.mark_all_as_read()
        assert Notification.objects.read().count() == 4
        Notification.objects.mark_all_as_unread(self.other_user)
        assert Notification.objects.read().count() == 2
        Notification.objects.mark_all_as_unread()
        assert Notification.objects.unread().count() == 4
        Notification.objects.mark_all_as_read(self.other_user)
        assert Notification.objects.read().count() == 2
    def test_get_most_recent(self):
        assert Notification.objects.get_most_recent().count() == 4
    def test_single_notification(self):
        Notification.objects.mark_all_as_read()
        obj = News.objects.create(user=self.user, content="This is a short content.")
        notification_handler(self.user, self.other_user, "C", action_object=obj)
        assert Notification.objects.unread().count() == 1
    def test_global_notification(self):
        # "global" as recipient broadcasts; here it yields one notification.
        Notification.objects.mark_all_as_read()
        notification_handler(self.user, "global", "C")
        assert Notification.objects.unread().count() == 1
    def test_list_notification(self):
        # A list of recipients creates one notification per recipient.
        Notification.objects.mark_all_as_read()
        notification_handler(self.user, [self.user, self.other_user], "C")
        assert Notification.objects.unread().count() == 2
    def test_icon_comment(self):
        # Verbs C, A and K all share the comment icon.
        notification_one = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="C"
        )
        notification_two = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="A"
        )
        notification_three = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="K"
        )
        assert notification_one.get_icon() == "fa-comment"
        assert notification_two.get_icon() == "fa-comment"
        assert notification_three.get_icon() == "fa-comment"
    def test_icon_users(self):
        # Verbs I, U and O all share the users icon.
        notification_one = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="I"
        )
        notification_two = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="U"
        )
        notification_three = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="O"
        )
        assert notification_one.get_icon() == "fa-users"
        assert notification_two.get_icon() == "fa-users"
        assert notification_three.get_icon() == "fa-users"
    def test_icon_hearth(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="L"
        )
        assert notification.get_icon() == "fa-heart"
    def test_icon_star(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="F"
        )
        assert notification.get_icon() == "fa-star"
    def test_icon_check_circle(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="W"
        )
        assert notification.get_icon() == "fa-check-circle"
    def test_icon_pencil(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="E"
        )
        assert notification.get_icon() == "fa-pencil"
    def test_icon_plus(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="V"
        )
        assert notification.get_icon() == "fa-plus"
    def test_icon_share(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="S"
        )
        assert notification.get_icon() == "fa-share-alt"
    def test_icon_reply(self):
        notification = Notification.objects.create(
            actor=self.user, recipient=self.other_user, verb="R"
        )
        assert notification.get_icon() == "fa-reply"
| vitorfs/bootcamp | bootcamp/notifications/tests/test_models.py | Python | mit | 6,507 |
#
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import os
import import_basic
from mysql.utilities.exception import MUTLibError, UtilDBError
class test(import_basic.test):
"""check parameters for import utility
This test executes a basic check of parameters for mysqldbimport.
It uses the import_basic test as a parent for setup and teardown methods.
"""
def check_prerequisites(self):
return import_basic.test.check_prerequisites(self)
def setup(self):
return import_basic.test.setup(self)
def do_skip_test(self, cmd_str, comment, expected_res=0):
# Precheck: check db and save the results.
self.results.append("BEFORE:\n")
self.results.append(self.check_objects(self.server2, "util_test"))
res = self.run_test_case(expected_res, cmd_str, comment)
if not res:
raise MUTLibError("%s: failed" % comment)
# Now, check db and save the results.
self.results.append("AFTER:\n")
res = self.server2.exec_query("SHOW DATABASES LIKE 'util_test'")
if res == () or res == []:
self.results.append("Database was NOT created.\n")
else:
self.results.append("Database was created.\n")
self.results.append(self.check_objects(self.server2, "util_test"))
try:
self.drop_db(self.server2, "util_test")
except:
pass # ok if this fails - it is a spawned server
def run(self):
self.res_fname = "result.txt"
from_conn = "--server=" + self.build_connection_string(self.server1)
to_conn = "--server=" + self.build_connection_string(self.server2)
cmd_str = "mysqldbimport.py %s %s --import=definitions " % \
(to_conn, self.export_import_file)
cmd_opts = " --help"
comment = "Test case 1 - help"
res = self.run_test_case(0, cmd_str + cmd_opts, comment)
if not res:
raise MUTLibError("%s: failed" % comment)
# Now test the skips
# Note: data and blobs must be done separately
_SKIPS = ("grants", "events", "functions", "procedures",
"triggers", "views", "tables", "create_db")
_FORMATS = ("CSV", "SQL")
case_num = 2
for format in _FORMATS:
# Create an import file
export_cmd = "mysqldbexport.py %s util_test --export=BOTH " % \
from_conn + " --skip-gtid "
export_cmd += "--format=%s --display=BRIEF > %s " % \
(format, self.export_import_file)
comment = "Generating import file"
res = self.run_test_case(0, export_cmd, comment)
if not res:
raise MUTLibError("%s: failed" % comment)
cmd_opts = "%s --format=%s --skip=" % (cmd_str, format)
for skip in _SKIPS:
if case_num != 2 and case_num != 2 + len(_SKIPS):
cmd_opts += ","
cmd_opts += "%s" % skip
comment = "Test case %d - no %s" % (case_num, skip)
self.do_skip_test(cmd_opts, comment)
case_num += 1
# Now test --skip=data, --skip-blobs
# Create an import file with blobs
try:
res = self.server1.exec_query("ALTER TABLE util_test.t3 "
"ADD COLUMN me_blob BLOB")
res = self.server1.exec_query("UPDATE util_test.t3 SET "
"me_blob = 'This, is a BLOB!'")
except UtilDBError, e:
raise MUTLibError("Failed to add blob column: %s" % e.errmsg)
export_cmd = "mysqldbexport.py %s util_test --export=BOTH " % \
from_conn + " --skip-gtid "
export_cmd += "--format=%s --display=BRIEF > %s " % \
("CSV", self.export_import_file)
comment = "Generating import file"
res = self.run_test_case(0, export_cmd, comment)
if not res:
raise MUTLibError("%s: failed" % comment)
# No skips for reference (must skip events for deterministic reasons
cmd_str = "mysqldbimport.py %s %s --import=both --dryrun " % \
(to_conn, self.export_import_file)
cmd_str += " --format=CSV --bulk-insert "
comment = "Test case %d - no %s" % (case_num, "events")
res = self.run_test_case(0, cmd_str+"--skip=events", comment)
if not res:
raise MUTLibError("%s: failed" % comment)
case_num += 1
cmd_str = "mysqldbimport.py %s %s --import=both --dryrun " % \
(to_conn, self.export_import_file)
cmd_str += " --format=CSV --bulk-insert "
comment = "Test case %d - no %s" % (case_num, "data")
res = self.run_test_case(0, cmd_str+"--skip=events,data", comment)
if not res:
raise MUTLibError("%s: failed" % comment)
case_num += 1
cmd_str = "mysqldbimport.py %s %s --import=both --dryrun " % \
(to_conn, self.export_import_file)
cmd_str += " --format=CSV --skip-blobs --bulk-insert "
comment = "Test case %d - no %s" % (case_num, "blobs")
res = self.run_test_case(0, cmd_str+"--skip=events", comment)
if not res:
raise MUTLibError("%s: failed" % comment)
case_num += 1
# Lastly, do a quiet import
cmd_str = "mysqldbimport.py %s %s --import=both --quiet " % \
(to_conn, self.export_import_file)
cmd_str += " --format=CSV --bulk-insert "
comment = "Test case %d - no %s" % (case_num, "messages (quiet)")
res = self.run_test_case(0, cmd_str, comment)
if not res:
raise MUTLibError("%s: failed" % comment)
case_num += 1
return True
    def get_result(self):
        # Compare this run's captured results against the expected results
        # recorded for this test module.
        return self.compare(__name__, self.results)
    def record(self):
        # Save the captured results to the result file for this module.
        return self.save_result_file(__name__, self.results)
    def cleanup(self):
        # Delegate teardown to the import_basic base test's cleanup.
        return import_basic.test.cleanup(self)
| dannykopping/mysql-utilities | mysql-test/t/import_parameters.py | Python | gpl-2.0 | 6,759 |
# encoding: utf-8
from tornado.testing import gen_test
from mytor import Connection
from . import BaseTestCase
class TestInit(BaseTestCase):
    """Smoke test: open a connection, run a trivial query, tear it down."""

    @gen_test
    def test0(self):
        """Connecting and executing ``SELECT 1`` should return rows."""
        conn = yield Connection(**self.PARAMS)
        cur = conn.cursor()
        yield cur.execute('SELECT 1')
        rows = cur.fetchall()
        yield cur.close()
        conn.close()
        assert rows
| mosquito/mytor | tests/test_init.py | Python | mit | 421 |
import json
import logging
from math import ceil
import os
import click
import cligj
from .helpers import resolve_inout
from . import options
import rasterio
from rasterio.errors import CRSError
from rasterio.transform import Affine
from rasterio.coords import disjoint_bounds
logger = logging.getLogger('rio')
# Common options used below
# Unlike the version in cligj, this one doesn't require values.
# Variadic FILES argument: one or more input paths followed by the output
# path; resolve_inout() in the command body splits them apart.
files_inout_arg = click.argument(
    'files',
    nargs=-1,
    type=click.Path(resolve_path=True),
    metavar="INPUTS... OUTPUT")
@click.command(short_help='Rasterize features.')
@files_inout_arg
@options.output_opt
@cligj.format_opt
@options.like_file_opt
@options.bounds_opt
@options.dimensions_opt
@options.resolution_opt
@click.option('--src-crs', '--src_crs', 'src_crs', default=None,
              help='Source coordinate reference system.  Limited to EPSG '
              'codes for now.  Used as output coordinate system if output '
              'does not exist or --like option is not used. '
              'Default: EPSG:4326')
@options.all_touched_opt
@click.option('--default-value', '--default_value', 'default_value',
              type=float, default=1, help='Default value for rasterized pixels')
@click.option('--fill', type=float, default=0,
              help='Fill value for all pixels not overlapping features.  Will '
              'be evaluated as NoData pixels for output.  Default: 0')
@click.option('--property', 'prop', type=str, default=None, help='Property in '
              'GeoJSON features to use for rasterized values.  Any features '
              'that lack this property will be given --default_value instead.')
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def rasterize(
        ctx,
        files,
        output,
        driver,
        like,
        bounds,
        dimensions,
        res,
        src_crs,
        all_touched,
        default_value,
        fill,
        prop,
        force_overwrite,
        creation_options):
    """Rasterize GeoJSON into a new or existing raster.

    If the output raster exists, rio-rasterize will rasterize feature values
    into all bands of that raster.  The GeoJSON is assumed to be in the same
    coordinate reference system as the output unless --src-crs is provided.
    --default_value or property values when using --property must be using a
    data type valid for the data type of that raster.

    If a template raster is provided using the --like option, the affine
    transform and data type from that raster will be used to create the output.
    Only a single band will be output.
    The GeoJSON is assumed to be in the same coordinate reference system unless
    --src-crs is provided.
    --default_value or property values when using --property must be using a
    data type valid for the data type of that raster.

    --driver, --bounds, --dimensions, and --res are ignored when output exists
    or --like raster is provided

    If the output does not exist and --like raster is not provided, the input
    GeoJSON will be used to determine the bounds of the output unless
    provided using --bounds.
    --dimensions or --res are required in this case.

    If --res is provided, the bottom and right coordinates of bounds are
    ignored.

    Note:
    The GeoJSON is not projected to match the coordinate reference system
    of the output or --like rasters at this time.  This functionality may be
    added in the future.
    """
    from rasterio.crs import CRS
    from rasterio.features import rasterize
    from rasterio.features import bounds as calculate_bounds
    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    # Split the variadic FILES argument into input path(s) and output path.
    output, files = resolve_inout(
        files=files, output=output, force_overwrite=force_overwrite)
    bad_param = click.BadParameter('invalid CRS.  Must be an EPSG code.',
                                   ctx, param=src_crs, param_hint='--src_crs')
    has_src_crs = src_crs is not None
    try:
        # Default to EPSG:4326 when --src-crs is not given.
        src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string('EPSG:4326')
    except CRSError:
        raise bad_param
    # If values are actually meant to be integers, we need to cast them
    # as such or rasterize creates floating point outputs
    if default_value == int(default_value):
        default_value = int(default_value)
    if fill == int(fill):
        fill = int(fill)
    with rasterio.Env(CPL_DEBUG=verbosity > 2):
        def feature_value(feature):
            # Burn value for this feature: the requested property when
            # present, otherwise --default_value.
            if prop and 'properties' in feature:
                return feature['properties'].get(prop, default_value)
            return default_value
        # GeoJSON comes from the first input file, or stdin ('-').
        with click.open_file(files.pop(0) if files else '-') as gj_f:
            geojson = json.loads(gj_f.read())
        if 'features' in geojson:
            # FeatureCollection: pair each geometry with its burn value.
            geometries = []
            for f in geojson['features']:
                geometries.append((f['geometry'], feature_value(f)))
        elif 'geometry' in geojson:
            # Single Feature (or bare geometry-bearing object).
            geometries = ((geojson['geometry'], feature_value(geojson)), )
        else:
            raise click.BadParameter('Invalid GeoJSON', param=input,
                                     param_hint='input')
        geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))
        if os.path.exists(output):
            # Case 1: burn the features into every band of an existing
            # raster, reusing its shape, transform and dtype.
            with rasterio.open(output, 'r+') as out:
                if has_src_crs and src_crs != out.crs:
                    raise click.BadParameter('GeoJSON does not match crs of '
                                             'existing output raster',
                                             param='input', param_hint='input')
                if disjoint_bounds(geojson_bounds, out.bounds):
                    click.echo("GeoJSON outside bounds of existing output "
                               "raster. Are they in different coordinate "
                               "reference systems?",
                               err=True)
                meta = out.meta.copy()
                result = rasterize(
                    geometries,
                    out_shape=(meta['height'], meta['width']),
                    transform=meta.get('affine', meta['transform']),
                    all_touched=all_touched,
                    dtype=meta.get('dtype', None),
                    default_value=default_value,
                    fill=fill)
                for bidx in range(1, meta['count'] + 1):
                    data = out.read(bidx, masked=True)
                    # Burn in any non-fill pixels, and update mask accordingly
                    ne = result != fill
                    data[ne] = result[ne]
                    data.mask[ne] = False
                    out.write(data, indexes=bidx)
        else:
            if like is not None:
                # Case 2: new single-band output using the profile of the
                # --like template raster.
                template_ds = rasterio.open(like)
                if has_src_crs and src_crs != template_ds.crs:
                    raise click.BadParameter('GeoJSON does not match crs of '
                                             '--like raster',
                                             param='input', param_hint='input')
                if disjoint_bounds(geojson_bounds, template_ds.bounds):
                    click.echo("GeoJSON outside bounds of --like raster. "
                               "Are they in different coordinate reference "
                               "systems?",
                               err=True)
                kwargs = template_ds.meta.copy()
                kwargs['count'] = 1
                # DEPRECATED
                # upgrade transform to affine object or we may get an invalid
                # transform set on output
                kwargs['transform'] = template_ds.affine
                template_ds.close()
            else:
                # Case 3: new output sized from --bounds/--dimensions/--res,
                # falling back to the GeoJSON's own bounds.
                bounds = bounds or geojson_bounds
                if src_crs.is_geographic:
                    if (bounds[0] < -180 or bounds[2] > 180 or
                            bounds[1] < -80 or bounds[3] > 80):
                        raise click.BadParameter(
                            "Bounds are beyond the valid extent for "
                            "EPSG:4326.",
                            ctx, param=bounds, param_hint='--bounds')
                if dimensions:
                    # Explicit raster size; derive resolution from bounds.
                    width, height = dimensions
                    res = (
                        (bounds[2] - bounds[0]) / float(width),
                        (bounds[3] - bounds[1]) / float(height)
                    )
                else:
                    if not res:
                        raise click.BadParameter(
                            'pixel dimensions are required',
                            ctx, param=res, param_hint='--res')
                    elif len(res) == 1:
                        # A single --res value means square pixels.
                        res = (res[0], res[0])
                    # Round up so the full extent is covered; at least 1 px.
                    width = max(int(ceil((bounds[2] - bounds[0]) /
                                float(res[0]))), 1)
                    height = max(int(ceil((bounds[3] - bounds[1]) /
                                 float(res[1]))), 1)
                kwargs = {
                    'count': 1,
                    'crs': src_crs,
                    'width': width,
                    'height': height,
                    'transform': Affine(res[0], 0, bounds[0], 0, -res[1],
                                        bounds[3]),
                    'driver': driver
                }
                kwargs.update(**creation_options)
            result = rasterize(
                geometries,
                out_shape=(kwargs['height'], kwargs['width']),
                transform=kwargs.get('affine', kwargs['transform']),
                all_touched=all_touched,
                dtype=kwargs.get('dtype', None),
                default_value=default_value,
                fill=fill)
            if 'dtype' not in kwargs:
                # Template-less outputs take their dtype from the result.
                kwargs['dtype'] = result.dtype
            kwargs['nodata'] = fill
            with rasterio.open(output, 'w', **kwargs) as out:
                out.write(result, indexes=1)
| ryfeus/lambda-packs | Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/rio/rasterize.py | Python | mit | 10,025 |
class Solution:
    """
    @param A: a sparse matrix
    @param B: a sparse matrix
    @return: the result of A * B
    """
    def multiply(self, A, B):
        # Sparse-aware product: skip zero entries of A (and of B's rows),
        # so work is proportional to the non-zero pairs rather than n^3.
        rows_a, cols_b = len(A), len(B[0])
        # len(A[0]) is evaluated to mirror the rectangular-input contract.
        _ = len(A[0])
        product = [[0] * cols_b for _ in range(rows_a)]
        for i, row in enumerate(A):
            for k, a_ik in enumerate(row):
                if a_ik == 0:
                    continue
                for j, b_kj in enumerate(B[k]):
                    if b_kj != 0:
                        product[i][j] += a_ik * b_kj
        return product
if __name__ == "__main__":
    # Ad-hoc smoke run: multiply a 2x7 matrix by a 7x1 column vector.
    solver = Solution()
    answer = solver.multiply([[1, 2, 3, 4, 5, 6, 7], [7, 6, 5, 4, 3, 2, 1]],
                             [[1], [1], [2], [2], [2], [3], [8]])
    print(answer)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 00:52:07 2017
@author: Shabaka
"""
import numpy as np
def draw_bs_pairs_linreg(x, y, size=1):
    """Perform pairs bootstrap for linear regression.

    Resamples (x, y) pairs with replacement ``size`` times and fits a
    degree-1 polynomial to each resample.

    Returns a pair of NumPy arrays of length ``size``:
    (bootstrap slopes, bootstrap intercepts).
    """
    # Resampling indices (rather than values) keeps each x paired with
    # its original y.
    inds = np.arange(len(x))
    slopes = np.empty(size)
    intercepts = np.empty(size)
    for rep in range(size):
        bs_inds = np.random.choice(inds, size=len(inds))
        slopes[rep], intercepts[rep] = np.polyfit(x[bs_inds], y[bs_inds], 1)
    return slopes, intercepts
| qalhata/Python-Scripts-Repo-on-Data-Science | LinReg_BS_Pairs_func.py | Python | gpl-3.0 | 724 |
from preprocess.data import ReVerbTrainRaw
import codecs
from sys import stdout
FILE = './data/reverb-train.txt'

if __name__ == '__main__':
    # Append every (question, answer) pair from the raw ReVerb training
    # data to FILE, tab-separated, with a live progress counter.
    data = ReVerbTrainRaw()
    count = 0
    with open(FILE, 'a') as f:
        for count, (q, a) in enumerate(data, 1):
            stdout.write("\rgenerated: %d" % count)
            stdout.flush()
            f.write("{}\t{}\n".format(q, a))
    stdout.write("\nTotal: %d\n" % count)
"""
The iter package contains a series of 3 iterators for traversing a piece of music:
- `note_iterator.py` iterates through notes in non-decreasing time order,
- `chord_iterator.py` iterates through a song by chord, and
- `time_iterator.py` iterates through a song by time instance (what is playing at a particular moment).
"""
from song_iterator import SongIterator
from chord_iterator import ChordIterator
from time_iterator import TimeIterator
| jasonsbrooks/ARTIST | src/artist_generator/iter/__init__.py | Python | mit | 450 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock
import adempier
import invoice | titasakgm/brc-stock | openerp/addons/bangkok_rubber/__init__.py | Python | agpl-3.0 | 1,022 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.test import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
  def testGetDOMStats(self):
    """DOM counters for the sample page should match its known contents."""
    data_dir = os.path.join(os.path.dirname(__file__),
                            '..', '..', '..', 'unittest_data')
    self._browser.SetHTTPServerDirectories(data_dir)
    # Due to an issue with CrOS, we create a new tab here rather than
    # using self._tab to get a consistent starting page on all platforms
    tab = self._browser.tabs.New()
    tab.Navigate(
        self._browser.http_server.UrlOf('dom_counter_sample.html'))
    tab.WaitForDocumentReadyStateToBeComplete()
    stats = tab.dom_stats
    self.assertEqual(stats['document_count'], 2)
    self.assertEqual(stats['node_count'], 18)
    self.assertEqual(stats['event_listener_count'], 2)
#!/usr/bin/env python
"""
Console Command
"""
import sys
import types
import os
import gc
import signal
import cmd
import logging
from threading import Thread
from .debugging import bacpypes_debugging, function_debugging, Logging, ModuleLogger
from .consolelogging import ConsoleLogHandler
from . import core
# readline is used for history files
try:
import readline
except ImportError:
readline = None
# some debugging
_debug = 0   # module debug level; nonzero enables the `if _debug:` trace calls
_log = ModuleLogger(globals())
#
# console_interrupt
#
@function_debugging
def console_interrupt(*args):
    # SIGINT handler installed by ConsoleCmd.__init__: swallow ^C (which is
    # delivered to the main thread and cannot interrupt the console thread's
    # readline call), telling the user to end the session with EOF instead.
    if _debug: console_interrupt._debug("console_interrupt %r", args)
    sys.stderr.write("Keyboard interrupt trapped - use EOF to end\n")
#
# ConsoleCmd
#
@bacpypes_debugging
class ConsoleCmd(cmd.Cmd, Thread, Logging):

    """An interactive command interpreter running in its own thread.

    Combines cmd.Cmd with a Thread: the constructor starts the thread, the
    thread runs the command loop, and when the loop exits the core event
    loop is asked to stop via core.deferred(core.stop).  Built-in commands
    add/remove/list console logging handlers and dump gc statistics.
    """

    def __init__(self, prompt="> ", stdin=None, stdout=None):
        """Set up the interpreter and immediately start its thread.

        prompt -- string shown before each command when stdin is a TTY
        stdin/stdout -- optional streams passed through to cmd.Cmd
        """
        if _debug: ConsoleCmd._debug("__init__")
        cmd.Cmd.__init__(self, stdin=stdin, stdout=stdout)
        Thread.__init__(self, name="ConsoleCmd")
        # check to see if this is running interactive
        self.interactive = sys.__stdin__.isatty()
        # save the prompt for interactive sessions, otherwise be quiet
        if self.interactive:
            self.prompt = prompt
        else:
            self.prompt = ''
        # gc counters (previous snapshot, used by do_gc to show deltas)
        self.type2count = {}
        self.type2all = {}
        # logging handlers added by do_bugin, keyed by logger name
        self.handlers = {}
        # set a INT signal handler, ^C will only get sent to the
        # main thread and there's no way to break the readline
        # call initiated by this thread - sigh
        if hasattr(signal, 'SIGINT'):
            signal.signal(signal.SIGINT, console_interrupt)
        # start the thread
        self.start()

    def run(self):
        """Thread body: run the command loop, then stop the core."""
        if _debug: ConsoleCmd._debug("run")
        # run the command loop
        self.cmdloop()
        if _debug: ConsoleCmd._debug(" - done cmdloop")
        # tell the main thread to stop, this thread will exit
        core.deferred(core.stop)

    def onecmd(self, cmdString):
        """Execute one command line, logging (rather than raising) errors."""
        if _debug: ConsoleCmd._debug('onecmd %r', cmdString)
        rslt = ""
        # let the real command run, trapping errors
        try:
            rslt = cmd.Cmd.onecmd(self, cmdString)
        except Exception as err:
            ConsoleCmd._exception("exception: %r", err)
        # return what the command returned
        return rslt

    #-----

    def do_gc(self, args):
        """gc - print out garbage collection information"""
        ### humm...
        # Python 2's old-style instance type; falls back to `object` where
        # types.InstanceType does not exist.
        instance_type = getattr(types, 'InstanceType', object)
        # snapshot of counts
        type2count = {}
        type2all = {}
        for o in gc.get_objects():
            if type(o) == instance_type:
                type2count[o.__class__] = type2count.get(o.__class__,0) + 1
                type2all[o.__class__] = type2all.get(o.__class__,0) + sys.getrefcount(o)
        # count the things that have changed
        # NOTE(review): iterkeys() here and the cmp-style sort below are
        # Python 2 only.
        ct = [ ( t.__module__
               , t.__name__
               , type2count[t]
               , type2count[t] - self.type2count.get(t,0)
               , type2all[t] - self.type2all.get(t,0)
               ) for t in type2count.iterkeys()
             ]
        # ready for the next time
        self.type2count = type2count
        self.type2all = type2all
        fmt = "%-30s %-30s %6s %6s %6s\n"
        self.stdout.write(fmt % ("Module", "Type", "Count", "dCount", "dRef"))
        # sorted by count
        ct.sort(lambda x, y: cmp(y[2], x[2]))
        for i in range(min(10,len(ct))):
            m, n, c, delta1, delta2 = ct[i]
            self.stdout.write(fmt % (m, n, c, delta1, delta2))
        self.stdout.write("\n")
        self.stdout.write(fmt % ("Module", "Type", "Count", "dCount", "dRef"))
        # sorted by module and class
        ct.sort()
        for m, n, c, delta1, delta2 in ct:
            if delta1 or delta2:
                self.stdout.write(fmt % (m, n, c, delta1, delta2))
        self.stdout.write("\n")

    def do_bugin(self, args):
        """bugin [ <logger> ] - add a console logging handler to a logger"""
        args = args.split()
        if _debug: ConsoleCmd._debug("do_bugin %r", args)
        # get the logger name and logger (no argument means the root logger)
        if args:
            loggerName = args[0]
            if loggerName in logging.Logger.manager.loggerDict:
                logger = logging.getLogger(loggerName)
            else:
                logger = None
        else:
            loggerName = '__root__'
            logger = logging.getLogger()
        # add a logging handler (at most one per logger name)
        if not logger:
            self.stdout.write("not a valid logger name\n")
        elif loggerName in self.handlers:
            self.stdout.write("%s already has a handler\n" % loggerName)
        else:
            handler = ConsoleLogHandler(logger)
            self.handlers[loggerName] = handler
            self.stdout.write("handler to %s added\n" % loggerName)
        self.stdout.write("\n")

    def do_bugout(self, args):
        """bugout [ <logger> ] - remove a console logging handler from a logger"""
        args = args.split()
        if _debug: ConsoleCmd._debug("do_bugout %r", args)
        # get the logger name and logger (no argument means the root logger)
        if args:
            loggerName = args[0]
            if loggerName in logging.Logger.manager.loggerDict:
                logger = logging.getLogger(loggerName)
            else:
                logger = None
        else:
            loggerName = '__root__'
            logger = logging.getLogger()
        # remove the logging handler
        if not logger:
            self.stdout.write("not a valid logger name\n")
        elif not loggerName in self.handlers:
            self.stdout.write("no handler for %s\n" % loggerName)
        else:
            handler = self.handlers[loggerName]
            del self.handlers[loggerName]
            # see if this (or its parent) is a module level logger
            if hasattr(logger, 'globs'):
                logger.globs['_debug'] -= 1
            elif hasattr(logger.parent, 'globs'):
                logger.parent.globs['_debug'] -= 1
            # remove it from the logger
            logger.removeHandler(handler)
            self.stdout.write("handler to %s removed\n" % loggerName)
        self.stdout.write("\n")

    def do_buggers(self, args):
        """buggers - list the console logging handlers"""
        args = args.split()
        if _debug: ConsoleCmd._debug("do_buggers %r", args)
        if not self.handlers:
            self.stdout.write("no handlers\n")
        else:
            self.stdout.write("handlers: ")
            self.stdout.write(', '.join(loggerName or '__root__' for loggerName in self.handlers))
            self.stdout.write("\n")
        # list every known logger, optionally filtered by a substring
        # argument; a '*' marks loggers that have a console handler
        loggers = logging.Logger.manager.loggerDict.keys()
        for loggerName in sorted(loggers):
            if args and (not args[0] in loggerName):
                continue
            if loggerName in self.handlers:
                self.stdout.write("* %s\n" % loggerName)
            else:
                self.stdout.write("  %s\n" % loggerName)
        self.stdout.write("\n")

    #-----

    def do_exit(self, args):
        """Exits from the console."""
        if _debug: ConsoleCmd._debug("do_exit %r", args)
        return -1

    def do_EOF(self, args):
        """Exit on system end of file character"""
        if _debug: ConsoleCmd._debug("do_EOF %r", args)
        return self.do_exit(args)

    def do_shell(self, args):
        """Pass command to a system shell when line begins with '!'"""
        if _debug: ConsoleCmd._debug("do_shell %r", args)
        os.system(args)

    def do_help(self, args):
        """Get help on commands
        'help' or '?' with no arguments prints a list of commands for which help is available
        'help <command>' or '? <command>' gives help on <command>
        """
        if _debug: ConsoleCmd._debug("do_help %r", args)
        # the only reason to define this method is for the help text in the doc string
        cmd.Cmd.do_help(self, args)

    def preloop(self):
        """Initialization before prompting user for commands.

        Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
        """
        cmd.Cmd.preloop(self)   ## sets up command completion
        # restore command history from "<script>.history" next to the script
        try:
            if readline:
                readline.read_history_file(sys.argv[0] + ".history")
        except Exception as err:
            if not isinstance(err, IOError):
                self.stdout.write("history error: %s\n" % err)

    def postloop(self):
        """Take care of any unfinished business.

        Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
        """
        # save command history; IOError (e.g. unwritable path) is ignored
        try:
            if readline:
                readline.write_history_file(sys.argv[0] + ".history")
        except Exception as err:
            if not isinstance(err, IOError):
                self.stdout.write("history error: %s\n" % err)
        # clean up command completion
        cmd.Cmd.postloop(self)
        if self.interactive:
            self.stdout.write("Exiting...\n")
        # tell the core we have stopped
        core.deferred(core.stop)

    def precmd(self, line):
        """ This method is called after the line has been input but before
            it has been interpreted. If you want to modify the input line
            before execution (for example, variable substitution) do it here.
        """
        return line.strip()

    def postcmd(self, stop, line):
        """If you want to stop the console, return something that evaluates to true.
           If you want to do some post command processing, do it here.
        """
        return stop

    def emptyline(self):
        """Do nothing on empty input line"""
        pass
| JoelBender/bacpypes | py27/bacpypes/consolecmd.py | Python | mit | 9,792 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.