| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import re
from .generic_manager import GenericManager, GenericManagerProperties
from .apt_parser import AptParser
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons import shell
from resource_management.core.logger import Logger
def replace_underscores(function_to_decorate):
def wrapper(*args, **kwargs):
self = args[0]
name = args[1].replace("_", "-")
return function_to_decorate(self, name, *args[2:], **kwargs)
return wrapper
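# Illustrative usage (hypothetical package name, shown only as a sketch of what the decorator does): a call like
#   manager.install_package("hbase_regionserver", context)
# reaches the wrapped method as "hbase-regionserver", since apt package names use dashes rather than underscores.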
class AptManagerProperties(GenericManagerProperties):
"""
Class to keep all package-manager dependent properties
"""
locked_output = "Unable to lock the administration directory"
repo_error = "Failure when receiving data from the peer"
repo_manager_bin = "/usr/bin/apt-get"
repo_cache_bin = "/usr/bin/apt-cache"
pkg_manager_bin = "/usr/bin/dpkg"
repo_update_cmd = [repo_manager_bin, 'update', '-qq']
available_packages_cmd = [repo_cache_bin, "dump"]
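# COLUMNS=999 keeps 'dpkg -l' from truncating long package names to the terminal width before parsing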
installed_packages_cmd = ['COLUMNS=999', pkg_manager_bin, "-l"]
repo_definition_location = "/etc/apt/sources.list.d"
install_cmd = {
True: [repo_manager_bin, '-o', "Dpkg::Options::=--force-confdef", '--allow-unauthenticated', '--assume-yes', 'install'],
False: [repo_manager_bin, '-q', '-o', "Dpkg::Options::=--force-confdef", '--allow-unauthenticated', '--assume-yes', 'install']
}
remove_cmd = {
True: [repo_manager_bin, '-y', 'remove'],
False: [repo_manager_bin, '-y', '-q', 'remove']
}
verify_dependency_cmd = [repo_manager_bin, '-qq', 'check']
install_cmd_env = {'DEBIAN_FRONTEND': 'noninteractive'}
repo_url_exclude = "ubuntu.com"
configuration_dump_cmd = [AMBARI_SUDO_BINARY, "apt-config", "dump"]
class AptManager(GenericManager):
def get_installed_package_version(self, package_name):
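# ask dpkg for the package status and keep only the value of its "Version" field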
r = shell.subprocess_executor("dpkg -s {0} | grep Version | awk '{{print $2}}'".format(package_name))
return r.out.strip(os.linesep)
@property
def properties(self):
return AptManagerProperties
def installed_packages(self, pkg_names=None, repo_filter=None):
"""
Return all installed packages in the system except packages in REPO_URL_EXCLUDE
:type pkg_names list|set
:type repo_filter str|None
:return formatted list of packages
"""
packages = []
available_packages = self._available_packages_dict(pkg_names, repo_filter)
with shell.process_executor(self.properties.installed_packages_cmd, error_callback=self._executor_error_handler,
strategy=shell.ReaderStrategy.BufferedChunks) as output:
for package, version in AptParser.packages_installed_reader(output):
if package in available_packages:
packages.append(available_packages[package])
if package not in available_packages:
packages.append([package, version, "installed"]) # the package does not belong to any known repo
return packages
def _available_packages(self, pkg_names=None, repo_filter=None):
"""
Return a generator over the available packages, optionally filtered by name
:type pkg_names list|set
:type repo_filter str|None
"""
with shell.process_executor(self.properties.available_packages_cmd, error_callback=self._executor_error_handler,
strategy=shell.ReaderStrategy.BufferedChunks) as output:
for pkg_item in AptParser.packages_reader(output):
if repo_filter and repo_filter not in pkg_item[2]:
continue
if self.properties.repo_url_exclude in pkg_item[2]:
continue
if pkg_names and pkg_item[0] not in pkg_names:
continue
yield pkg_item
def _available_packages_dict(self, pkg_names=None, repo_filter=None):
"""
Same as _available_packages, but the result is returned as a dict keyed by package name
:type pkg_names list|set
:type repo_filter str|None
"""
result = {}
for item in self._available_packages(pkg_names, repo_filter):
result[item[0]] = item
return result
def available_packages(self, pkg_names=None, repo_filter=None):
"""
Return a list of the available packages, optionally filtered by name
:type pkg_names list|set
:type repo_filter str|None
"""
return list(self._available_packages(pkg_names, repo_filter))
def all_packages(self, pkg_names=None, repo_filter=None):
return self.available_packages(pkg_names, repo_filter)
def transform_baseurl_to_repoid(self, base_url):
"""
Transforms the URL looking like proto://localhost/some/long/path to localhost_some_long_path
:type base_url str
:rtype str
"""
url_proto_mask = "://"
url_proto_pos = base_url.find(url_proto_mask)
if url_proto_pos > 0:
base_url = base_url[url_proto_pos+len(url_proto_mask):]
return base_url.replace("/", "_").replace(" ", "_")
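# e.g. a base_url such as "http://repos.example.com/ambari/ubuntu16" (hypothetical URL, for illustration only)
# becomes "repos.example.com_ambari_ubuntu16"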
def get_available_packages_in_repos(self, repos):
"""
Gets all (both installed and available) packages that are available at given repositories.
:type repos resource_management.libraries.functions.repository_util.CommandRepository
:return: installed and available packages from these repositories
"""
filtered_packages = []
packages = self.available_packages()
repo_ids = []
for repo in repos.items:
repo_ids.append(self.transform_baseurl_to_repoid(repo.base_url))
if repos.feat.scoped:
Logger.info("Looking for matching packages in the following repositories: {0}".format(", ".join(repo_ids)))
for repo_id in repo_ids:
for package in packages:
if repo_id in package[2]:
filtered_packages.append(package[0])
return filtered_packages
else:
Logger.info("Packages will be queried using all available repositories on the system.")
# this is the case where the hosts are marked as sysprepped, but
# search the repos on-system anyway. the url specified in ambari must match the one
# in the list file for this to work
for repo_id in repo_ids:
for package in packages:
if repo_id in package[2]:
filtered_packages.append(package[0])
if len(filtered_packages) > 0:
Logger.info("Found packages for repo {}".format(str(filtered_packages)))
return filtered_packages
else:
return [package[0] for package in packages]
def package_manager_configuration(self):
"""
Reading apt configuration
:return dict with apt properties
"""
with shell.process_executor(self.properties.configuration_dump_cmd, error_callback=self._executor_error_handler) as output:
configuration = list(AptParser.config_reader(output))
return dict(configuration)
def verify_dependencies(self):
"""
Verify that we have no dependency issues in package manager. Dependency issues could appear because of aborted or terminated
package installation process or invalid packages state after manual modification of packages list on the host
:return True if no dependency issues found, False if dependency issue present
:rtype bool
"""
r = shell.subprocess_executor(self.properties.verify_dependency_cmd)
pattern = re.compile("has missing dependency|E:")
if r.code or (r.out and pattern.search(r.out)):
err_msg = Logger.filter_text("Failed to verify package dependencies. Execution of '%s' returned %s. %s" % (self.properties.verify_dependency_cmd, r.code, r.out))
Logger.error(err_msg)
return False
return True
@replace_underscores
def install_package(self, name, context):
"""
Install package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
from resource_management.core import sudo
apt_sources_list_tmp_dir = None
if not name:
raise ValueError("Installation command was executed with no package name")
elif not self._check_existence(name) or context.action_force:
cmd = self.properties.install_cmd[context.log_output]
copied_sources_files = []
is_tmp_dir_created = False
if context.use_repos:
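# when 'base' is among the requested repos, the system sources.list stays in effect and only the other
# named repo files are copied below; otherwise apt is pointed at an empty SourceList so that only the
# copied repo files are consulted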
if 'base' in context.use_repos:
use_repos = set([v for k, v in context.use_repos.items() if k != 'base'])
else:
cmd = cmd + ['-o', 'Dir::Etc::SourceList={0}'.format(self.properties.empty_file)]
use_repos = set(context.use_repos.values())
if use_repos:
is_tmp_dir_created = True
apt_sources_list_tmp_dir = tempfile.mkdtemp(suffix="-ambari-apt-sources-d")
Logger.info("Temporary sources directory was created: {}".format(apt_sources_list_tmp_dir))
for repo in use_repos:
new_sources_file = os.path.join(apt_sources_list_tmp_dir, repo + '.list')
Logger.info("Temporary sources file will be copied: {0}".format(new_sources_file))
sudo.copy(os.path.join(self.properties.repo_definition_location, repo + '.list'), new_sources_file)
copied_sources_files.append(new_sources_file)
cmd = cmd + ['-o', 'Dir::Etc::SourceParts={0}'.format(apt_sources_list_tmp_dir)]
cmd = cmd + [name]
Logger.info("Installing package {0} ('{1}')".format(name, shell.string_cmd_from_args_list(cmd)))
shell.repository_manager_executor(cmd, self.properties, context, env=self.properties.install_cmd_env)
if is_tmp_dir_created:
for temporary_sources_file in copied_sources_files:
Logger.info("Removing temporary sources file: {0}".format(temporary_sources_file))
os.remove(temporary_sources_file)
if apt_sources_list_tmp_dir:
Logger.info("Removing temporary sources directory: {0}".format(apt_sources_list_tmp_dir))
os.rmdir(apt_sources_list_tmp_dir)
else:
Logger.info("Skipping installation of existing package {0}".format(name))
@replace_underscores
def upgrade_package(self, name, context):
"""
Upgrade package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
context.is_upgrade = True
return self.install_package(name, context)
@replace_underscores
def remove_package(self, name, context, ignore_dependencies=False):
"""
Remove package
:type name str
:type context ambari_commons.shell.RepoCallContext
:type ignore_dependencies bool
:raise ValueError if name is empty
"""
if not name:
raise ValueError("Installation command were executed with no package name passed")
elif self._check_existence(name):
cmd = self.properties.remove_cmd[context.log_output] + [name]
Logger.info("Removing package {0} ('{1}')".format(name, shell.string_cmd_from_args_list(cmd)))
shell.repository_manager_executor(cmd, self.properties, context)
else:
Logger.info("Skipping removal of non-existing package {0}".format(name))
@replace_underscores
def _check_existence(self, name):
"""
For regexp names:
Only part of the matching packages may end up installed if the operation is cancelled early.
Let's say:
1. We install hbase-2-3-.*
2. Only hbase-2-3-1234 is installed, but hbase-2-3-1234-regionserver is not installed yet.
3. We cancel the apt-get run.
In that case this is a bug in the packages we require,
and hbase-2-3-*-regionserver should be added to metainfo.xml.
Existence checking should never fail for hbase-2-3-.* in such a case, otherwise it
would break things like package removal.
Note: this method SHOULD NOT use apt-get (apt.cache uses dpkg, not apt), because we hit a lot of issues when the customer's
apt-get is in an inconsistent state (locked, in use, or pointing at an invalid repo). Once packages are installed
we should not rely on apt-get.
"""
# this method is more optimised than #installed_packages, as here we do not query the available packages (we are not
# interested in which repository a package comes from)
cmd = self.properties.installed_packages_cmd + [name]
with shell.process_executor(cmd, strategy=shell.ReaderStrategy.BufferedChunks, silent=True) as output:
for package, version in AptParser.packages_installed_reader(output):
return package == name
return False
| sekikn/ambari | ambari-common/src/main/python/ambari_commons/repo_manager/apt_manager.py | Python | apache-2.0 | 13,082 |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import errno
import logging
import os
import posix
from functools import reduce
from typing import Optional, Set
logger = logging.getLogger(__name__)
OS_ALIASES = {
"darwin": {"macos", "darwin", "macosx", "mac os x", "mac"},
"linux": {"linux", "linux2"},
}
Pid = int
def get_os_name(uname_result: Optional[posix.uname_result] = None) -> str:
"""
:API: public
"""
if uname_result is None:
uname_result = os.uname()
return uname_result[0].lower()
def normalize_os_name(os_name: str) -> str:
"""
:API: public
"""
if os_name not in OS_ALIASES:
for proper_name, aliases in OS_ALIASES.items():
if os_name in aliases:
return proper_name
logger.warning(
"Unknown operating system name: {bad}, known names are: {known}".format(
bad=os_name, known=", ".join(sorted(known_os_names()))
)
)
return os_name
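# e.g. normalize_os_name("macosx") returns "darwin"; an unrecognized name is logged as a warning and returned unchanged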
def get_normalized_os_name() -> str:
return normalize_os_name(get_os_name())
def known_os_names() -> Set[str]:
return reduce(set.union, OS_ALIASES.values())
# From kill(2) on OSX 10.13:
# [EINVAL] Sig is not a valid, supported signal number.
#
# [EPERM] The sending process is not the super-user and its effective user id does not match the effective user-id of the receiving process. When signaling a process group, this error is returned if
# any members of the group could not be signaled.
#
# [ESRCH] No process or process group can be found corresponding to that specified by pid.
#
# [ESRCH] The process id was given as 0, but the sending process does not have a process group.
def safe_kill(pid: Pid, signum: int) -> None:
"""Kill a process with the specified signal, catching nonfatal errors."""
assert isinstance(pid, Pid)
assert isinstance(signum, int)
try:
os.kill(pid, signum)
except (IOError, OSError) as e:
if e.errno in [errno.ESRCH, errno.EPERM]:
pass
elif e.errno == errno.EINVAL:
raise ValueError(f"Invalid signal number {signum}: {e}", e)
else:
raise
| benjyw/pants | src/python/pants/util/osutil.py | Python | apache-2.0 | 2,336 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
import six
from heat.common import exception
from heat.common import identifier
from heat.engine.clients.os import keystone
from heat.engine import dependencies
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import fakes as test_fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"Random" : {
"Type" : "OS::Heat::RandomString"
}
}
}
'''
class StackResourcesServiceTest(common.HeatTestCase):
def setUp(self):
super(StackResourcesServiceTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_resource_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
self.eng.engine_id = 'engine-fake-uuid'
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
@mock.patch.object(stack.Stack, 'load')
def _test_describe_stack_resource(self, mock_load):
mock_load.return_value = self.stack
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertIn('resource_identity', r)
self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack_identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('metadata', r)
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertIn('attributes', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@tools.stack_context('service_stack_resource_describe__test_stack')
def test_stack_resource_describe(self):
self._test_describe_stack_resource()
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resource_describe_nonexist_stack(self, mock_get):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
mock_get.side_effect = exception.StackNotFound(stack_name='test')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, non_exist_identifier, 'WebServer')
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
mock_get.assert_called_once_with(self.ctx, non_exist_identifier)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resource_describe_nonexist_test_stack')
def test_stack_resource_describe_nonexist_resource(self, mock_load):
mock_load.return_value = self.stack
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@tools.stack_context('service_resource_describe_noncreated_test_stack',
create_res=False)
def test_stack_resource_describe_noncreated_resource(self):
self._test_describe_stack_resource()
@mock.patch.object(service.EngineService, '_authorize_stack_user')
@tools.stack_context('service_resource_describe_user_deny_test_stack')
def test_stack_resource_describe_stack_user_deny(self, mock_auth):
self.ctx.roles = [cfg.CONF.heat_stack_user_role]
mock_auth.return_value = False
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resource,
self.ctx, self.stack.identifier(), 'foo')
self.assertEqual(exception.Forbidden, ex.exc_info[0])
mock_auth.assert_called_once_with(self.ctx, mock.ANY, 'foo')
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_describe_test_stack')
def test_stack_resources_describe(self, mock_load):
mock_load.return_value = self.stack
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
'WebServer')
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_identity', r)
self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack_identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_describe_no_filter_test_stack')
def test_stack_resources_describe_no_filter(self, mock_load):
mock_load.return_value = self.stack
resources = self.eng.describe_stack_resources(
self.ctx, self.stack.identifier(), None)
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resources_describe_bad_lookup(self, mock_get):
mock_get.side_effect = TypeError
self.assertRaises(TypeError,
self.eng.describe_stack_resources,
self.ctx, None, 'WebServer')
mock_get.assert_called_once_with(self.ctx, None)
def test_stack_resources_describe_nonexist_stack(self):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.describe_stack_resources,
self.ctx, non_exist_identifier, 'WebServer')
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
@tools.stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
resources = self.eng.describe_stack_resources(self.ctx,
self.stack.identifier(),
None)
phys_id = resources[0]['physical_resource_id']
result = self.eng.find_physical_resource(self.ctx, phys_id)
self.assertIsInstance(result, dict)
resource_identity = identifier.ResourceIdentifier(**result)
self.assertEqual(self.stack.identifier(), resource_identity.stack())
self.assertEqual('WebServer', resource_identity.resource_name)
def test_find_physical_resource_nonexist(self):
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.find_physical_resource,
self.ctx, 'foo')
self.assertEqual(exception.PhysicalResourceNotFound, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_list(self, mock_load):
mock_load.return_value = self.stack
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier())
self.assertEqual(1, len(resources))
r = resources[0]
self.assertIn('resource_identity', r)
self.assertIn('updated_time', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertEqual('WebServer', r['resource_name'])
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_depth')
def test_stack_resources_list_with_depth(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
2)
self.stack.iter_resources.assert_called_once_with(2)
@mock.patch.object(stack.Stack, 'load')
@tools.stack_context('service_resources_list_test_stack_with_max_depth')
def test_stack_resources_list_with_max_depth(self, mock_load):
mock_load.return_value = self.stack
resources = six.itervalues(self.stack)
self.stack.iter_resources = mock.Mock(return_value=resources)
resources = self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
99)
max_depth = cfg.CONF.max_nested_stack_depth
self.stack.iter_resources.assert_called_once_with(max_depth)
@mock.patch.object(stack.Stack, 'load')
def test_stack_resources_list_deleted_stack(self, mock_load):
stk = tools.setup_stack('resource_list_deleted_stack', self.ctx)
stack_id = stk.identifier()
mock_load.return_value = stk
tools.clean_up_stack(stk)
resources = self.eng.list_stack_resources(self.ctx, stack_id)
self.assertEqual(1, len(resources))
res = resources[0]
self.assertEqual('DELETE', res['resource_action'])
self.assertEqual('COMPLETE', res['resource_status'])
@mock.patch.object(service.EngineService, '_get_stack')
def test_stack_resources_list_nonexist_stack(self, mock_get):
non_exist_identifier = identifier.HeatIdentifier(
self.ctx.tenant_id, 'wibble',
'18d06e2e-44d3-4bef-9fbf-52480d604b02')
mock_get.side_effect = exception.StackNotFound(stack_name='test')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.list_stack_resources,
self.ctx, non_exist_identifier)
self.assertEqual(exception.StackNotFound, ex.exc_info[0])
mock_get.assert_called_once_with(self.ctx, non_exist_identifier,
show_deleted=True)
def _stack_create(self, stack_name):
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=test_fakes.FakeKeystoneClient())
stk = tools.get_stack(stack_name, self.ctx, policy_template)
stk.store()
stk.create()
s = stack_object.Stack.get_by_id(self.ctx, stk.id)
self.patchobject(service.EngineService, '_get_stack', return_value=s)
return stk
def test_signal_reception_async(self):
self.eng.thread_group_mgr = tools.DummyThreadGroupMgrLogStart()
stack_name = 'signal_reception_async'
self.stack = self._stack_create(stack_name)
test_data = {'food': 'yum'}
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
self.assertEqual([(self.stack.id, mock.ANY)],
self.eng.thread_group_mgr.started)
@mock.patch.object(res.Resource, 'signal')
def test_signal_reception_sync(self, mock_signal):
mock_signal.return_value = None
stack_name = 'signal_reception_sync'
self.stack = self._stack_create(stack_name)
test_data = {'food': 'yum'}
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data,
sync_call=True)
mock_signal.assert_called_once_with(mock.ANY, False)
def test_signal_reception_no_resource(self):
stack_name = 'signal_reception_no_resource'
self.stack = self._stack_create(stack_name)
test_data = {'food': 'yum'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'resource_does_not_exist',
test_data)
self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_reception_unavailable_resource(self, mock_get, mock_load):
stack_name = 'signal_reception_unavailable_resource'
stk = tools.get_stack(stack_name, self.ctx, policy_template)
stk.store()
self.stack = stk
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
mock_load.return_value = stk
mock_get.return_value = s
test_data = {'food': 'yum'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal, self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
test_data)
self.assertEqual(exception.ResourceNotAvailable, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY,
use_stored_context=mock.ANY)
mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
@mock.patch.object(res.Resource, 'signal')
def test_signal_returns_metadata(self, mock_signal):
mock_signal.return_value = None
self.stack = self._stack_create('signal_reception')
rsrc = self.stack['WebServerScaleDownPolicy']
test_metadata = {'food': 'yum'}
rsrc.metadata_set(test_metadata)
md = self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
self.assertEqual(test_metadata, md)
mock_signal.assert_called_once_with(mock.ANY, False)
def test_signal_unset_invalid_hook(self):
self.stack = self._stack_create('signal_unset_invalid_hook')
details = {'unset_hook': 'invalid_hook'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal,
self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
details)
msg = 'Invalid hook type "invalid_hook"'
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
def test_signal_unset_not_defined_hook(self):
self.stack = self._stack_create('signal_unset_not_defined_hook')
details = {'unset_hook': 'pre-update'}
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.resource_signal,
self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy',
details)
msg = ('The "pre-update" hook is not defined on '
'AWSScalingPolicy "WebServerScaleDownPolicy"')
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
@mock.patch.object(res.Resource, 'metadata_update')
@mock.patch.object(res.Resource, 'signal')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_calls_metadata_update(self, mock_get, mock_signal,
mock_update):
# fake keystone client
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=test_fakes.FakeKeystoneClient())
stk = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stk
stk.store()
stk.create()
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
mock_get.return_value = s
mock_signal.return_value = None
# this will be called once for the Random resource
mock_update.return_value = None
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
mock_signal.assert_called_once_with(mock.ANY, False)
mock_update.assert_called_once_with()
@mock.patch.object(res.Resource, 'metadata_update')
@mock.patch.object(res.Resource, 'signal')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_no_calls_metadata_update(self, mock_get, mock_signal,
mock_update):
# fake keystone client
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=test_fakes.FakeKeystoneClient())
stk = tools.get_stack('signal_reception', self.ctx, policy_template)
self.stack = stk
stk.store()
stk.create()
s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
mock_get.return_value = s
mock_signal.return_value = None
res.Resource.signal_needs_metadata_updates = False
self.eng.resource_signal(self.ctx,
dict(self.stack.identifier()),
'WebServerScaleDownPolicy', None,
sync_call=True)
mock_get.assert_called_once_with(self.ctx, self.stack.identifier())
mock_signal.assert_called_once_with(mock.ANY, False)
# this will never be called
self.assertEqual(0, mock_update.call_count)
res.Resource.signal_needs_metadata_updates = True
def test_lazy_load_resources(self):
stack_name = 'lazy_load_test'
lazy_load_template = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'foo': {'Type': 'GenericResourceType'},
'bar': {
'Type': 'ResourceWithPropsType',
'Properties': {
'Foo': {'Ref': 'foo'},
}
}
}
}
templ = templatem.Template(lazy_load_template)
stk = stack.Stack(self.ctx, stack_name, templ)
self.assertIsNone(stk._resources)
self.assertIsNone(stk._dependencies)
resources = stk.resources
self.assertIsInstance(resources, dict)
self.assertEqual(2, len(resources))
self.assertIsInstance(resources.get('foo'),
generic_rsrc.GenericResource)
self.assertIsInstance(resources.get('bar'),
generic_rsrc.ResourceWithProps)
stack_dependencies = stk.dependencies
self.assertIsInstance(stack_dependencies, dependencies.Dependencies)
self.assertEqual(2, len(stack_dependencies.graph()))
| pratikmallya/heat | heat/tests/engine/service/test_stack_resources.py | Python | apache-2.0 | 21,657 |
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fail if the C extension module doesn't exist.
Only really intended to be used by internal build scripts.
"""
import sys
sys.path[0:0] = [""]
import bson
import pymongo
if not pymongo.has_c() or not bson.has_c():
sys.exit("could not load C extensions")
| llvtt/mongo-python-driver | tools/fail_if_no_c.py | Python | apache-2.0 | 842 |
from share.transform.chain.exceptions import * # noqa
from share.transform.chain.links import * # noqa
from share.transform.chain.parsers import * # noqa
from share.transform.chain.transformer import ChainTransformer # noqa
from share.transform.chain.links import Context
# Context singleton to be used for parser definitions
# Class SHOULD be thread safe
# Accessing subattributes will result in a new copy of the context
# to avoid leaking data between chains
ctx = Context()
| CenterForOpenScience/SHARE | share/transform/chain/__init__.py | Python | apache-2.0 | 484 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova import compute
from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
vm_state="slightly crunchy", power_state=1, locked_by='owner')
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
vm_state="vm-1", power_state=1, locked_by=None),
fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
vm_state="vm-2", power_state=2, locked_by='admin'),
]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
objects.InstanceList(),
db_list, fields)
class ExtendedStatusTest(test.TestCase):
content_type = 'application/json'
prefix = 'os-extended-status:'
def setUp(self):
super(ExtendedStatusTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app_v3(
init_only=('servers',
'os-extended-status')))
return res
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def assertServerStates(self, server, vm_state, power_state, task_state,
locked_by):
self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
self.assertEqual(int(server.get('%spower_state' % self.prefix)),
power_state)
self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
self.assertEqual(str(server.get('%slocked_by' % self.prefix)),
locked_by)
def test_show(self):
url = '/v3/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertServerStates(self._get_server(res.body),
vm_state='slightly crunchy',
power_state=1,
task_state='kayaking',
locked_by='owner')
def test_detail(self):
url = '/v3/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
self.assertServerStates(server,
vm_state='vm-%s' % (i + 1),
power_state=(i + 1),
task_state='task-%s' % (i + 1),
locked_by=['None', 'admin'][i])
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
| viggates/nova | nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py | Python | apache-2.0 | 4,622 |
# pyOCD debugger
# Copyright (c) 2016-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ..core import exceptions
from ..coresight.cortex_m_core_registers import (CortexMCoreRegisterInfo, index_for_reg)
from .metrics import CacheMetrics
LOG = logging.getLogger(__name__)
class RegisterCache(object):
"""@brief Cache of a core's register values.
The only interesting part of this cache is how it handles the special registers: CONTROL,
FAULTMASK, BASEPRI, PRIMASK, and CFBP. The values of the first four registers are read and written
all at once as the CFBP register through the hardware DCRSR register. On reads of any of these
registers, or the combined CFBP, the cache will ask the underlying context to read CFBP. It will
then update the cache entries for all five registers. Writes to any of these registers just
invalidate all five.
Same logic applies for XPSR submasks.
"""
CFBP_INDEX = index_for_reg('cfbp')
XPSR_INDEX = index_for_reg('xpsr')
CFBP_REGS = [index_for_reg(name) for name in [
'cfbp',
'control',
'faultmask',
'basepri',
'primask',
]]
XPSR_REGS = [index_for_reg(name) for name in [
'xpsr',
'apsr',
'iapsr',
'eapsr',
'ipsr',
'epsr',
'iepsr',
]]
def __init__(self, context, core):
self._context = context
self._core = core
self._run_token = -1
self._reset_cache()
def _reset_cache(self):
self._cache = {}
self._metrics = CacheMetrics()
def _dump_metrics(self):
if self._metrics.total > 0:
LOG.debug("%d reads [%d%% hits, %d regs]", self._metrics.total, self._metrics.percent_hit, self._metrics.hits)
else:
LOG.debug("no accesses")
def _check_cache(self):
"""@brief Invalidates the cache if needed and returns whether the core is running."""
if self._core.is_running():
LOG.debug("core is running; invalidating cache")
self._reset_cache()
return True
elif self._run_token != self._core.run_token:
self._dump_metrics()
LOG.debug("out of date run token; invalidating cache")
self._reset_cache()
self._run_token = self._core.run_token
return False
def _convert_and_check_registers(self, reg_list):
# convert to index only
reg_list = [index_for_reg(reg) for reg in reg_list]
self._core.check_reg_list(reg_list)
return reg_list
def read_core_registers_raw(self, reg_list):
# Invalidate the cache. If the core is still running, just read directly from it.
if self._check_cache():
return self._context.read_core_registers_raw(reg_list)
reg_list = self._convert_and_check_registers(reg_list)
reg_set = set(reg_list)
# Get list of values we have cached.
cached_set = set(r for r in reg_list if r in self._cache)
self._metrics.hits += len(cached_set)
# Read uncached registers from the target.
read_list = list(reg_set.difference(cached_set))
reading_cfbp = any(r for r in read_list if r in self.CFBP_REGS)
reading_xpsr = any(r for r in read_list if r in self.XPSR_REGS)
if reading_cfbp:
if self.CFBP_INDEX not in read_list:
read_list.append(self.CFBP_INDEX)
cfbp_index = read_list.index(self.CFBP_INDEX)
if reading_xpsr:
if self.XPSR_INDEX not in read_list:
read_list.append(self.XPSR_INDEX)
xpsr_index = read_list.index(self.XPSR_INDEX)
self._metrics.misses += len(read_list)
# Read registers not in the cache from the target.
if read_list:
try:
values = self._context.read_core_registers_raw(read_list)
except exceptions.CoreRegisterAccessError:
# Invalidate cache on register read error just to be safe.
self._reset_cache()
raise
else:
values = []
# Update all CFBP based registers.
if reading_cfbp:
v = values[cfbp_index]
self._cache[self.CFBP_INDEX] = v
for r in self.CFBP_REGS:
if r == self.CFBP_INDEX:
continue
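# the CFBP sub-registers carry negative indices, so (-r - 1) is the byte offset of register r within the packed CFBP value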
self._cache[r] = (v >> ((-r - 1) * 8)) & 0xff
# Update all XPSR based registers.
if reading_xpsr:
v = values[xpsr_index]
self._cache[self.XPSR_INDEX] = v
for r in self.XPSR_REGS:
if r == self.XPSR_INDEX:
continue
self._cache[r] = v & CortexMCoreRegisterInfo.get(r).psr_mask
# Build the results list in the same order as requested registers.
results = []
for r in reg_list:
if r in cached_set:
results.append(self._cache[r])
else:
i = read_list.index(r)
v = values[i]
results.append(v)
self._cache[r] = v
return results
# TODO only write dirty registers to target right before running.
def write_core_registers_raw(self, reg_list, data_list):
# Check and invalidate the cache. If the core is still running, just pass the writes
# to our context.
if self._check_cache():
self._context.write_core_registers_raw(reg_list, data_list)
return
reg_list = self._convert_and_check_registers(reg_list)
self._metrics.writes += len(reg_list)
writing_cfbp = any(r for r in reg_list if r in self.CFBP_REGS)
writing_xpsr = any(r for r in reg_list if r in self.XPSR_REGS)
# Update cached register values.
for i, r in enumerate(reg_list):
v = data_list[i]
self._cache[r] = v
# Just remove all cached CFBP and XPSR based register values.
if writing_cfbp:
for r in self.CFBP_REGS:
self._cache.pop(r, None)
if writing_xpsr:
for r in self.XPSR_REGS:
self._cache.pop(r, None)
# Write new register values to target.
try:
self._context.write_core_registers_raw(reg_list, data_list)
except exceptions.CoreRegisterAccessError:
# Invalidate cache on register write error just to be safe.
self._reset_cache()
raise
def invalidate(self):
self._reset_cache()
| pyocd/pyOCD | pyocd/cache/register.py | Python | apache-2.0 | 7,288 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys, os, logging, functools
import multiprocessing as mp
import mxnet as mx
import numpy as np
import random
import shutil
from mxnet.base import MXNetError
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))
import models
from contextlib import contextmanager
import pytest
from tempfile import TemporaryDirectory
import locale
xfail_when_nonstandard_decimal_separator = pytest.mark.xfail(
locale.localeconv()["decimal_point"] != ".",
reason="Some operators break when the decimal separator is set to anything other than \".\". "
"These operators should be rewritten to utilize the new FFI. Please see #18097 for more "
"information."
)
def assertRaises(expected_exception, func, *args, **kwargs):
try:
func(*args, **kwargs)
except expected_exception as e:
pass
else:
# Did not raise exception
assert False, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
def default_logger():
"""A logger used to output seed information to logs."""
logger = logging.getLogger(__name__)
# getLogger() lookups will return the same logger, but only add the handler once.
if not len(logger.handlers):
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(handler)
if (logger.getEffectiveLevel() == logging.NOTSET):
logger.setLevel(logging.INFO)
return logger
@contextmanager
def random_seed(seed=None):
"""
Runs a code block with a new seed for np, mx and python's random.
Parameters
----------
seed : the seed to pass to np.random, mx.random and python's random.
To impose rng determinism, invoke e.g. as in:
with random_seed(1234):
...
To impose rng non-determinism, invoke as in:
with random_seed():
...
Upon conclusion of the block, the rng's are returned to
a state that is a function of their pre-block state, so
any prior non-determinism is preserved.
"""
try:
next_seed = np.random.randint(0, np.iinfo(np.int32).max)
if seed is None:
np.random.seed()
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger = default_logger()
logger.debug('Setting np, mx and python random seeds = %s', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
yield
finally:
# Reinstate prior state of np.random and other generators
np.random.seed(next_seed)
mx.random.seed(next_seed)
random.seed(next_seed)
def _assert_raise_cuxx_version_not_satisfied(min_version, cfg):
def less_than(version_left, version_right):
"""Compares two version strings in the format num(.[num])*"""
if not version_left or not version_right:
return False
left = version_left.split(".")
right = version_right.split(".")
# 0 pad shortest version - e.g.
# less_than("9.1", "9.1.9") == less_than("9.1.0", "9.1.9")
longest = max(len(left), len(right))
left.extend([0] * (longest - len(left)))
right.extend([0] * (longest - len(right)))
# compare each of the version components
for l, r in zip(left, right):
if l == r:
continue
return int(l) < int(r)
return False
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
cuxx_off = os.getenv(cfg['TEST_OFF_ENV_VAR']) == 'true'
cuxx_env_version = os.getenv(cfg['VERSION_ENV_VAR'], None if cuxx_off else cfg['DEFAULT_VERSION'])
cuxx_test_disabled = cuxx_off or less_than(cuxx_env_version, min_version)
if not cuxx_test_disabled or mx.context.current_context().device_type == 'cpu':
orig_test(*args, **kwargs)
else:
pytest.raises((MXNetError, RuntimeError), orig_test, *args, **kwargs)
return test_new
return test_helper
def assert_raises_cudnn_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDNN_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDNN_VERSION',
'DEFAULT_VERSION': '7.3.1'
})
def assert_raises_cuda_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDA_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDA_VERSION',
'DEFAULT_VERSION': '10.1'
})
def with_seed(seed=None):
"""
A decorator for test functions that manages rng seeds.
Parameters
----------
seed : the seed to pass to np.random and mx.random
This tests decorator sets the np, mx and python random seeds identically
prior to each test, then outputs those seeds if the test fails or
if the test requires a fixed seed (as a reminder to make the test
more robust against random data).
@with_seed()
def test_ok_with_random_data():
...
@with_seed(1234)
def test_not_ok_with_random_data():
...
Use of the @with_seed() decorator for all tests creates
test isolation and reproducibility of failures. When a
test fails, the decorator outputs the seed used. The user
can then set the environment variable MXNET_TEST_SEED to
the value reported, then rerun the test with:
pytest --verbose --capture=no <test_module_name.py>::<failing_test>
To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
test_count = int(os.getenv('MXNET_TEST_COUNT', '1'))
env_seed_str = os.getenv('MXNET_TEST_SEED')
for i in range(test_count):
if seed is not None:
this_test_seed = seed
log_level = logging.INFO
elif env_seed_str is not None:
this_test_seed = int(env_seed_str)
log_level = logging.INFO
else:
this_test_seed = np.random.randint(0, np.iinfo(np.int32).max)
log_level = logging.DEBUG
post_test_state = np.random.get_state()
np.random.seed(this_test_seed)
mx.random.seed(this_test_seed)
random.seed(this_test_seed)
logger = default_logger()
# 'pytest --logging-level=DEBUG' shows this msg even with an ensuing core dump.
test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else ''
test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}'
' to reproduce.').format(test_count_msg, this_test_seed)
logger.log(log_level, test_msg)
try:
orig_test(*args, **kwargs)
except:
# With exceptions, repeat test_msg at INFO level to be sure it's seen.
if log_level < logging.INFO:
logger.info(test_msg)
raise
finally:
np.random.set_state(post_test_state)
return test_new
return test_helper
def setup_module():
"""
A function with a 'magic name' executed automatically before each pytest module
(file of tests) that helps reproduce a test segfault by setting and outputting the rng seeds.
The segfault-debug procedure on a module called test_module.py is:
1. run "pytest --verbose test_module.py". A seg-faulting output might be:
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... ok
test_module.test2 ... Illegal instruction (core dumped)
2. Copy the module-starting seed into the next command, then run:
MXNET_MODULE_SEED=4018804151 pytest --logging-level=DEBUG --verbose test_module.py
Output might be:
[WARNING] **** module-level seed is set: all tests running deterministically ****
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
ok
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
3. Copy the segfaulting-test seed into the command:
MXNET_TEST_SEED=1435005594 pytest --logging-level=DEBUG --verbose test_module.py:test2
Output might be:
[INFO] np, mx and python random seeds = 2481884723
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
4. Finally reproduce the segfault directly under gdb (might need additional os packages)
by editing the bottom of test_module.py to be
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
test2()
MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
5. When finished debugging the segfault, remember to unset any exported MXNET_ seed
variables in the environment to return to non-deterministic testing (a good thing).
"""
module_seed_str = os.getenv('MXNET_MODULE_SEED')
logger = default_logger()
if module_seed_str is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
else:
seed = int(module_seed_str)
logger.warn('*** module-level seed is set: all tests running deterministically ***')
logger.info('Setting module np/mx/python random seeds, use MXNET_MODULE_SEED=%s to reproduce.', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
# The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with
# the 'with_seed()' decoration. Inform the user of this once here at the module level.
if os.getenv('MXNET_TEST_SEED') is not None:
logger.warn('*** test-level seed set: all "@with_seed()" tests run deterministically ***')
def teardown_module():
"""
A function with a 'magic name' executed automatically after each pytest test module.
It waits for all operations in one file to finish before carrying on the next.
"""
mx.nd.waitall()
def run_in_spawned_process(func, env, *args):
"""
Helper function to run a test in its own process.
Avoids issues with Singleton- or otherwise-cached environment variable lookups in the backend.
Adds a seed as first arg to propagate determinism.
Parameters
----------
func : function to run in a spawned process.
env : dict of additional environment values to set temporarily in the environment before exec.
args : args to pass to the function.
Returns
-------
Whether the python version supports running the function as a spawned process.
This routine calculates a random seed and passes it into the test as a first argument. If the
test uses random values, it should include an outer 'with random_seed(seed):'. If the
test needs to return values to the caller, consider use of shared variable arguments.
"""
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
return False
else:
seed = np.random.randint(0,1024*1024*1024)
orig_environ = os.environ.copy()
try:
for (key, value) in env.items():
os.environ[key] = str(value)
# Prepend seed as first arg
p = mpctx.Process(target=func, args=(seed,)+args)
p.start()
p.join()
assert p.exitcode == 0, "Non-zero exit code %d from %s()." % (p.exitcode, func.__name__)
finally:
os.environ.clear()
os.environ.update(orig_environ)
return True
def retry(n):
"""Retry n times before failing for stochastic test cases."""
# TODO(szha): replace with flaky
# https://github.com/apache/incubator-mxnet/issues/17803
assert n > 0
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
"""Wrapper for tests function."""
for _ in range(n):
try:
orig_test(*args, **kwargs)
except AssertionError as e:
err = e
continue
return
raise err
return test_new
return test_helper
| zhreshold/mxnet | tests/python/unittest/common.py | Python | apache-2.0 | 13,748 |
from rdflib import Literal
from classes import ldp
from namespaces import dcterms, oa, rdf
# alias the RDFlib Namespace
ns = oa
# Annotation resources
class Annotation(ldp.Resource):
def __init__(self):
super(Annotation, self).__init__()
self.motivation = None
def add_body(self, body):
self.linked_objects.append((oa.hasBody, body))
self.title = body.title
body.annotation = self
def add_target(self, target):
self.linked_objects.append((oa.hasTarget, target))
target.annotation = self
def graph(self):
graph = super(Annotation, self).graph()
graph.add((self.uri, rdf.type, oa.Annotation))
if self.motivation is not None:
graph.add((self.uri, oa.motivatedBy, self.motivation))
return graph
class TextualBody(ldp.Resource):
def __init__(self, value, content_type):
super(TextualBody, self).__init__()
self.value = value
self.content_type = content_type
if len(self.value) <= 25:
self.title = self.value
else:
self.title = self.value[:24] + '…'
def graph(self):
graph = super(TextualBody, self).graph()
graph.add((self.uri, rdf.value, Literal(self.value)))
graph.add((self.uri, dcterms['format'], Literal(self.content_type)))
graph.add((self.uri, rdf.type, oa.TextualBody))
return graph
class SpecificResource(ldp.Resource):
def __init__(self, source):
super(SpecificResource, self).__init__()
self.source = source
def add_selector(self, selector):
self.title = selector.title
self.linked_objects.append((oa.hasSelector, selector))
selector.annotation = self
def graph(self):
graph = super(SpecificResource, self).graph()
graph.add((self.uri, oa.hasSource, self.source.uri))
graph.add((self.uri, rdf.type, oa.SpecificResource))
return graph
class FragmentSelector(ldp.Resource):
def __init__(self, value, conforms_to=None):
super(FragmentSelector, self).__init__()
self.value = value
self.conforms_to = conforms_to
self.title = self.value
def graph(self):
graph = super(FragmentSelector, self).graph()
graph.add((self.uri, rdf.value, Literal(self.value)))
graph.add((self.uri, rdf.type, oa.FragmentSelector))
if self.conforms_to is not None:
graph.add((self.uri, dcterms.conformsTo, self.conforms_to))
return graph
class XPathSelector(ldp.Resource):
def __init__(self, value):
super(XPathSelector, self).__init__()
self.value = value
self.title = self.value
def graph(self):
graph = super(XPathSelector, self).graph()
graph.add((self.uri, rdf.value, Literal(self.value)))
graph.add((self.uri, rdf.type, oa.XPathSelector))
return graph
| peichman-umd/newspaper-batchload | classes/oa.py | Python | apache-2.0 | 2,919 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base types used by other parts of libcloud
"""
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.common.types import InvalidCredsError, InvalidCredsException
__all__ = [
"Provider",
"NodeState",
"DeploymentError",
"DeploymentException",
# @@TR: should the unused imports below be exported?
"LibcloudError",
"MalformedResponseError",
"InvalidCredsError",
"InvalidCredsException",
"DEPRECATED_RACKSPACE_PROVIDERS",
"OLD_CONSTANT_TO_NEW_MAPPING"
]
class Provider(object):
"""
Defines constants for each of the supported providers
:cvar DUMMY: Example provider
:cvar EC2_US_EAST: Amazon AWS US N. Virginia
:cvar EC2_US_WEST: Amazon AWS US N. California
:cvar EC2_EU_WEST: Amazon AWS EU Ireland
:cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers
:cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers
:cvar GCE: Google Compute Engine
:cvar GOGRID: GoGrid
:cvar VPSNET: VPS.net
:cvar LINODE: Linode.com
:cvar VCLOUD: vmware vCloud
:cvar RIMUHOSTING: RimuHosting.com
:cvar ECP: Enomaly
:cvar IBM: IBM Developer Cloud
:cvar OPENNEBULA: OpenNebula.org
:cvar DREAMHOST: DreamHost Private Server
:cvar ELASTICHOSTS: ElasticHosts.com
:cvar CLOUDSIGMA: CloudSigma
:cvar NIMBUS: Nimbus
:cvar BLUEBOX: Bluebox
:cvar OPSOURCE: Opsource Cloud
:cvar DIMENSIONDATA: Dimension Data Cloud
:cvar NINEFOLD: Ninefold
:cvar TERREMARK: Terremark
:cvar EC2_US_WEST_OREGON: Amazon AWS US West 2 (Oregon)
:cvar CLOUDSTACK: CloudStack
:cvar CLOUDSIGMA_US: CloudSigma US Las Vegas
:cvar LIBVIRT: Libvirt driver
:cvar JOYENT: Joyent driver
:cvar VCL: VCL driver
:cvar KTUCLOUD: kt ucloud driver
:cvar GRIDSPOT: Gridspot driver
:cvar ABIQUO: Abiquo driver
:cvar NEPHOSCALE: NephoScale driver
:cvar EXOSCALE: Exoscale driver.
:cvar IKOULA: Ikoula driver.
:cvar OUTSCALE_SAS: Outscale SAS driver.
:cvar OUTSCALE_INC: Outscale INC driver.
:cvar PROFIT_BRICKS: ProfitBricks driver.
:cvar VULTR: vultr driver.
:cvar AZURE: Azure driver.
:cvar AURORACOMPUTE: Aurora Compute driver.
"""
AZURE = 'azure'
DUMMY = 'dummy'
EC2 = 'ec2_us_east'
RACKSPACE = 'rackspace'
GCE = 'gce'
GOGRID = 'gogrid'
VPSNET = 'vpsnet'
LINODE = 'linode'
VCLOUD = 'vcloud'
RIMUHOSTING = 'rimuhosting'
VOXEL = 'voxel'
SOFTLAYER = 'softlayer'
EUCALYPTUS = 'eucalyptus'
ECP = 'ecp'
IBM = 'ibm'
OPENNEBULA = 'opennebula'
DREAMHOST = 'dreamhost'
ELASTICHOSTS = 'elastichosts'
BRIGHTBOX = 'brightbox'
CLOUDSIGMA = 'cloudsigma'
NIMBUS = 'nimbus'
BLUEBOX = 'bluebox'
GANDI = 'gandi'
OPSOURCE = 'opsource'
DIMENSIONDATA = 'dimensiondata'
OPENSTACK = 'openstack'
SKALICLOUD = 'skalicloud'
SERVERLOVE = 'serverlove'
NINEFOLD = 'ninefold'
TERREMARK = 'terremark'
CLOUDSTACK = 'cloudstack'
LIBVIRT = 'libvirt'
JOYENT = 'joyent'
VCL = 'vcl'
KTUCLOUD = 'ktucloud'
GRIDSPOT = 'gridspot'
RACKSPACE_FIRST_GEN = 'rackspace_first_gen'
HOSTVIRTUAL = 'hostvirtual'
ABIQUO = 'abiquo'
DIGITAL_OCEAN = 'digitalocean'
NEPHOSCALE = 'nephoscale'
CLOUDFRAMES = 'cloudframes'
EXOSCALE = 'exoscale'
IKOULA = 'ikoula'
OUTSCALE_SAS = 'outscale_sas'
OUTSCALE_INC = 'outscale_inc'
VSPHERE = 'vsphere'
PROFIT_BRICKS = 'profitbricks'
VULTR = 'vultr'
AURORACOMPUTE = 'aurora_compute'
PACKET = 'packet'
RUNABOVE = 'runabove'
# OpenStack based providers
HPCLOUD = 'hpcloud'
CLOUDWATT = 'cloudwatt'
KILI = 'kili'
ONAPP = 'onapp'
# Deprecated constants which are still supported
EC2_US_EAST = 'ec2_us_east'
EC2_EU = 'ec2_eu_west' # deprecated name
EC2_EU_WEST = 'ec2_eu_west'
EC2_US_WEST = 'ec2_us_west'
EC2_AP_SOUTHEAST = 'ec2_ap_southeast'
EC2_AP_NORTHEAST = 'ec2_ap_northeast'
EC2_US_WEST_OREGON = 'ec2_us_west_oregon'
EC2_SA_EAST = 'ec2_sa_east'
EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2'
ELASTICHOSTS_UK1 = 'elastichosts_uk1'
ELASTICHOSTS_UK2 = 'elastichosts_uk2'
ELASTICHOSTS_US1 = 'elastichosts_us1'
ELASTICHOSTS_US2 = 'elastichosts_us2'
ELASTICHOSTS_US3 = 'elastichosts_us3'
ELASTICHOSTS_CA1 = 'elastichosts_ca1'
ELASTICHOSTS_AU1 = 'elastichosts_au1'
ELASTICHOSTS_CN1 = 'elastichosts_cn1'
CLOUDSIGMA_US = 'cloudsigma_us'
# Deprecated constants which aren't supported anymore
RACKSPACE_UK = 'rackspace_uk'
RACKSPACE_NOVA_BETA = 'rackspace_nova_beta'
RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw'
RACKSPACE_NOVA_LON = 'rackspace_nova_lon'
RACKSPACE_NOVA_ORD = 'rackspace_nova_ord'
# Removed
# SLICEHOST = 'slicehost'
DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK,
Provider.RACKSPACE_NOVA_BETA,
Provider.RACKSPACE_NOVA_DFW,
Provider.RACKSPACE_NOVA_LON,
Provider.RACKSPACE_NOVA_ORD]
OLD_CONSTANT_TO_NEW_MAPPING = {
Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN,
Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN,
Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE
}
class NodeState(object):
"""
Standard states for a node
:cvar RUNNING: Node is running.
:cvar REBOOTING: Node is rebooting.
:cvar TERMINATED: Node is terminated. This node can't be started later on.
:cvar STOPPED: Node is stopped. This node can be started later on.
:cvar PENDING: Node is pending.
:cvar SUSPENDED: Node is suspended.
:cvar ERROR: Node is in an error state. Usually no operations can be performed
on the node once it ends up in the error state.
:cvar PAUSED: Node is paused.
:cvar UNKNOWN: Node state is unknown.
"""
RUNNING = 0
REBOOTING = 1
TERMINATED = 2
PENDING = 3
UNKNOWN = 4
STOPPED = 5
SUSPENDED = 6
ERROR = 7
PAUSED = 8
@classmethod
def tostring(cls, value):
values = cls.__dict__
values = dict([(key, string) for key, string in values.items() if
not key.startswith('__')])
for item_key, item_value in values.items():
if value == item_value:
return item_key
@classmethod
def fromstring(cls, value):
return getattr(cls, value.upper(), None)
class StorageVolumeState(object):
"""
Standard states of a StorageVolume
"""
AVAILABLE = "available"
ERROR = "error"
INUSE = "in_use"
CREATING = "creating"
DELETING = "deleting"
DELETED = "deleted"
BACKUP = "backup"
ATTACHING = "attaching"
UNKNOWN = "unknown"
class VolumeSnapshotState(object):
"""
Standard states of VolumeSnapshots
"""
AVAILABLE = 0
ERROR = 1
CREATING = 2
DELETING = 3
RESTORING = 4
UNKNOWN = 5
class Architecture(object):
"""
Image and size architectures.
:cvar I386: i386 (32 bit)
:cvar X86_64: x86_64 (64 bit)
"""
I386 = 0
X86_X64 = 1
class DeploymentError(LibcloudError):
"""
Exception used when a Deployment Task failed.
:ivar node: :class:`Node` on which this exception happened, you might want
to call :func:`Node.destroy`
"""
def __init__(self, node, original_exception=None, driver=None):
self.node = node
self.value = original_exception
self.driver = driver
def __str__(self):
return self.__repr__()
def __repr__(self):
return (('<DeploymentError: node=%s, error=%s, driver=%s>'
% (self.node.id, str(self.value), str(self.driver))))
class KeyPairError(LibcloudError):
error_type = 'KeyPairError'
def __init__(self, name, driver):
self.name = name
self.value = 'Key pair with name %s does not exist' % (name)
super(KeyPairError, self).__init__(value=self.value, driver=driver)
def __str__(self):
return self.__repr__()
def __repr__(self):
return ('<%s name=%s, value=%s, driver=%s>' %
(self.error_type, self.name, self.value, self.driver.name))
class KeyPairDoesNotExistError(KeyPairError):
error_type = 'KeyPairDoesNotExistError'
"""Deprecated alias of :class:`DeploymentException`"""
DeploymentException = DeploymentError
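# Usage sketch for the helpers above (values follow directly from the
# definitions in this module):
#
#     NodeState.tostring(NodeState.RUNNING)                     # 'RUNNING'
#     NodeState.fromstring('stopped')                           # 5 (NodeState.STOPPED)
#     Provider.RACKSPACE_UK in DEPRECATED_RACKSPACE_PROVIDERS   # True
#     OLD_CONSTANT_TO_NEW_MAPPING[Provider.RACKSPACE_NOVA_DFW]  # 'rackspace'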
|
atsaki/libcloud
|
libcloud/compute/types.py
|
Python
|
apache-2.0
| 9,475
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit testing for affine_channel_op
"""
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
def affine_channel(x, scale, bias, layout):
C = x.shape[1] if layout == 'NCHW' else x.shape[-1]
if len(x.shape) == 4:
new_shape = (1, C, 1, 1) if layout == 'NCHW' else (1, 1, 1, C)
else:
new_shape = (1, C)
scale = scale.reshape(new_shape)
bias = bias.reshape(new_shape)
return x * scale + bias
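# Numerical sketch of the reference implementation above: scale and bias are
# broadcast per channel (axis 1 for 'NCHW', the last axis for 'NHWC'):
#
#     x = np.ones((1, 2, 2, 2))
#     scale = np.array([2.0, 3.0])
#     bias = np.array([0.5, 1.0])
#     y = affine_channel(x, scale, bias, 'NCHW')
#     # y[:, 0] == 2.5 everywhere, y[:, 1] == 4.0 everywhere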
class TestAffineChannelOp(OpTest):
def setUp(self):
self.op_type = "affine_channel"
self.init_test_case()
x = np.random.random(self.shape).astype("float64")
scale = np.random.random(self.C).astype("float64")
bias = np.random.random(self.C).astype("float64")
y = affine_channel(x, scale, bias, self.layout)
self.inputs = {'X': x, 'Scale': scale, 'Bias': bias}
self.attrs = {'data_layout': self.layout}
self.outputs = {'Out': y}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Scale', 'Bias'], 'Out')
def test_check_grad_stopgrad_dx(self):
self.check_grad(['Scale', 'Bias'], 'Out', no_grad_set=set('X'))
def test_check_grad_stopgrad_dscale_dbias(self):
self.check_grad(['X'], 'Out', no_grad_set=set(['Scale', 'Bias']))
def init_test_case(self):
self.shape = [2, 100, 3, 3]
self.C = 100
self.layout = 'NCHW'
class TestAffineChannelOpError(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
def test_x_type():
input_data = np.random.random((2, 1, 2, 2)).astype("float32")
fluid.layers.affine_channel(input_data)
self.assertRaises(TypeError, test_x_type)
def test_x_dtype():
x2 = fluid.layers.data(
name='x2', shape=[None, 1, 2, 2], dtype='int32')
fluid.layers.affine_channel(x2)
self.assertRaises(TypeError, test_x_dtype)
def test_scale_type():
x3 = fluid.layers.data(
name='x3', shape=[None, 1, 2, 2], dtype='float32')
fluid.layers.affine_channel(x3, scale=1)
self.assertRaises(TypeError, test_scale_type)
def test_bias_type():
x4 = fluid.layers.data(
name='x4', shape=[None, 1, 2, 2], dtype='float32')
fluid.layers.affine_channel(x4, bias=1)
self.assertRaises(TypeError, test_bias_type)
class TestAffineChannelNHWC(TestAffineChannelOp):
def init_test_case(self):
self.shape = [2, 3, 3, 100]
self.C = 100
self.layout = 'NHWC'
def test_check_grad_stopgrad_dx(self):
return
def test_check_grad_stopgrad_dscale_dbias(self):
return
class TestAffineChannel2D(TestAffineChannelOp):
def init_test_case(self):
self.shape = [2, 100]
self.C = 100
self.layout = 'NCHW'
def test_check_grad_stopgrad_dx(self):
return
def test_check_grad_stopgrad_dscale_dbias(self):
return
# TODO(qingqing): disable unit testing for large shape
#class TestAffineChannelNCHWLargeShape(TestAffineChannelOp):
# def init_test_case(self):
# self.shape = [4, 128, 112, 112]
# self.C = 128
# self.layout = 'NCHW'
#
# # the gradient check is very slow for large shapes, so skip check_grad
# def test_check_grad(self):
# pass
#
# def test_check_grad_stopgrad_dx(self):
# pass
#
# def test_check_grad_stopgrad_dscale_dbias(self):
# pass
#class TestAffineChannelNHWCLargeShape(TestAffineChannelNCHWLargeShape):
# def init_test_case(self):
# self.shape = [64, 32, 32, 128]
# self.C = 128
# self.layout = 'NHWC'
if __name__ == '__main__':
unittest.main()
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/test_affine_channel_op.py
|
Python
|
apache-2.0
| 4,621
|
#!/usr/bin/env python
#
# Copyright (c) 2017 Hewlett Packard Enterprise, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import libvirt
import os
import time
from tests import data
from tests import utils
def verify_private_key(stdout):
line = [l for l in stdout.split('\n') if l != '']
if ((line[0] == '-----BEGIN PRIVATE KEY-----' and
line[-1] == '-----END PRIVATE KEY-----')):
return stdout
return ''
def cwd(filename):
return os.path.join(os.path.dirname(__file__), 'data', filename)
server1 = data.server['11111111-2222-3333-4444-555555555555']
server1_image = data.image[server1['image_id']]
class DwarfTestCase(utils.TestCase):
def setUp(self):
super(DwarfTestCase, self).setUp()
self.start_dwarf()
def tearDown(self):
self.stop_dwarf()
super(DwarfTestCase, self).tearDown()
def test_nova_flavors(self):
self.exec_verify(['nova', 'flavor-list'],
filename=cwd('nova_flavor-list'))
self.exec_verify(['nova', 'flavor-show', '100'],
filename=cwd('nova_flavor-show'))
self.exec_verify(['nova', 'flavor-delete', '100'],
filename=cwd('nova_flavor-delete'))
self.exec_verify(['nova', 'flavor-create', 'test.flavor', '999',
'1024', '15', '2'],
filename=cwd('nova_flavor-create'))
def test_nova_keypairs(self):
self.exec_verify(['nova', 'keypair-add', 'test key', '--pub-key',
cwd('nova_keypair-add.pub')],
stdout='')
self.exec_verify(['nova', 'keypair-list'],
filename=cwd('nova_keypair-list'))
self.exec_verify(['nova', 'keypair-show', 'test key'],
filename=cwd('nova_keypair-show'))
self.exec_verify(['nova', 'keypair-delete', 'test key'],
stdout='')
self.exec_verify(['nova', 'keypair-add', 'test key'],
callback=verify_private_key)
def test_nova_servers(self):
# Preload an image
self.create_image(server1_image)
self.exec_verify(['nova', 'boot', '--flavor', server1['flavor_id'],
'--image', server1['image_id'], server1['name']],
filename=cwd('nova_boot'))
self.exec_verify(['nova', 'list'],
filename=cwd('nova_list.building'))
libvirt.DOMAIN_STATE = libvirt.VIR_DOMAIN_RUNNING
libvirt.IP_ADDRESS = server1['ip']
time.sleep(3)
# Should show the IP and status 'active'
self.exec_verify(['nova', 'list'],
filename=cwd('nova_list'))
self.exec_verify(['nova', 'show', server1['id']],
filename=cwd('nova_show'))
self.exec_verify(['nova', 'console-log', server1['id']],
stdout='Test server console log\n')
self.exec_verify(['nova', 'stop', server1['id']],
stdout='Request to stop server %s has been '
'accepted.\n' % server1['id'])
# Should show status 'stopped'
self.exec_verify(['nova', 'show', server1['id']],
filename=cwd('nova_show.stopped'))
self.exec_verify(['nova', 'start', server1['id']],
stdout='Request to start server %s has been '
'accepted.\n' % server1['id'])
# Should show status 'active'
self.exec_verify(['nova', 'show', server1['id']],
filename=cwd('nova_show'))
self.exec_verify(['nova', 'reboot', server1['id']],
stdout='Request to reboot server <Server: %s> has '
'been accepted.\n' % server1['name'])
|
dtroyer/dwarf
|
tests/clients/test_nova.py
|
Python
|
apache-2.0
| 4,430
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally import exceptions
from rally.plugins.openstack.scenarios.cinder import utils
from tests.unit import fakes
from tests.unit import test
CINDER_UTILS = "rally.plugins.openstack.scenarios.cinder.utils"
CONF = cfg.CONF
class CinderScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(CinderScenarioTestCase, self).setUp()
self.scenario = utils.CinderScenario(self.context)
def test__list_volumes(self):
return_volumes_list = self.scenario._list_volumes()
self.assertEqual(self.clients("cinder").volumes.list.return_value,
return_volumes_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_volumes")
def test__list_snapshots(self):
return_snapshots_list = self.scenario._list_snapshots()
self.assertEqual(
self.clients("cinder").volume_snapshots.list.return_value,
return_snapshots_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_snapshots")
def test__set_metadata(self):
volume = fakes.FakeVolume()
self.scenario._set_metadata(volume, sets=2, set_size=4)
calls = self.clients("cinder").volumes.set_metadata.call_args_list
self.assertEqual(len(calls), 2)
for call in calls:
call_volume, metadata = call[0]
self.assertEqual(call_volume, volume)
self.assertEqual(len(metadata), 4)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.set_4_metadatas_2_times")
def test__delete_metadata(self):
volume = fakes.FakeVolume()
keys = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"]
self.scenario._delete_metadata(volume, keys, deletes=3, delete_size=4)
calls = self.clients("cinder").volumes.delete_metadata.call_args_list
self.assertEqual(len(calls), 3)
all_deleted = []
for call in calls:
call_volume, del_keys = call[0]
self.assertEqual(call_volume, volume)
self.assertEqual(len(del_keys), 4)
for key in del_keys:
self.assertIn(key, keys)
self.assertNotIn(key, all_deleted)
all_deleted.append(key)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_4_metadatas_3_times")
def test__delete_metadata_not_enough_keys(self):
volume = fakes.FakeVolume()
keys = ["a", "b", "c", "d", "e"]
self.assertRaises(exceptions.InvalidArgumentsException,
self.scenario._delete_metadata,
volume, keys, deletes=2, delete_size=3)
def test__create_volume(self):
return_volume = self.scenario._create_volume(1)
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").volumes.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_volume")
@mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
def test__create_volume_with_size_range(self, mock_random):
mock_random.randint.return_value = 3
return_volume = self.scenario._create_volume(
size={"min": 1, "max": 5},
display_name="TestVolume")
self.clients("cinder").volumes.create.assert_called_once_with(
3, display_name="TestVolume")
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").volumes.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_volume")
def test__update_volume(self):
fake_volume = mock.MagicMock()
volume_update_args = {"display_name": "_updated",
"display_description": "_updated"}
self.scenario.generate_random_name = mock.Mock()
self.scenario._update_volume(fake_volume, **volume_update_args)
self.clients("cinder").volumes.update.assert_called_once_with(
fake_volume,
display_name=self.scenario.generate_random_name.return_value,
display_description="_updated")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.update_volume")
def test__delete_volume(self):
cinder = mock.Mock()
self.scenario._delete_volume(cinder)
cinder.delete.assert_called_once_with()
self.mock_wait_for_status.mock.assert_called_once_with(
cinder,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_volume")
@mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
def test__extend_volume_with_size_range(self, mock_random):
volume = mock.Mock()
mock_random.randint.return_value = 3
self.clients("cinder").volumes.extend.return_value = volume
self.scenario._extend_volume(volume, new_size={"min": 1, "max": 5})
volume.extend.assert_called_once_with(volume, 3)
self.mock_wait_for.mock.assert_called_once_with(
volume,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.extend_volume")
def test__extend_volume(self):
volume = mock.Mock()
self.clients("cinder").volumes.extend.return_value = volume
self.scenario._extend_volume(volume, 2)
self.mock_wait_for.mock.assert_called_once_with(
volume,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.extend_volume")
def test__upload_volume_to_image(self):
volume = mock.Mock()
image = {"os-volume_upload_image": {"image_id": 1}}
volume.upload_to_image.return_value = (None, image)
self.clients("cinder").images.get.return_value = image
self.scenario.generate_random_name = mock.Mock(
return_value="test_vol")
self.scenario._upload_volume_to_image(volume, False,
"container", "disk")
volume.upload_to_image.assert_called_once_with(False, "test_vol",
"container", "disk")
self.mock_wait_for.mock.assert_has_calls([
mock.call(
volume,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.
cinder_volume_create_poll_interval),
mock.call(
self.clients("glance").images.get.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
glance_image_create_poll_interval)
])
self.mock_get_from_manager.mock.assert_has_calls([mock.call(),
mock.call()])
self.mock_resource_is.mock.assert_has_calls([mock.call("available"),
mock.call("active")])
self.clients("glance").images.get.assert_called_once_with(1)
def test__create_snapshot(self):
return_snapshot = self.scenario._create_snapshot("uuid", False)
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").volume_snapshots.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_snapshot)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_snapshot")
def test__delete_snapshot(self):
snapshot = mock.Mock()
self.scenario._delete_snapshot(snapshot)
snapshot.delete.assert_called_once_with()
self.mock_wait_for_status.mock.assert_called_once_with(
snapshot,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_snapshot")
def test__create_backup(self):
return_backup = self.scenario._create_backup("uuid")
self.mock_wait_for.mock.assert_called_once_with(
self.clients("cinder").backups.create.return_value,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_backup)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_backup")
def test__delete_backup(self):
backup = mock.Mock()
self.scenario._delete_backup(backup)
backup.delete.assert_called_once_with()
self.mock_wait_for_status.mock.assert_called_once_with(
backup,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_backup")
def test__restore_backup(self):
backup = mock.Mock()
restore = mock.Mock()
self.clients("cinder").restores.restore.return_value = backup
self.clients("cinder").volumes.get.return_value = restore
return_restore = self.scenario._restore_backup(backup.id, None)
self.mock_wait_for.mock.assert_called_once_with(
restore,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.mock_resource_is.mock.assert_called_once_with("available")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_restore)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.restore_backup")
def test__list_backups(self):
return_backups_list = self.scenario._list_backups()
self.assertEqual(
self.clients("cinder").backups.list.return_value,
return_backups_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_backups")
def test__get_random_server(self):
servers = [1, 2, 3]
context = {"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake",
"users_per_tenant": 1}],
"tenant": {"id": "fake", "servers": servers}}
self.scenario.context = context
self.scenario.clients = mock.Mock()
self.scenario.clients("nova").servers.get = mock.Mock(
side_effect=lambda arg: arg)
server_id = self.scenario.get_random_server()
self.assertIn(server_id, servers)
|
vishnu-kumar/PeformanceFramework
|
tests/unit/plugins/openstack/scenarios/cinder/test_utils.py
|
Python
|
apache-2.0
| 15,937
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-13 03:20
from __future__ import unicode_literals
import django.contrib.postgres.fields.ranges
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0019_auto_20170613_0241'),
]
operations = [
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('time_range', django.contrib.postgres.fields.ranges.DateTimeRangeField()),
('comment', models.TextField()),
('sensor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.Sensor')),
('station', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.Station')),
],
),
]
|
qubs/data-centre
|
climate_data/migrations/0020_annotation.py
|
Python
|
apache-2.0
| 1,145
|
"""
This sample shows how to update the
large thumbnail of an item
Python 2.x
ArcREST 3.0.1
"""
import arcrest
from arcresthelper import securityhandlerhelper
from arcresthelper import common
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect, sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def main():
proxy_port = None
proxy_url = None
securityinfo = {}
securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfo['username'] = ""#<UserName>
securityinfo['password'] = ""#<Password>
securityinfo['org_url'] = "http://www.arcgis.com"
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = None
securityinfo['token_url'] = None
securityinfo['certificatefile'] = None
securityinfo['keyfile'] = None
securityinfo['client_id'] = None
securityinfo['secret_id'] = None
itemId = "" #Item ID
pathToImage = r"" #Path to image
try:
shh = securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo)
if shh.valid == False:
print shh.message
else:
admin = arcrest.manageorg.Administration(securityHandler=shh.securityhandler)
content = admin.content
item = content.getItem(itemId)
itemParams = arcrest.manageorg.ItemParameter()
itemParams.largeThumbnail = pathToImage
print item.userItem.updateItem(itemParameters=itemParams)
except common.ArcRestHelperError as e:
print "error in function: %s" % e[0]['function']
print "error on line: %s" % e[0]['line']
print "error in file name: %s" % e[0]['filename']
print "with error message: %s" % e[0]['synerror']
if 'arcpyError' in e[0]:
print "with arcpy message: %s" % e[0]['arcpyError']
except:
line, filename, synerror = trace()
print "error on line: %s" % line
print "error in file name: %s" % filename
print "with error message: %s" % synerror
if __name__ == "__main__":
main()
|
DShokes/ArcREST
|
samples/update_largethumbnail.py
|
Python
|
apache-2.0
| 2,476
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place <mp@saltstack.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import tornado.gen
import tornado.ioloop
import tornado.testing
import salt.utils
import salt.config
import salt.exceptions
import salt.transport.ipc
import salt.transport.server
import salt.transport.client
from salt.ext.six.moves import range
# Import Salt Testing libs
import integration
from salttesting.mock import MagicMock
from salttesting.helpers import ensure_in_syspath
log = logging.getLogger(__name__)
ensure_in_syspath('../')
class BaseIPCReqCase(tornado.testing.AsyncTestCase):
'''
Test the req server/client pair
'''
def setUp(self):
super(BaseIPCReqCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
self.socket_path = os.path.join(integration.TMP, 'ipc_test.ipc')
self.server_channel = salt.transport.ipc.IPCMessageServer(
self.socket_path,
io_loop=self.io_loop,
payload_handler=self._handle_payload,
)
self.server_channel.start()
self.payloads = []
def tearDown(self):
super(BaseIPCReqCase, self).tearDown()
failures = []
self.server_channel.close()
os.unlink(self.socket_path)
for k, v in self.io_loop._handlers.iteritems():
if self._start_handlers.get(k) != v:
failures.append((k, v))
if len(failures) > 0:
raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@tornado.gen.coroutine
def _handle_payload(self, payload, reply_func):
self.payloads.append(payload)
yield reply_func(payload)
if isinstance(payload, dict) and payload.get('stop'):
self.stop()
class IPCMessageClient(BaseIPCReqCase):
'''
Test sending clear (unencrypted) messages over the IPC channel
'''
def _get_channel(self):
channel = salt.transport.ipc.IPCMessageClient(
socket_path=self.socket_path,
io_loop=self.io_loop,
)
channel.connect(callback=self.stop)
self.wait()
return channel
def setUp(self):
super(IPCMessageClient, self).setUp()
self.channel = self._get_channel()
def tearDown(self):
super(IPCMessageClient, self).tearDown()
self.channel.close()
def test_basic_send(self):
msg = {'foo': 'bar', 'stop': True}
self.channel.send(msg)
self.wait()
self.assertEqual(self.payloads[0], msg)
def test_many_send(self):
msgs = []
self.server_channel.stream_handler = MagicMock()
for i in range(0, 1000):
msgs.append('test_many_send_{0}'.format(i))
for i in msgs:
self.channel.send(i)
self.channel.send({'stop': True})
self.wait()
self.assertEqual(self.payloads[:-1], msgs)
def test_very_big_message(self):
long_str = ''.join([str(num) for num in range(10**5)])
msg = {'long_str': long_str, 'stop': True}
self.channel.send(msg)
self.wait()
self.assertEqual(msg, self.payloads[0])
def test_multistream_sends(self):
local_channel = self._get_channel()
for c in (self.channel, local_channel):
c.send('foo')
self.channel.send({'stop': True})
self.wait()
self.assertEqual(self.payloads[:-1], ['foo', 'foo'])
def test_multistream_errors(self):
local_channel = self._get_channel()
for c in (self.channel, local_channel):
c.send(None)
for c in (self.channel, local_channel):
c.send('foo')
self.channel.send({'stop': True})
self.wait()
self.assertEqual(self.payloads[:-1], [None, None, 'foo', 'foo'])
if __name__ == '__main__':
from integration import run_tests
run_tests(IPCMessageClient, needs_daemon=False)
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/tests/unit/transport/ipc_test.py
|
Python
|
apache-2.0
| 3,969
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-N integration mean tests"""
import numpy as np
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from . import infrastructure as tei
def _get_model(shape, axis, keepdims, input_zp, input_sc, output_zp, output_sc, dtype):
a = relay.var("a", shape=shape, dtype=dtype)
casted = relay.op.cast(a, "int32")
mean = relay.mean(casted, axis, keepdims)
model = relay.qnn.op.requantize(
mean,
input_scale=relay.const(input_sc, "float32"),
input_zero_point=relay.const(input_zp, "int32"),
output_scale=relay.const(output_sc, "float32"),
output_zero_point=relay.const(output_zp, "int32"),
out_dtype=dtype,
)
return model
@requires_ethosn
def test_mean():
trials = [(1, 7, 7, 2048), (1, 8, 8)]
np.random.seed(0)
for shape in trials:
inputs = {
"a": tvm.nd.array(np.random.randint(0, high=255, size=shape, dtype="uint8")),
}
outputs = []
for npu in [False, True]:
model = _get_model(shape, [1, 2], True, 128, 0.0784314, 128, 0.0784314, "uint8")
mod = tei.make_module(model, [])
outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
tei.verify(outputs, "uint8", 1)
|
dmlc/tvm
|
tests/python/contrib/test_ethosn/test_mean.py
|
Python
|
apache-2.0
| 2,066
|
"""Test configuration for the ZHA component."""
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
import zigpy
from zigpy.application import ControllerApplication
import zigpy.config
import zigpy.group
import zigpy.types
from homeassistant.components.zha import DOMAIN
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.setup import async_setup_component
from .common import FakeDevice, FakeEndpoint, get_zha_gateway
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa
FIXTURE_GRP_ID = 0x1001
FIXTURE_GRP_NAME = "fixture group"
@pytest.fixture
def zigpy_app_controller():
"""Zigpy ApplicationController fixture."""
app = MagicMock(spec_set=ControllerApplication)
app.startup = AsyncMock()
app.shutdown = AsyncMock()
groups = zigpy.group.Groups(app)
groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True)
app.configure_mock(groups=groups)
type(app).ieee = PropertyMock()
app.ieee.return_value = zigpy.types.EUI64.convert("00:15:8d:00:02:32:4f:32")
type(app).nwk = PropertyMock(return_value=zigpy.types.NWK(0x0000))
type(app).devices = PropertyMock(return_value={})
return app
@pytest.fixture(name="config_entry")
async def config_entry_fixture(hass):
"""Fixture representing a config entry."""
entry = MockConfigEntry(
version=2,
domain=zha_const.DOMAIN,
data={
zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB0"},
zha_const.CONF_RADIO_TYPE: "ezsp",
},
)
entry.add_to_hass(hass)
return entry
@pytest.fixture
def setup_zha(hass, config_entry, zigpy_app_controller):
"""Set up ZHA component."""
zha_config = {zha_const.CONF_ENABLE_QUIRKS: False}
p1 = patch(
"bellows.zigbee.application.ControllerApplication.new",
return_value=zigpy_app_controller,
)
async def _setup(config=None):
config = config or {}
with p1:
status = await async_setup_component(
hass, zha_const.DOMAIN, {zha_const.DOMAIN: {**zha_config, **config}}
)
assert status is True
await hass.async_block_till_done()
return _setup
@pytest.fixture
def channel():
"""Channel mock factory fixture."""
def channel(name: str, cluster_id: int, endpoint_id: int = 1):
ch = MagicMock()
ch.name = name
ch.generic_id = f"channel_0x{cluster_id:04x}"
ch.id = f"{endpoint_id}:0x{cluster_id:04x}"
ch.async_configure = AsyncMock()
ch.async_initialize = AsyncMock()
return ch
return channel
@pytest.fixture
def zigpy_device_mock(zigpy_app_controller):
"""Make a fake device using the specified cluster classes."""
def _mock_dev(
endpoints,
ieee="00:0d:6f:00:0a:90:69:e7",
manufacturer="FakeManufacturer",
model="FakeModel",
node_descriptor=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
nwk=0xB79C,
patch_cluster=True,
):
"""Make a fake device using the specified cluster classes."""
device = FakeDevice(
zigpy_app_controller, ieee, manufacturer, model, node_descriptor, nwk=nwk
)
for epid, ep in endpoints.items():
endpoint = FakeEndpoint(manufacturer, model, epid)
endpoint.device = device
device.endpoints[epid] = endpoint
endpoint.device_type = ep["device_type"]
profile_id = ep.get("profile_id")
if profile_id:
endpoint.profile_id = profile_id
for cluster_id in ep.get("in_clusters", []):
endpoint.add_input_cluster(cluster_id, _patch_cluster=patch_cluster)
for cluster_id in ep.get("out_clusters", []):
endpoint.add_output_cluster(cluster_id, _patch_cluster=patch_cluster)
return device
return _mock_dev
@pytest.fixture
def zha_device_joined(hass, setup_zha):
"""Return a newly joined ZHA device."""
async def _zha_device(zigpy_dev):
await setup_zha()
zha_gateway = get_zha_gateway(hass)
await zha_gateway.async_device_initialized(zigpy_dev)
await hass.async_block_till_done()
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture
def zha_device_restored(hass, zigpy_app_controller, setup_zha, hass_storage):
"""Return a restored ZHA device."""
async def _zha_device(zigpy_dev, last_seen=None):
zigpy_app_controller.devices[zigpy_dev.ieee] = zigpy_dev
if last_seen is not None:
hass_storage[f"{DOMAIN}.storage"] = {
"key": f"{DOMAIN}.storage",
"version": 1,
"data": {
"devices": [
{
"ieee": str(zigpy_dev.ieee),
"last_seen": last_seen,
"name": f"{zigpy_dev.manufacturer} {zigpy_dev.model}",
}
],
},
}
await setup_zha()
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture(params=["zha_device_joined", "zha_device_restored"])
def zha_device_joined_restored(request):
"""Join or restore ZHA device."""
named_method = request.getfixturevalue(request.param)
named_method.name = request.param
return named_method
@pytest.fixture
def zha_device_mock(hass, zigpy_device_mock):
"""Return a zha Device factory."""
def _zha_device(
endpoints=None,
ieee="00:11:22:33:44:55:66:77",
manufacturer="mock manufacturer",
model="mock model",
node_desc=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
patch_cluster=True,
):
if endpoints is None:
endpoints = {
1: {
"in_clusters": [0, 1, 8, 768],
"out_clusters": [0x19],
"device_type": 0x0105,
},
2: {
"in_clusters": [0],
"out_clusters": [6, 8, 0x19, 768],
"device_type": 0x0810,
},
}
zigpy_device = zigpy_device_mock(
endpoints, ieee, manufacturer, model, node_desc, patch_cluster=patch_cluster
)
zha_device = zha_core_device.ZHADevice(hass, zigpy_device, MagicMock())
return zha_device
return _zha_device
@pytest.fixture
def hass_disable_services(hass):
"""Mock service register."""
with patch.object(hass.services, "async_register"), patch.object(
hass.services, "has_service", return_value=True
):
yield hass
|
turbokongen/home-assistant
|
tests/components/zha/conftest.py
|
Python
|
apache-2.0
| 6,990
|
import json
import pytest
from indy import crypto, did, error
@pytest.mark.asyncio
async def test_auth_crypt_works_for_created_key(wallet_handle, seed_my1, verkey_my2, message):
verkey = await did.create_key(wallet_handle, json.dumps({'seed': seed_my1}))
await crypto.auth_crypt(wallet_handle, verkey, verkey_my2, message)
@pytest.mark.asyncio
async def test_auth_crypt_works_for_unknown_sender_verkey(wallet_handle, verkey_my1, verkey_my2, message):
with pytest.raises(error.WalletItemNotFound):
await crypto.auth_crypt(wallet_handle, verkey_my1, verkey_my2, message)
@pytest.mark.asyncio
async def test_auth_crypt_works_for_invalid_handle(wallet_handle, verkey_my1, verkey_my2, message):
with pytest.raises(error.WalletInvalidHandle):
invalid_wallet_handle = wallet_handle + 1
await crypto.auth_crypt(invalid_wallet_handle, verkey_my1, verkey_my2, message)
@pytest.mark.asyncio
async def test_auth_crypt_works_for_invalid_recipient_vk(wallet_handle, identity_trustee1, message):
(_, key) = identity_trustee1
with pytest.raises(error.CommonInvalidStructure):
await crypto.auth_crypt(wallet_handle, key, 'CnEDk___MnmiHXEV1WFgbV___eYnPqs___TdcZaNhFVW', message)
|
peacekeeper/indy-sdk
|
wrappers/python/tests/crypto/test_auth_crypt.py
|
Python
|
apache-2.0
| 1,227
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
from cinder.brick.initiator import connector
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import coraid
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def to_coraid_kb(gb):
return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):
return '{0}K'.format(to_coraid_kb(gb))
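# Sizing sketch: the helpers above report sizes to the ESM in "K" units,
# computed as ceil(GB * 2**30 / 1000). For 10 GB:
#     to_coraid_kb(10)        # 10737419 (math.ceil of 10737418.24)
#     coraid_volume_size(10)  # '10737419K' ('10737419.0K' on Python 2, where
#                             # math.ceil returns a float)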
fake_esm_ipaddress = "192.168.0.1"
fake_esm_username = "darmok"
fake_esm_group = "tanagra"
fake_esm_group_id = 1
fake_esm_password = "12345678"
fake_coraid_repository_key = 'repository_key'
fake_volume_name = "volume-12345678-1234-1234-1234-1234567890ab"
fake_clone_name = "volume-ffffffff-1234-1234-1234-1234567890ab"
fake_volume_size = 10
fake_repository_name = "A-B:C:D"
fake_pool_name = "FakePool"
fake_aoetarget = 4081
fake_shelf = 16
fake_lun = 241
fake_str_aoetarget = str(fake_aoetarget)
fake_lun_addr = {"shelf": fake_shelf, "lun": fake_lun}
fake_volume_type = {'id': 1}
fake_volume = {"id": fake_volume_name,
"name": fake_volume_name,
"size": fake_volume_size,
"volume_type": fake_volume_type}
fake_clone_volume = {"name": fake_clone_name,
"size": fake_volume_size,
"volume_type": fake_volume_type}
fake_big_clone_volume = {"name": fake_clone_name,
"size": fake_volume_size + 1,
"volume_type": fake_volume_type}
fake_volume_info = {"pool": fake_pool_name,
"repo": fake_repository_name,
"vsxidx": fake_aoetarget,
"index": fake_lun,
"shelf": fake_shelf}
fake_lun_info = {"shelf": fake_shelf, "lun": fake_lun}
fake_snapshot_name = "snapshot-12345678-8888-8888-1234-1234567890ab"
fake_snapshot_id = "12345678-8888-8888-1234-1234567890ab"
fake_volume_id = "12345678-1234-1234-1234-1234567890ab"
fake_snapshot = {"id": fake_snapshot_id,
"name": fake_snapshot_name,
"volume_id": fake_volume_id,
"volume_name": fake_volume_name,
"volume_size": int(fake_volume_size) - 1,
"volume": fake_volume}
fake_configure_data = [{"addr": "cms", "data": "FAKE"}]
fake_esm_fetch = [[
{"command": "super_fake_command"},
{"reply": [
{"lv":
{"containingPool": fake_pool_name,
"lunIndex": fake_aoetarget,
"name": fake_volume_name,
"lvStatus":
{"exportedLun":
{"lun": fake_lun,
"shelf": fake_shelf}}
},
"repoName": fake_repository_name}]}]]
fake_esm_fetch_no_volume = [[
{"command": "super_fake_command"},
{"reply": []}]]
fake_esm_success = {"category": "provider",
"tracking": False,
"configState": "completedSuccessfully",
"heldPending": False,
"metaCROp": "noAction",
"message": None}
fake_group_fullpath = "admin group:%s" % (fake_esm_group)
fake_group_id = 4
fake_login_reply = {"values": [
{"fullPath": fake_group_fullpath,
"groupId": fake_group_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
fake_group_fail_fullpath = "fail group:%s" % (fake_esm_group)
fake_group_fail_id = 5
fake_login_reply_group_fail = {"values": [
{"fullPath": fake_group_fail_fullpath,
"groupId": fake_group_fail_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
def compare(a, b):
if type(a) != type(b):
return False
if type(a) == list or type(a) == tuple:
if len(a) != len(b):
return False
return all(map(lambda t: compare(t[0], t[1]), zip(a, b)))
elif type(a) == dict:
if len(a) != len(b):
return False
for k, v in a.items():
if not compare(v, b[k]):
return False
return True
else:
return a == b
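# Behaviour sketch of compare() above: deep, type-strict, order-sensitive for
# sequences and key-wise for dicts:
#     compare([1, {'a': 2}], [1, {'a': 2}])   # True
#     compare({'a': 1}, {'a': 1, 'b': 2})     # False (lengths differ)
#     compare((1, 2), [1, 2])                 # False (tuple vs list)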
def pack_data(request):
request['data'] = jsonutils.dumps(request['data'])
class FakeRpcBadRequest(Exception):
pass
class FakeRpcIsNotCalled(Exception):
def __init__(self, handle, url_params, data):
self.handle = handle
self.url_params = url_params
self.data = data
def __str__(self):
return 'Fake Rpc handle for {0}/{1}/{2} not found'.format(
self.handle, self.url_params, self.data)
class FakeRpcHandle(object):
def __init__(self, handle, url_params, data, result):
self.handle = handle
self.url_params = url_params
self.data = data
self.result = result
self._is_called = False
def set_called(self):
self._is_called = True
def __call__(self, handle, url_params, data,
allow_empty_response=False):
if handle != self.handle:
raise FakeRpcBadRequest(
'Unexpected handle name {0}. Expected {1}.'
.format(handle, self.handle))
if not compare(url_params, self.url_params):
raise FakeRpcBadRequest('Unexpected url params: {0} / {1}'
.format(url_params, self.url_params))
if not compare(data, self.data):
raise FakeRpcBadRequest('Unexpected data: {0}/{1}'
.format(data, self.data))
if callable(self.result):
return self.result()
else:
return self.result
class FakeRpc(object):
def __init__(self):
self._handles = []
def handle(self, handle, url_params, data, result):
self._handles.append(FakeRpcHandle(handle, url_params, data, result))
def __call__(self, handle_name, url_params, data,
allow_empty_response=False):
for handle in self._handles:
if (handle.handle == handle_name and
compare(handle.url_params, url_params) and
compare(handle.data, data)):
handle.set_called()
return handle(handle_name, url_params, data,
allow_empty_response)
raise FakeRpcIsNotCalled(handle_name, url_params, data)
class CoraidDriverTestCase(test.TestCase):
def setUp(self):
super(CoraidDriverTestCase, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.coraid_default_repository = 'default_repository'
configuration.coraid_esm_address = fake_esm_ipaddress
configuration.coraid_user = fake_esm_username
configuration.coraid_group = fake_esm_group
configuration.coraid_password = fake_esm_password
configuration.volume_name_template = "volume-%s"
configuration.snapshot_name_template = "snapshot-%s"
configuration.coraid_repository_key = fake_coraid_repository_key
configuration.use_multipath_for_image_xfer = False
configuration.enforce_multipath_for_image_xfer = False
configuration.num_volume_device_scan_tries = 3
configuration.volume_dd_blocksize = '1M'
self.fake_rpc = FakeRpc()
self.stubs.Set(coraid.CoraidRESTClient, 'rpc', self.fake_rpc)
self.driver = coraid.CoraidDriver(configuration=configuration)
self.driver.do_setup({})
def mock_volume_types(self, repositories=None):
if not repositories:
repositories = [fake_repository_name]
self.mox.StubOutWithMock(volume_types, 'get_volume_type_extra_specs')
for repository in repositories:
(volume_types
.get_volume_type_extra_specs(fake_volume_type['id'],
fake_coraid_repository_key)
.AndReturn('<in> {0}'.format(repository)))
class CoraidDriverLoginSuccessTestCase(CoraidDriverTestCase):
def setUp(self):
super(CoraidDriverLoginSuccessTestCase, self).setUp()
login_results = {'state': 'adminSucceed',
'values': [
{'fullPath':
'admin group:{0}'.format(fake_esm_group),
'groupId': fake_esm_group_id
}]}
self.fake_rpc.handle('admin', {'op': 'login',
'username': fake_esm_username,
'password': fake_esm_password},
'Login', login_results)
self.fake_rpc.handle('admin', {'op': 'setRbacGroup',
'groupId': fake_esm_group_id},
'Group', {'state': 'adminSucceed'})
class CoraidDriverApplianceTestCase(CoraidDriverLoginSuccessTestCase):
def test_resize_volume(self):
new_volume_size = int(fake_volume_size) + 1
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
reply = {'configState': 'completedSuccessfully'}
resize_volume_request = {'addr': 'cms',
'data': {
'lvName': fake_volume_name,
'newLvName': fake_volume_name + '-resize',
'size':
coraid_volume_size(new_volume_size),
'repoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'resize'}
pack_data(resize_volume_request)
self.fake_rpc.handle('configure', {}, [resize_volume_request],
reply)
real_reply = self.driver.appliance.resize_volume(fake_volume_name,
new_volume_size)
self.assertEqual(reply['configState'], real_reply['configState'])
class CoraidDriverIntegrationalTestCase(CoraidDriverLoginSuccessTestCase):
def setUp(self):
super(CoraidDriverIntegrationalTestCase, self).setUp()
self.appliance = self.driver.appliance
# NOTE(nsobolevsky): prevent re-creation of the ESM appliance
self.stubs.Set(coraid.CoraidDriver, 'appliance', self.appliance)
def test_create_volume(self):
self.mock_volume_types()
create_volume_request = {'addr': 'cms',
'data': {
'servers': [],
'size':
coraid_volume_size(fake_volume_size),
'repoName': fake_repository_name,
'lvName': fake_volume_name},
'op': 'orchStrLun',
'args': 'add'}
pack_data(create_volume_request)
self.fake_rpc.handle('configure', {}, [create_volume_request],
{'configState': 'completedSuccessfully',
'firstParam': 'fake_first_param'})
self.mox.ReplayAll()
self.driver.create_volume(fake_volume)
self.mox.VerifyAll()
@mock.patch.object(volume_types, 'get_volume_type_extra_specs')
def test_create_volume_volume_type_no_repo_key(self, volume_specs_mock):
"""Test volume creation without repo specified in volume type."""
volume_specs_mock.return_value = None
create_volume_request = {'addr': 'cms',
'data': {
'servers': [],
'size':
coraid_volume_size(fake_volume_size),
'repoName': 'default_repository',
'lvName': fake_volume_name},
'op': 'orchStrLun',
'args': 'add'}
pack_data(create_volume_request)
self.fake_rpc.handle('configure', {}, [create_volume_request],
{'configState': 'completedSuccessfully',
'firstParam': 'fake_first_param'})
self.driver.create_volume(fake_volume)
@mock.patch.object(volume_types, 'get_volume_type_extra_specs')
def test_create_volume_volume_type_no_repo_data(self, volume_specs_mock):
"""Test volume creation w/o repo in volume type nor config."""
volume_specs_mock.return_value = None
self.driver.configuration.coraid_default_repository = None
create_volume_request = {'addr': 'cms',
'data': {
'servers': [],
'size':
coraid_volume_size(fake_volume_size),
'repoName': 'default_repository',
'lvName': fake_volume_name},
'op': 'orchStrLun',
'args': 'add'}
pack_data(create_volume_request)
self.fake_rpc.handle('configure', {}, [create_volume_request],
{'configState': 'completedSuccessfully',
'firstParam': 'fake_first_param'})
self.assertRaises(exception.CoraidException,
self.driver.create_volume, fake_volume)
def test_delete_volume(self):
delete_volume_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_volume_name},
'op': 'orchStrLun/verified',
'args': 'delete'}
pack_data(delete_volume_request)
self.fake_rpc.handle('configure', {}, [delete_volume_request],
{'configState': 'completedSuccessfully'})
self.fake_rpc.handle('fetch', {'orchStrRepo': '',
'shelf': 'cms',
'lv': fake_volume_name},
None,
fake_esm_fetch)
self.mox.ReplayAll()
self.driver.delete_volume(fake_volume)
self.mox.VerifyAll()
def test_ping_ok(self):
self.fake_rpc.handle('fetch', {}, None, '')
self.mox.ReplayAll()
self.driver.appliance.ping()
self.mox.VerifyAll()
def test_ping_failed(self):
def rpc(handle, url_params, data,
allow_empty_response=True):
raise test.TestingException("Some exception")
self.stubs.Set(self.driver.appliance, 'rpc', rpc)
self.mox.ReplayAll()
self.assertRaises(exception.CoraidESMNotAvailable,
self.driver.appliance.ping)
self.mox.VerifyAll()
def test_delete_not_existing_lun(self):
delete_volume_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_volume_name},
'op': 'orchStrLun/verified',
'args': 'delete'}
pack_data(delete_volume_request)
self.fake_rpc.handle('configure', {}, [delete_volume_request],
{'configState': 'completedSuccessfully'})
self.fake_rpc.handle('fetch', {'orchStrRepo': '',
'shelf': 'cms',
'lv': fake_volume_name},
None,
fake_esm_fetch_no_volume)
self.mox.ReplayAll()
self.assertRaises(
exception.VolumeNotFound,
self.driver.appliance.delete_lun,
fake_volume['name'])
self.mox.VerifyAll()
def test_delete_not_existing_volumeappliance_is_ok(self):
def delete_lun(volume_name):
raise exception.VolumeNotFound(volume_id=fake_volume['name'])
self.stubs.Set(self.driver.appliance, 'delete_lun', delete_lun)
def ping():
pass
self.stubs.Set(self.driver.appliance, 'ping', ping)
self.mox.ReplayAll()
self.driver.delete_volume(fake_volume)
self.mox.VerifyAll()
def test_delete_not_existing_volume_sleepingappliance(self):
def delete_lun(volume_name):
raise exception.VolumeNotFound(volume_id=fake_volume['name'])
self.stubs.Set(self.driver.appliance, 'delete_lun', delete_lun)
def ping():
raise exception.CoraidESMNotAvailable(reason="Any reason")
self.stubs.Set(self.driver.appliance, 'ping', ping)
self.driver.appliance.ping = ping
self.mox.ReplayAll()
self.assertRaises(exception.CoraidESMNotAvailable,
self.driver.delete_volume,
fake_volume)
self.mox.VerifyAll()
def test_create_snapshot(self):
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
create_snapshot_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_volume_name,
'newLvName': fake_snapshot_name},
'op': 'orchStrLunMods',
'args': 'addClSnap'}
pack_data(create_snapshot_request)
self.fake_rpc.handle('configure', {}, [create_snapshot_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_snapshot(fake_snapshot)
self.mox.VerifyAll()
def test_delete_snapshot(self):
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_snapshot_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
delete_snapshot_request = {'addr': 'cms',
'data': {
'repoName': fake_repository_name,
'lvName': fake_snapshot_name,
'newLvName': 'noop'},
'op': 'orchStrLunMods',
'args': 'delClSnap'}
pack_data(delete_snapshot_request)
self.fake_rpc.handle('configure', {}, [delete_snapshot_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.delete_snapshot(fake_snapshot)
self.mox.VerifyAll()
def test_create_volume_from_snapshot(self):
self.mock_volume_types()
self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume')
self.driver.appliance.resize_volume(fake_volume_name,
fake_volume['size'])\
.AndReturn(None)
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_snapshot_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
create_clone_request = {'addr': 'cms',
'data': {
'lvName': fake_snapshot_name,
'repoName': fake_repository_name,
'newLvName': fake_volume_name,
'newRepoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'addClone'}
pack_data(create_clone_request)
self.fake_rpc.handle('configure', {}, [create_clone_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)
self.mox.VerifyAll()
def test_initialize_connection(self):
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
self.mox.ReplayAll()
connection = self.driver.initialize_connection(fake_volume, {})
self.mox.VerifyAll()
self.assertEqual(connection['driver_volume_type'], 'aoe')
self.assertEqual(connection['data']['target_shelf'], fake_shelf)
self.assertEqual(connection['data']['target_lun'], fake_lun)
def test_get_repository_capabilities(self):
reply = [[{}, {'reply': [
{'name': 'repo1',
'profile':
{'fullName': 'Bronze-Bronze:Profile1'}},
{'name': 'repo2',
'profile':
{'fullName': 'Bronze-Bronze:Profile2'}}]}]]
self.fake_rpc.handle('fetch', {'orchStrRepo': ''}, None,
reply)
self.mox.ReplayAll()
capabilities = self.driver.get_volume_stats(refresh=True)
self.mox.VerifyAll()
self.assertEqual(
capabilities[fake_coraid_repository_key],
'Bronze-Bronze:Profile1:repo1 Bronze-Bronze:Profile2:repo2')
def test_create_cloned_volume(self):
self.mock_volume_types([fake_repository_name])
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
shelf_lun = '{0}.{1}'.format(fake_shelf, fake_lun)
create_clone_request = {'addr': 'cms',
'data': {
'shelfLun': shelf_lun,
'lvName': fake_volume_name,
'repoName': fake_repository_name,
'newLvName': fake_clone_name,
'newRepoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'addClone'}
pack_data(create_clone_request)
self.fake_rpc.handle('configure', {}, [create_clone_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_cloned_volume(fake_clone_volume, fake_volume)
self.mox.VerifyAll()
def test_create_cloned_volume_with_resize(self):
self.mock_volume_types([fake_repository_name])
self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume')
self.driver.appliance.resize_volume(fake_big_clone_volume['name'],
fake_big_clone_volume['size'])\
.AndReturn(None)
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
shelf_lun = '{0}.{1}'.format(fake_shelf, fake_lun)
create_clone_request = {'addr': 'cms',
'data': {
'shelfLun': shelf_lun,
'lvName': fake_volume_name,
'repoName': fake_repository_name,
'newLvName': fake_clone_name,
'newRepoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'addClone'}
pack_data(create_clone_request)
self.fake_rpc.handle('configure', {}, [create_clone_request],
{'configState': 'completedSuccessfully'})
self.mox.ReplayAll()
self.driver.create_cloned_volume(fake_big_clone_volume, fake_volume)
self.mox.VerifyAll()
def test_create_cloned_volume_in_different_repository(self):
self.mock_volume_types([fake_repository_name + '_another'])
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
self.mox.ReplayAll()
self.assertRaises(
exception.CoraidException,
self.driver.create_cloned_volume,
fake_clone_volume,
fake_volume)
self.mox.VerifyAll()
def test_extend_volume(self):
self.mox.StubOutWithMock(self.driver.appliance, 'resize_volume')
self.driver.appliance.resize_volume(fake_volume_name, 10)\
.AndReturn(None)
self.mox.ReplayAll()
self.driver.extend_volume(fake_volume, 10)
self.mox.VerifyAll()
class AutoReloginCoraidTestCase(test.TestCase):
def setUp(self):
super(AutoReloginCoraidTestCase, self).setUp()
self.rest_client = coraid.CoraidRESTClient('https://fake')
self.appliance = coraid.CoraidAppliance(self.rest_client,
'fake_username',
'fake_password',
'fake_group')
def _test_auto_relogin_fail(self, state):
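        # The ESM answers three rpc calls in a row with a failed-session state
        # while only two relogin attempts are mocked, so the retry logic has to
        # give up and raise CoraidESMReloginFailed.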
self.mox.StubOutWithMock(self.rest_client, 'rpc')
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': state,
'metaCROp': 'reboot'})
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': state,
'metaCROp': 'reboot'})
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': state,
'metaCROp': 'reboot'})
self.mox.StubOutWithMock(self.appliance, '_ensure_session')
self.appliance._ensure_session().AndReturn(None)
self.mox.StubOutWithMock(self.appliance, '_relogin')
self.appliance._relogin().AndReturn(None)
self.appliance._relogin().AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.CoraidESMReloginFailed,
self.appliance.rpc,
'fake_handle', {}, None, False)
self.mox.VerifyAll()
def test_auto_relogin_fail_admin(self):
self._test_auto_relogin_fail('GeneralAdminFailure')
def test_auto_relogin_fail_inactivity(self):
self._test_auto_relogin_fail('passwordInactivityTimeout')
def test_auto_relogin_fail_absolute(self):
self._test_auto_relogin_fail('passwordAbsoluteTimeout')
def test_auto_relogin_success(self):
self.mox.StubOutWithMock(self.rest_client, 'rpc')
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': 'GeneralAdminFailure',
'metaCROp': 'reboot'})
self.rest_client.rpc('fake_handle', {}, None, False).\
AndReturn({'state': 'ok'})
self.mox.StubOutWithMock(self.appliance, '_ensure_session')
self.appliance._ensure_session().AndReturn(None)
self.mox.StubOutWithMock(self.appliance, '_relogin')
self.appliance._relogin().AndReturn(None)
self.mox.ReplayAll()
reply = self.appliance.rpc('fake_handle', {}, None, False)
self.mox.VerifyAll()
self.assertEqual(reply['state'], 'ok')
class CoraidDriverImageTestCases(CoraidDriverTestCase):
def setUp(self):
super(CoraidDriverImageTestCases, self).setUp()
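        # Shared fixture for the image copy tests below: the driver hands out an
        # AoE connection, a brick AoE connector attaches the volume at
        # fake_dev_path, and the connection is torn down again afterwards.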
self.fake_dev_path = '/dev/ether/fake_dev'
self.fake_connection = {'driver_volume_type': 'aoe',
'data': {'target_shelf': fake_shelf,
'target_lun': fake_lun}}
self.fake_volume_info = {
'shelf': self.fake_connection['data']['target_shelf'],
'lun': self.fake_connection['data']['target_lun']}
self.mox.StubOutWithMock(self.driver, 'initialize_connection')
self.driver.initialize_connection(fake_volume, {})\
.AndReturn(self.fake_connection)
self.mox.StubOutWithMock(self.driver, 'terminate_connection')
self.driver.terminate_connection(fake_volume, mox.IgnoreArg(),
force=False).AndReturn(None)
root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
self.mox.StubOutWithMock(connector, 'get_connector_properties')
connector.get_connector_properties(root_helper,
CONF.my_ip, False, False).\
AndReturn({})
self.mox.StubOutWithMock(utils, 'brick_get_connector')
aoe_initiator = self.mox.CreateMockAnything()
utils.brick_get_connector('aoe',
device_scan_attempts=3,
use_multipath=False,
conn=mox.IgnoreArg()).\
AndReturn(aoe_initiator)
aoe_initiator\
.connect_volume(self.fake_connection['data'])\
.AndReturn({'path': self.fake_dev_path})
aoe_initiator.check_valid_device(self.fake_dev_path, mox.IgnoreArg())\
.AndReturn(True)
aoe_initiator.disconnect_volume(
{'target_shelf': self.fake_volume_info['shelf'],
'target_lun': self.fake_volume_info['lun']}, mox.IgnoreArg())
def test_copy_volume_to_image(self):
fake_image_service = 'fake-image-service'
fake_image_meta = 'fake-image-meta'
self.mox.StubOutWithMock(image_utils, 'upload_volume')
image_utils.upload_volume({},
fake_image_service,
fake_image_meta,
self.fake_dev_path)
self.mox.ReplayAll()
self.driver.copy_volume_to_image({},
fake_volume,
fake_image_service,
fake_image_meta)
self.mox.VerifyAll()
def test_copy_image_to_volume(self):
fake_image_service = 'fake-image-service'
fake_image_id = 'fake-image-id;'
self.mox.StubOutWithMock(image_utils, 'fetch_to_raw')
image_utils.fetch_to_raw({},
fake_image_service,
fake_image_id,
self.fake_dev_path,
mox.IgnoreArg(),
size=fake_volume_size)
self.mox.ReplayAll()
self.driver.copy_image_to_volume({},
fake_volume,
fake_image_service,
fake_image_id)
self.mox.VerifyAll()
class CoraidResetConnectionTestCase(CoraidDriverTestCase):
def test_create_new_appliance_for_every_request(self):
self.mox.StubOutWithMock(coraid, 'CoraidRESTClient')
self.mox.StubOutWithMock(coraid, 'CoraidAppliance')
coraid.CoraidRESTClient(mox.IgnoreArg())
coraid.CoraidRESTClient(mox.IgnoreArg())
coraid.CoraidAppliance(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('fake_app1')
coraid.CoraidAppliance(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('fake_app2')
self.mox.ReplayAll()
self.assertEqual(self.driver.appliance, 'fake_app1')
self.assertEqual(self.driver.appliance, 'fake_app2')
self.mox.VerifyAll()
| Akrog/cinder | cinder/tests/test_coraid.py | Python | apache-2.0 | 33,873 |
"""The tests for the Tasmota sensor platform."""
import copy
import datetime
from datetime import timedelta
import json
from unittest.mock import Mock, patch
import hatasmota
from hatasmota.utils import (
get_topic_stat_status,
get_topic_tele_sensor,
get_topic_tele_will,
)
import pytest
from homeassistant import config_entries
from homeassistant.components import sensor
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_UNKNOWN
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message, async_fire_time_changed
DEFAULT_SENSOR_CONFIG = {
"sn": {
"Time": "2020-09-25T12:47:15",
"DHT11": {"Temperature": None},
"TempUnit": "C",
}
}
BAD_INDEXED_SENSOR_CONFIG_3 = {
"sn": {
"Time": "2020-09-25T12:47:15",
"ENERGY": {
"ApparentPower": [7.84, 1.23, 2.34],
},
}
}
INDEXED_SENSOR_CONFIG = {
"sn": {
"Time": "2020-09-25T12:47:15",
"ENERGY": {
"TotalStartTime": "2018-11-23T15:33:47",
"Total": 0.017,
"TotalTariff": [0.000, 0.017],
"Yesterday": 0.000,
"Today": 0.002,
"ExportActive": 0.000,
"ExportTariff": [0.000, 0.000],
"Period": 0.00,
"Power": 0.00,
"ApparentPower": 7.84,
"ReactivePower": -7.21,
"Factor": 0.39,
"Frequency": 50.0,
"Voltage": 234.31,
"Current": 0.039,
"ImportActive": 12.580,
"ImportReactive": 0.002,
"ExportReactive": 39.131,
"PhaseAngle": 290.45,
},
}
}
INDEXED_SENSOR_CONFIG_2 = {
"sn": {
"Time": "2020-09-25T12:47:15",
"ENERGY": {
"TotalStartTime": "2018-11-23T15:33:47",
"Total": [0.000, 0.017],
"TotalTariff": [0.000, 0.017],
"Yesterday": 0.000,
"Today": 0.002,
"ExportActive": 0.000,
"ExportTariff": [0.000, 0.000],
"Period": 0.00,
"Power": 0.00,
"ApparentPower": 7.84,
"ReactivePower": -7.21,
"Factor": 0.39,
"Frequency": 50.0,
"Voltage": 234.31,
"Current": 0.039,
"ImportActive": 12.580,
"ImportReactive": 0.002,
"ExportReactive": 39.131,
"PhaseAngle": 290.45,
},
}
}
NESTED_SENSOR_CONFIG = {
"sn": {
"Time": "2020-03-03T00:00:00+00:00",
"TX23": {
"Speed": {"Act": 14.8, "Avg": 8.5, "Min": 12.2, "Max": 14.8},
"Dir": {
"Card": "WSW",
"Deg": 247.5,
"Avg": 266.1,
"AvgCard": "W",
"Range": 0,
},
},
"SpeedUnit": "km/h",
}
}
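# The dicts above mirror the JSON payload a Tasmota device publishes on its
# .../sensors discovery topic; in these tests the structure is what drives
# sensor discovery, and actual readings arrive later via tele/SENSOR messages.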
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_dht11_temperature")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
entity_reg = er.async_get(hass)
entry = entity_reg.async_get("sensor.tasmota_dht11_temperature")
assert entry.disabled is False
assert entry.disabled_by is None
assert entry.entity_category is None
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_dht11_temperature")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test periodic state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"DHT11":{"Temperature":20.5}}'
)
state = hass.states.get("sensor.tasmota_dht11_temperature")
assert state.state == "20.5"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"DHT11":{"Temperature":20.0}}}',
)
state = hass.states.get("sensor.tasmota_dht11_temperature")
assert state.state == "20.0"
async def test_nested_sensor_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(NESTED_SENSOR_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_tx23_speed_act")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_tx23_speed_act")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test periodic state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"TX23":{"Speed":{"Act":"12.3"}}}'
)
state = hass.states.get("sensor.tasmota_tx23_speed_act")
assert state.state == "12.3"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"TX23":{"Speed":{"Act":"23.4"}}}}',
)
state = hass.states.get("sensor.tasmota_tx23_speed_act")
assert state.state == "23.4"
async def test_indexed_sensor_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(INDEXED_SENSOR_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_energy_totaltariff_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_energy_totaltariff_1")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test periodic state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"ENERGY":{"TotalTariff":[1.2,3.4]}}'
)
state = hass.states.get("sensor.tasmota_energy_totaltariff_1")
assert state.state == "3.4"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"ENERGY":{"TotalTariff":[5.6,7.8]}}}',
)
state = hass.states.get("sensor.tasmota_energy_totaltariff_1")
assert state.state == "7.8"
async def test_indexed_sensor_state_via_mqtt2(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT for sensor with last_reset property."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(INDEXED_SENSOR_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_energy_total")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert (
state.attributes[sensor.ATTR_STATE_CLASS] == sensor.STATE_CLASS_TOTAL_INCREASING
)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_energy_total")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test periodic state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"ENERGY":{"Total":1.2,"TotalStartTime":"2018-11-23T15:33:47"}}',
)
state = hass.states.get("sensor.tasmota_energy_total")
assert state.state == "1.2"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"ENERGY":{"Total":5.6,"TotalStartTime":"2018-11-23T16:33:47"}}}',
)
state = hass.states.get("sensor.tasmota_energy_total")
assert state.state == "5.6"
async def test_indexed_sensor_state_via_mqtt3(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT for indexed sensor with last_reset property."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(INDEXED_SENSOR_CONFIG_2)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_energy_total_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
assert (
state.attributes[sensor.ATTR_STATE_CLASS] == sensor.STATE_CLASS_TOTAL_INCREASING
)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_energy_total_1")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test periodic state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/tele/SENSOR",
'{"ENERGY":{"Total":[1.2, 3.4],"TotalStartTime":"2018-11-23T15:33:47"}}',
)
state = hass.states.get("sensor.tasmota_energy_total_1")
assert state.state == "3.4"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"ENERGY":{"Total":[5.6,7.8],"TotalStartTime":"2018-11-23T16:33:47"}}}',
)
state = hass.states.get("sensor.tasmota_energy_total_1")
assert state.state == "7.8"
async def test_bad_indexed_sensor_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT where sensor is not matching configuration."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(BAD_INDEXED_SENSOR_CONFIG_3)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test periodic state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"ENERGY":{"ApparentPower":[1.2,3.4,5.6]}}'
)
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "1.2"
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "3.4"
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "5.6"
# Test periodic state update with too few values
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"ENERGY":{"ApparentPower":[7.8,9.0]}}'
)
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "7.8"
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "9.0"
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "5.6"
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/SENSOR", '{"ENERGY":{"ApparentPower":2.3}}'
)
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "2.3"
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "9.0"
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "5.6"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"ENERGY":{"ApparentPower":[1.2,3.4,5.6]}}}',
)
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "1.2"
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "3.4"
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "5.6"
# Test polled state update with too few values
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"ENERGY":{"ApparentPower":[7.8,9.0]}}}',
)
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "7.8"
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "9.0"
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "5.6"
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS10",
'{"StatusSNS":{"ENERGY":{"ApparentPower":2.3}}}',
)
state = hass.states.get("sensor.tasmota_energy_apparentpower_0")
assert state.state == "2.3"
state = hass.states.get("sensor.tasmota_energy_apparentpower_1")
assert state.state == "9.0"
state = hass.states.get("sensor.tasmota_energy_apparentpower_2")
assert state.state == "5.6"
@pytest.mark.parametrize("status_sensor_disabled", [False])
async def test_status_sensor_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
entity_reg = er.async_get(hass)
# Pre-enable the status sensor
entity_reg.async_get_or_create(
sensor.DOMAIN,
"tasmota",
"00000049A3BC_status_sensor_status_sensor_status_signal",
suggested_object_id="tasmota_status",
disabled_by=None,
)
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_status")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test pushed state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/tele/STATE", '{"Wifi":{"Signal":20.5}}'
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "20.5"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS11",
'{"StatusSTS":{"Wifi":{"Signal":20.0}}}',
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "20.0"
# Test force update flag
entity = hass.data["entity_components"]["sensor"].get_entity(
"sensor.tasmota_status"
)
assert entity.force_update
@pytest.mark.parametrize("status_sensor_disabled", [False])
async def test_single_shot_status_sensor_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
entity_reg = er.async_get(hass)
# Pre-enable the status sensor
entity_reg.async_get_or_create(
sensor.DOMAIN,
"tasmota",
"00000049A3BC_status_sensor_status_sensor_status_restart_reason",
suggested_object_id="tasmota_status",
disabled_by=None,
)
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_status")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS1",
'{"StatusPRM":{"RestartReason":"Some reason"}}',
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "Some reason"
# Test polled state update is ignored
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS1",
'{"StatusPRM":{"RestartReason":"Another reason"}}',
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "Some reason"
# Device signals online again
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "Some reason"
# Test polled state update
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS1",
'{"StatusPRM":{"RestartReason":"Another reason"}}',
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "Another reason"
# Test polled state update is ignored
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS1",
'{"StatusPRM":{"RestartReason":"Third reason"}}',
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "Another reason"
@pytest.mark.parametrize("status_sensor_disabled", [False])
@patch.object(hatasmota.status_sensor, "datetime", Mock(wraps=datetime.datetime))
async def test_restart_time_status_sensor_state_via_mqtt(
hass, mqtt_mock, setup_tasmota
):
"""Test state update via MQTT."""
entity_reg = er.async_get(hass)
# Pre-enable the status sensor
entity_reg.async_get_or_create(
sensor.DOMAIN,
"tasmota",
"00000049A3BC_status_sensor_status_sensor_last_restart_time",
suggested_object_id="tasmota_status",
disabled_by=None,
)
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_status")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test polled state update
utc_now = datetime.datetime(2020, 11, 11, 8, 0, 0, tzinfo=dt.UTC)
hatasmota.status_sensor.datetime.now.return_value = utc_now
async_fire_mqtt_message(
hass,
"tasmota_49A3BC/stat/STATUS11",
'{"StatusSTS":{"UptimeSec":"3600"}}',
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_status")
assert state.state == "2020-11-11T07:00:00+00:00"
async def test_attributes(hass, mqtt_mock, setup_tasmota):
"""Test correct attributes for sensors."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = {
"sn": {
"DHT11": {"Temperature": None},
"Beer": {"CarbonDioxide": None},
"TempUnit": "C",
}
}
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_dht11_temperature")
assert state.attributes.get("device_class") == "temperature"
assert state.attributes.get("friendly_name") == "Tasmota DHT11 Temperature"
assert state.attributes.get("icon") is None
assert state.attributes.get("unit_of_measurement") == "°C"
state = hass.states.get("sensor.tasmota_beer_CarbonDioxide")
assert state.attributes.get("device_class") == "carbon_dioxide"
assert state.attributes.get("friendly_name") == "Tasmota Beer CarbonDioxide"
assert state.attributes.get("icon") is None
assert state.attributes.get("unit_of_measurement") == "ppm"
async def test_nested_sensor_attributes(hass, mqtt_mock, setup_tasmota):
"""Test correct attributes for sensors."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(NESTED_SENSOR_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_tx23_speed_act")
assert state.attributes.get("device_class") is None
assert state.attributes.get("friendly_name") == "Tasmota TX23 Speed Act"
assert state.attributes.get("icon") is None
assert state.attributes.get("unit_of_measurement") == "km/h"
state = hass.states.get("sensor.tasmota_tx23_dir_avg")
assert state.attributes.get("device_class") is None
assert state.attributes.get("friendly_name") == "Tasmota TX23 Dir Avg"
assert state.attributes.get("icon") is None
assert state.attributes.get("unit_of_measurement") == " "
async def test_indexed_sensor_attributes(hass, mqtt_mock, setup_tasmota):
"""Test correct attributes for sensors."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = {
"sn": {
"Dummy1": {"Temperature": [None, None]},
"Dummy2": {"CarbonDioxide": [None, None]},
"TempUnit": "C",
}
}
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_dummy1_temperature_0")
assert state.attributes.get("device_class") == "temperature"
assert state.attributes.get("friendly_name") == "Tasmota Dummy1 Temperature 0"
assert state.attributes.get("icon") is None
assert state.attributes.get("unit_of_measurement") == "°C"
state = hass.states.get("sensor.tasmota_dummy2_carbondioxide_1")
assert state.attributes.get("device_class") == "carbon_dioxide"
assert state.attributes.get("friendly_name") == "Tasmota Dummy2 CarbonDioxide 1"
assert state.attributes.get("icon") is None
assert state.attributes.get("unit_of_measurement") == "ppm"
@pytest.mark.parametrize("status_sensor_disabled", [False])
@pytest.mark.parametrize(
"sensor_name, disabled, disabled_by",
[
("tasmota_firmware_version", True, er.DISABLED_INTEGRATION),
("tasmota_ip", True, er.DISABLED_INTEGRATION),
("tasmota_last_restart_time", False, None),
("tasmota_mqtt_connect_count", False, None),
("tasmota_rssi", True, er.DISABLED_INTEGRATION),
("tasmota_signal", True, er.DISABLED_INTEGRATION),
("tasmota_ssid", False, None),
("tasmota_wifi_connect_count", False, None),
],
)
async def test_diagnostic_sensors(
hass, mqtt_mock, setup_tasmota, sensor_name, disabled, disabled_by
):
"""Test properties of diagnostic sensors."""
entity_reg = er.async_get(hass)
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(f"sensor.{sensor_name}")
assert bool(state) != disabled
entry = entity_reg.async_get(f"sensor.{sensor_name}")
assert entry.disabled == disabled
assert entry.disabled_by == disabled_by
assert entry.entity_category == "diagnostic"
@pytest.mark.parametrize("status_sensor_disabled", [False])
async def test_enable_status_sensor(hass, mqtt_mock, setup_tasmota):
"""Test enabling status sensor."""
entity_reg = er.async_get(hass)
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_signal")
assert state is None
entry = entity_reg.async_get("sensor.tasmota_signal")
assert entry.disabled
assert entry.disabled_by == er.DISABLED_INTEGRATION
# Enable the signal level status sensor
updated_entry = entity_reg.async_update_entity(
"sensor.tasmota_signal", disabled_by=None
)
assert updated_entry != entry
assert updated_entry.disabled is False
await hass.async_block_till_done()
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(seconds=config_entries.RELOAD_AFTER_UPDATE_DELAY + 1),
)
await hass.async_block_till_done()
# Fake re-send of retained discovery message
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("sensor.tasmota_signal")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
state = hass.states.get("sensor.tasmota_signal")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
await help_test_availability_when_connection_lost(
hass,
mqtt_client_mock,
mqtt_mock,
sensor.DOMAIN,
config,
sensor_config,
"tasmota_dht11_temperature",
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
await help_test_availability(
hass,
mqtt_mock,
sensor.DOMAIN,
config,
sensor_config,
"tasmota_dht11_temperature",
)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
await help_test_availability_discovery_update(
hass,
mqtt_mock,
sensor.DOMAIN,
config,
sensor_config,
"tasmota_dht11_temperature",
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
poll_topic = "tasmota_49A3BC/cmnd/STATUS"
await help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
sensor.DOMAIN,
config,
poll_topic,
"10",
sensor_config,
)
async def test_discovery_removal_sensor(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered sensor."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config1 = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
await help_test_discovery_removal(
hass,
mqtt_mock,
caplog,
sensor.DOMAIN,
config,
config,
sensor_config1,
{},
"tasmota_dht11_temperature",
"Tasmota DHT11 Temperature",
)
async def test_discovery_update_unchanged_sensor(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test update of discovered sensor."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
with patch(
"homeassistant.components.tasmota.sensor.TasmotaSensor.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass,
mqtt_mock,
caplog,
sensor.DOMAIN,
config,
discovery_update,
sensor_config,
"tasmota_dht11_temperature",
"Tasmota DHT11 Temperature",
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
unique_id = f"{DEFAULT_CONFIG['mac']}_sensor_sensor_DHT11_Temperature"
await help_test_discovery_device_remove(
hass, mqtt_mock, sensor.DOMAIN, unique_id, config, sensor_config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
topics = [
get_topic_tele_sensor(config),
get_topic_stat_status(config, 10),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass,
mqtt_mock,
sensor.DOMAIN,
config,
topics,
sensor_config,
"tasmota_dht11_temperature",
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
sensor_config = copy.deepcopy(DEFAULT_SENSOR_CONFIG)
await help_test_entity_id_update_discovery_update(
hass,
mqtt_mock,
sensor.DOMAIN,
config,
sensor_config,
"tasmota_dht11_temperature",
)
| aronsky/home-assistant | tests/components/tasmota/test_sensor.py | Python | apache-2.0 | 33,360 |
# Minimal support for git commands on an hg repository
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''browse the repository in a graphical way
The hgk extension allows browsing the history of a repository in a
graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
distributed with Mercurial.)
hgk consists of two parts: a Tcl script that does the displaying and
querying of information, and an extension to Mercurial named hgk.py,
which provides hooks for hgk to get information. hgk can be found in
the contrib directory, and the extension is shipped in the hgext
repository, and needs to be enabled.
The hg view command will launch the hgk Tcl script. For this command
to work, hgk must be in your search path. Alternately, you can specify
the path to hgk in your .hgrc file::
[hgk]
path=/location/of/hgk
hgk can make use of the extdiff extension to visualize revisions.
Assuming you had already configured extdiff vdiff command, just add::
[hgk]
vdiff=vdiff
Revisions context menu will now display additional entries to fire
vdiff on hovered and selected revisions.
'''
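# A minimal sketch of an hgrc that enables the extension (the hgk path below is
# illustrative and depends on where the Tcl script actually lives):
#
#   [extensions]
#   hgk =
#
#   [hgk]
#   path = /location/of/hgk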
import os
from mercurial import commands, util, patch, revlog, cmdutil
from mercurial.node import nullid, nullrev, short
from mercurial.i18n import _
def difftree(ui, repo, node1=None, node2=None, *files, **opts):
"""diff trees from two commits"""
def __difftree(repo, node1, node2, files=[]):
assert node2 is not None
mmap = repo[node1].manifest()
mmap2 = repo[node2].manifest()
m = cmdutil.match(repo, files)
modified, added, removed = repo.status(node1, node2, m)[:3]
empty = short(nullid)
for f in modified:
# TODO get file permissions
ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
(short(mmap[f]), short(mmap2[f]), f, f))
for f in added:
ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
(empty, short(mmap2[f]), f, f))
for f in removed:
ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
(short(mmap[f]), empty, f, f))
##
while True:
if opts['stdin']:
try:
line = raw_input().split(' ')
node1 = line[0]
if len(line) > 1:
node2 = line[1]
else:
node2 = None
except EOFError:
break
node1 = repo.lookup(node1)
if node2:
node2 = repo.lookup(node2)
else:
node2 = node1
node1 = repo.changelog.parents(node1)[0]
if opts['patch']:
if opts['pretty']:
catcommit(ui, repo, node2, "")
m = cmdutil.match(repo, files)
chunks = patch.diff(repo, node1, node2, match=m,
opts=patch.diffopts(ui, {'git': True}))
for chunk in chunks:
ui.write(chunk)
else:
__difftree(repo, node1, node2, files=files)
if not opts['stdin']:
break
def catcommit(ui, repo, n, prefix, ctx=None):
nlprefix = '\n' + prefix
if ctx is None:
ctx = repo[n]
ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
for p in ctx.parents():
ui.write("parent %s\n" % p)
date = ctx.date()
description = ctx.description().replace("\0", "")
lines = description.splitlines()
if lines and lines[-1].startswith('committer:'):
committer = lines[-1].split(': ')[1].rstrip()
else:
committer = ctx.user()
ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
ui.write("revision %d\n" % ctx.rev())
ui.write("branch %s\n\n" % ctx.branch())
if prefix != "":
ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
else:
ui.write(description + "\n")
if prefix:
ui.write('\0')
def base(ui, repo, node1, node2):
"""output common ancestor information"""
node1 = repo.lookup(node1)
node2 = repo.lookup(node2)
n = repo.changelog.ancestor(node1, node2)
ui.write(short(n) + "\n")
def catfile(ui, repo, type=None, r=None, **opts):
"""cat a specific revision"""
# in stdin mode, every line except the commit is prefixed with two
    # spaces. This way our caller can find the commit without magic
# strings
#
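    # e.g. with --stdin each input line looks like "commit <rev>"; the matching
    # commit is written back before the next line is read.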
prefix = ""
if opts['stdin']:
try:
(type, r) = raw_input().split(' ')
prefix = " "
except EOFError:
return
else:
if not type or not r:
ui.warn(_("cat-file: type or revision not supplied\n"))
commands.help_(ui, 'cat-file')
while r:
if type != "commit":
ui.warn(_("aborting hg cat-file only understands commits\n"))
return 1
n = repo.lookup(r)
catcommit(ui, repo, n, prefix)
if opts['stdin']:
try:
(type, r) = raw_input().split(' ')
except EOFError:
break
else:
break
# git rev-tree is a confusing thing. You can supply a number of
# commit sha1s on the command line, and it walks the commit history
# telling you which commits are reachable from the supplied ones via
# a bitmask based on arg position.
# you can specify a commit to stop at by starting the sha1 with ^
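# Example: given the args ['^stop_rev', 'want_rev'], commits reachable from
# want_rev are reported, the walk does not descend past stop_rev, and bit i of
# the reported mask corresponds to the i-th supplied revision.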
def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
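    # chlogwalk yields (rev, ctx-or-None) pairs from newest to oldest, reading
    # the changelog in chunks of 100; the ctx is only populated when full
    # commit data was requested (full is not None).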
def chlogwalk():
count = len(repo)
i = count
l = [0] * 100
chunk = 100
while True:
if chunk > i:
chunk = i
i = 0
else:
i -= chunk
for x in xrange(chunk):
if i + x >= count:
l[chunk - x:] = [0] * (chunk - x)
break
if full != None:
l[x] = repo[i + x]
l[x].changeset() # force reading
else:
l[x] = 1
for x in xrange(chunk - 1, -1, -1):
if l[x] != 0:
yield (i + x, full != None and l[x] or None)
if i == 0:
break
# calculate and return the reachability bitmask for sha
def is_reachable(ar, reachable, sha):
if len(ar) == 0:
return 1
mask = 0
for i in xrange(len(ar)):
if sha in reachable[i]:
mask |= 1 << i
return mask
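    # e.g. with three wanted revisions, a mask of 5 (0b101) marks a commit as
    # reachable from the first and the third of them; a mask of 0 filters it out.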
reachable = []
stop_sha1 = []
want_sha1 = []
count = 0
# figure out which commits they are asking for and which ones they
# want us to stop on
for i, arg in enumerate(args):
if arg.startswith('^'):
s = repo.lookup(arg[1:])
stop_sha1.append(s)
want_sha1.append(s)
elif arg != 'HEAD':
want_sha1.append(repo.lookup(arg))
# calculate the graph for the supplied commits
for i, n in enumerate(want_sha1):
reachable.append(set())
visit = [n]
reachable[i].add(n)
while visit:
n = visit.pop(0)
if n in stop_sha1:
continue
for p in repo.changelog.parents(n):
if p not in reachable[i]:
reachable[i].add(p)
visit.append(p)
if p in stop_sha1:
continue
# walk the repository looking for commits that are in our
# reachability graph
for i, ctx in chlogwalk():
n = repo.changelog.node(i)
mask = is_reachable(want_sha1, reachable, n)
if mask:
parentstr = ""
if parents:
pp = repo.changelog.parents(n)
if pp[0] != nullid:
parentstr += " " + short(pp[0])
if pp[1] != nullid:
parentstr += " " + short(pp[1])
if not full:
ui.write("%s%s\n" % (short(n), parentstr))
elif full == "commit":
ui.write("%s%s\n" % (short(n), parentstr))
catcommit(ui, repo, n, ' ', ctx)
else:
(p1, p2) = repo.changelog.parents(n)
(h, h1, h2) = map(short, (n, p1, p2))
(i1, i2) = map(repo.changelog.rev, (p1, p2))
date = ctx.date()[0]
ui.write("%s %s:%s" % (date, h, mask))
mask = is_reachable(want_sha1, reachable, p1)
if i1 != nullrev and mask > 0:
ui.write("%s:%s " % (h1, mask)),
mask = is_reachable(want_sha1, reachable, p2)
if i2 != nullrev and mask > 0:
ui.write("%s:%s " % (h2, mask))
ui.write("\n")
if maxnr and count >= maxnr:
break
count += 1
def revparse(ui, repo, *revs, **opts):
"""parse given revisions"""
def revstr(rev):
if rev == 'HEAD':
rev = 'tip'
return revlog.hex(repo.lookup(rev))
for r in revs:
revrange = r.split(':', 1)
ui.write('%s\n' % revstr(revrange[0]))
if len(revrange) == 2:
ui.write('^%s\n' % revstr(revrange[1]))
# git rev-list tries to order things by date, and has the ability to stop
# at a given commit without walking the whole repo. TODO add the stop
# parameter
def revlist(ui, repo, *revs, **opts):
"""print revisions"""
if opts['header']:
full = "commit"
else:
full = None
copy = [x for x in revs]
revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
def config(ui, repo, **opts):
"""print extension options"""
def writeopt(name, value):
ui.write('k=%s\nv=%s\n' % (name, value))
writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
def view(ui, repo, *etc, **opts):
"start interactive history viewer"
os.chdir(repo.root)
optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
ui.debug("running %s\n" % cmd)
util.system(cmd)
cmdtable = {
"^view":
(view,
[('l', 'limit', '', _('limit number of changes displayed'))],
_('hg view [-l LIMIT] [REVRANGE]')),
"debug-diff-tree":
(difftree,
[('p', 'patch', None, _('generate patch')),
('r', 'recursive', None, _('recursive')),
('P', 'pretty', None, _('pretty')),
('s', 'stdin', None, _('stdin')),
('C', 'copy', None, _('detect copies')),
('S', 'search', "", _('search'))],
_('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
"debug-cat-file":
(catfile,
[('s', 'stdin', None, _('stdin'))],
_('hg debug-cat-file [OPTION]... TYPE FILE')),
"debug-config":
(config, [], _('hg debug-config')),
"debug-merge-base":
(base, [], _('hg debug-merge-base REV REV')),
"debug-rev-parse":
(revparse,
[('', 'default', '', _('ignored'))],
_('hg debug-rev-parse REV')),
"debug-rev-list":
(revlist,
[('H', 'header', None, _('header')),
('t', 'topo-order', None, _('topo-order')),
('p', 'parents', None, _('parents')),
('n', 'max-count', 0, _('max-count'))],
_('hg debug-rev-list [OPTION]... REV...')),
}
| joewalnes/idea-community | plugins/hg4idea/testData/bin/hgext/hgk.py | Python | apache-2.0 | 11,681 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import copy
import re
"""
Created on July 12, 2018
@author: wangc
"""
def _deweird(s):
"""
Sometimes numpy loadtxt returns strings like "b'stuff'"
This converts them to "stuff"
@ In, s, str, possibly weird string
@ Out, _deweird, str, possibly less weird string
"""
if type(s) == str and s.startswith("b'") and s.endswith("'"):
return s[2:-1]
else:
return s
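# Example: _deweird("b'Seal Failure'") returns "Seal Failure"; a normal string
# is returned unchanged.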
class SaphireData:
"""
  Class that parses SAPHIRE output files and writes a RAVEN-compatible CSV
"""
def __init__(self, outFiles):
"""
Initialize the class
@ In, outFiles, list, list of output files of SAPHIRE
@ Out, None
"""
self.headerNames = [] # list of variable names in SAPHIRE output files
self.outData = [] # list of variable values in SAPHIRE output files
for outFile in outFiles:
outFileName, outFileType = outFile[0], outFile[1]
if outFileType == 'uncertainty':
headers, data = self.getUncertainty(outFileName)
self.headerNames.extend(headers)
self.outData.extend(data)
elif outFileType == 'importance':
headers, data = self.getImportance(outFileName)
self.headerNames.extend(headers)
self.outData.extend(data)
elif outFileType == 'quantiles':
print("File:",outFileName, "with type", outFileType, "is not implemented yet! Skipping" )
pass
else:
raise IOError('The output file', outFileName, 'with type', outFileType, 'is not supported yet!')
def getUncertainty(self, outName):
"""
Method to extract the uncertainty information of Event Tree or Fault Tree from SAPHIRE output files
@ In, outName, string, the name of output file
@ Out, (headerNames,outData), tuple, where headerNames is a list of output variable names and
outData is a list of output variable values
"""
headerNames = []
outData = []
outFile = os.path.abspath(os.path.expanduser(outName))
data = np.loadtxt(outFile, dtype=object, delimiter=',', skiprows=2)
headers = data[0]
for i in range(1, len(data)):
for j in range(1, len(headers)):
name = _deweird(data[i,0]).strip().replace(" ", "~")
header = _deweird(headers[j]).strip().replace(" ", "~")
headerNames.append(name + '_' + header)
outData.append(float(_deweird(data[i,j])))
return headerNames, outData
def getImportance(self, outName):
"""
Method to extract the importance information of Fault Tree from SAPHIRE output files
@ In, outName, string, the name of output file
@ Out, headerNames, list, list of output variable names
@ Out, outData, list, list of output variable values
"""
headerNames = []
outData = []
outFile = os.path.abspath(os.path.expanduser(outName))
data = np.loadtxt(outFile, dtype=object, delimiter=',', skiprows=2)
headers = data[0]
for i in range(1, len(data)):
for j in range(1, len(headers)):
name = _deweird(data[i,0]).strip().replace(" ", "~")
header = _deweird(headers[j]).strip().replace(" ", "~")
headerNames.append(name + '_' + header)
outData.append(float(_deweird(data[i,j])))
return headerNames, outData
def writeCSV(self, output):
"""
Print data into CSV format
@ In, output, str, the name of output file
@ Out, None
"""
outObj = open(output.strip()+".csv", mode='w+b') if not output.endswith('csv') else open(output.strip(), mode='w+b')
# create string for header names
headerString = ",".join(self.headerNames)
# write & save array as csv file
    # FIXME: numpy.savetxt has a quirk: if the provided data is a 1D array_like, the delimiter is
    # ignored and the output file format is not correct
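    # Wrapping the data in an outer list below makes savetxt see a single 2-D
    # row, so the delimiter is honored and all values end up on one line.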
np.savetxt(outObj, [self.outData], delimiter=',', header=headerString, comments='')
outObj.close()
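# A minimal usage sketch (the file names here are hypothetical):
#   reader = SaphireData([('uncertainty_et.csv', 'uncertainty'),
#                         ('importance_ft.csv', 'importance')])
#   reader.writeCSV('saphire_results')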
| idaholab/raven | framework/CodeInterfaces/Saphire/SaphireData.py | Python | apache-2.0 | 4,508 |
"""Support for BMW car locks with BMW ConnectedDrive."""
import logging
from homeassistant.components.bmw_connected_drive import DOMAIN as BMW_DOMAIN
from homeassistant.components.lock import LockDevice
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
DEPENDENCIES = ['bmw_connected_drive']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BMW Connected Drive lock."""
accounts = hass.data[BMW_DOMAIN]
_LOGGER.debug('Found BMW accounts: %s',
', '.join([a.name for a in accounts]))
devices = []
for account in accounts:
if not account.read_only:
for vehicle in account.account.vehicles:
device = BMWLock(account, vehicle, 'lock', 'BMW lock')
devices.append(device)
add_entities(devices, True)
class BMWLock(LockDevice):
"""Representation of a BMW vehicle lock."""
def __init__(self, account, vehicle, attribute: str, sensor_name):
"""Initialize the lock."""
self._account = account
self._vehicle = vehicle
self._attribute = attribute
self._name = '{} {}'.format(self._vehicle.name, self._attribute)
self._unique_id = '{}-{}'.format(self._vehicle.vin, self._attribute)
self._sensor_name = sensor_name
self._state = None
@property
def should_poll(self):
"""Do not poll this class.
Updates are triggered from BMWConnectedDriveAccount.
"""
return False
@property
def unique_id(self):
"""Return the unique ID of the lock."""
return self._unique_id
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the lock."""
vehicle_state = self._vehicle.state
return {
'car': self._vehicle.name,
'door_lock_state': vehicle_state.door_lock_state.value
}
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state == STATE_LOCKED
def lock(self, **kwargs):
"""Lock the car."""
_LOGGER.debug("%s: locking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_LOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_lock()
def unlock(self, **kwargs):
"""Unlock the car."""
_LOGGER.debug("%s: unlocking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_UNLOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_unlock()
def update(self):
"""Update state of the lock."""
from bimmer_connected.state import LockState
_LOGGER.debug("%s: updating data for %s", self._vehicle.name,
self._attribute)
vehicle_state = self._vehicle.state
# Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
self._state = STATE_LOCKED \
if vehicle_state.door_lock_state \
in [LockState.LOCKED, LockState.SECURED] \
else STATE_UNLOCKED
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
| nugget/home-assistant | homeassistant/components/bmw_connected_drive/lock.py | Python | apache-2.0 | 3,775 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common array methods."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import math
import numbers
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.util import nest
newaxis = np_export.np_export_constant(__name__, 'newaxis', np.newaxis)
@np_utils.np_doc('empty')
def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
return zeros(shape, dtype)
@np_utils.np_doc('empty_like')
def empty_like(a, dtype=None):
return zeros_like(a, dtype)
@np_utils.np_doc('zeros')
def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
dtype = (
np_utils.result_type(dtype) if dtype else np_dtypes.default_float_type())
return array_ops.zeros(shape, dtype=dtype)
@np_utils.np_doc('zeros_like')
def zeros_like(a, dtype=None): # pylint: disable=missing-docstring
if dtype is None:
# We need to let np_utils.result_type decide the dtype, not tf.zeros_like
dtype = np_utils.result_type(a)
else:
    # TF and numpy have different interpretations of Python types such as
# `float`, so we let `np_utils.result_type` decide.
dtype = np_utils.result_type(dtype)
dtype = dtypes.as_dtype(dtype) # Work around b/149877262
return array_ops.zeros_like(a, dtype)
@np_utils.np_doc('ones')
def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
if dtype:
dtype = np_utils.result_type(dtype)
return array_ops.ones(shape, dtype=dtype)
@np_utils.np_doc('ones_like')
def ones_like(a, dtype=None):
if dtype is None:
dtype = np_utils.result_type(a)
else:
dtype = np_utils.result_type(dtype)
return array_ops.ones_like(a, dtype)
@np_utils.np_doc('eye')
def eye(N, M=None, k=0, dtype=float): # pylint: disable=invalid-name,missing-docstring
if dtype:
dtype = np_utils.result_type(dtype)
if not M:
M = N
# Making sure N, M and k are `int`
N = int(N)
M = int(M)
k = int(k)
if k >= M or -k >= N:
# tf.linalg.diag will raise an error in this case
return zeros([N, M], dtype=dtype)
if k == 0:
return linalg_ops.eye(N, M, dtype=dtype)
# We need the precise length, otherwise tf.linalg.diag will raise an error
diag_len = min(N, M)
if k > 0:
if N >= M:
diag_len -= k
elif N + k > M:
diag_len = M - k
elif k <= 0:
if M >= N:
diag_len += k
elif M - k > N:
diag_len = N + k
diagonal_ = array_ops.ones([diag_len], dtype=dtype)
return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k)
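# A small usage sketch, added for illustration and not part of the original
# module (the helper name below is invented): it exercises the diag_len
# bookkeeping in `eye` for an offset diagonal in a non-square matrix.
def _eye_offset_example_sketch():
  # 3x5 matrix with the diagonal shifted right by k=2; since N + k <= M the
  # diagonal keeps min(N, M) = 3 entries, so diag_len stays 3.
  return eye(3, 5, k=2)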
@np_utils.np_doc('identity')
def identity(n, dtype=float):
return eye(N=n, M=n, dtype=dtype)
@np_utils.np_doc('full')
def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
if not isinstance(shape, np_arrays.ndarray):
shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
shape = atleast_1d(shape)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, shape)
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
@np_utils.np_doc_only('full_like')
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): # pylint: disable=missing-docstring,redefined-outer-name
"""order, subok and shape arguments mustn't be changed."""
if order != 'K':
raise ValueError('Non-standard orders are not supported.')
if not subok:
raise ValueError('subok being False is not supported.')
if shape:
raise ValueError('Overriding the shape is not supported.')
a = asarray(a)
dtype = dtype or np_utils.result_type(a)
fill_value = asarray(fill_value, dtype=dtype)
return array_ops.broadcast_to(fill_value, array_ops.shape(a))
def _array_internal(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Main implementation of np.array()."""
result_t = val
if not isinstance(result_t, ops.Tensor):
if not dtype:
dtype = np_utils.result_type(result_t)
# We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
# convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
# while np.array allows them. We need to convert-then-cast.
# EagerTensor conversion complains about "mixed types" when converting
# tensors with no dtype information. This is because it infers types based
# on one selected item in the list. So e.g. when converting [2., 2j]
# to a tensor, it will select float32 as the inferred type and not be able
    # to convert the list to a float32 tensor.
# Since we have some information about the final dtype we care about, we
# supply that information so that convert_to_tensor will do best-effort
# conversion to that dtype first.
result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
result_t = math_ops.cast(result_t, dtype=dtype)
elif dtype:
result_t = math_ops.cast(result_t, dtype)
if copy:
result_t = array_ops.identity(result_t)
if ndmin == 0:
return result_t
ndims = array_ops.rank(result_t)
def true_fn():
old_shape = array_ops.shape(result_t)
new_shape = array_ops.concat(
[array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
return array_ops.reshape(result_t, new_shape)
result_t = np_utils.cond(
np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)
return result_t
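# Illustrative sketch (an addition for this write-up, not original code; the
# helper name is made up): it shows the convert-then-cast behaviour described
# in the comments above, where a value/dtype pair that convert_to_tensor would
# reject, such as (5.5, int32), is converted first and then cast, mirroring
# np.array(5.5, dtype=int) == 5.
def _array_internal_cast_sketch():
  return _array_internal(5.5, dtype=np.int32)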
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Since Tensors are immutable, a copy is made only if val is placed on a
different device than the current one. Even if `copy` is False, a new Tensor
may need to be built to satisfy `dtype` and `ndim`. This is used only if `val`
is an ndarray or a Tensor.
""" # pylint:disable=g-docstring-missing-newline
if dtype:
dtype = np_utils.result_type(dtype)
return _array_internal(val, dtype, copy, ndmin)
# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc('asarray')
def asarray(a, dtype=None):
if dtype:
dtype = np_utils.result_type(dtype)
if isinstance(a, np_arrays.ndarray) and (
not dtype or dtype == a.dtype.as_numpy_dtype):
return a
return array(a, dtype, copy=False)
@np_utils.np_doc('asanyarray')
def asanyarray(a, dtype=None):
return asarray(a, dtype)
@np_utils.np_doc('ascontiguousarray')
def ascontiguousarray(a, dtype=None):
return array(a, dtype, ndmin=1)
# Numerical ranges.
@np_utils.np_doc('arange')
def arange(start, stop=None, step=1, dtype=None):
"""Returns `step`-separated values in the range [start, stop).
Args:
start: Start of the interval. Included in the range.
stop: End of the interval. If not specified, `start` is treated as 0 and
`start` value is used as `stop`. If specified, it is not included in the
range if `step` is integer. When `step` is floating point, it may or may
not be included.
step: The difference between 2 consecutive values in the output range. It is
recommended to use `linspace` instead of using non-integer values for
`step`.
dtype: Optional. Type of the resulting ndarray. Could be a python type, a
NumPy type or a TensorFlow `DType`. If not provided, the largest type of
`start`, `stop`, `step` is used.
Raises:
ValueError: If step is zero.
"""
if not step:
raise ValueError('step must be non-zero.')
if dtype:
dtype = np_utils.result_type(dtype)
else:
if stop is None:
dtype = np_utils.result_type(start, step)
else:
dtype = np_utils.result_type(start, step, stop)
if step > 0 and ((stop is not None and start > stop) or
(stop is None and start < 0)):
return array([], dtype=dtype)
if step < 0 and ((stop is not None and start < stop) or
(stop is None and start > 0)):
return array([], dtype=dtype)
# TODO(srbs): There are some bugs when start or stop is float type and dtype
# is integer type.
return math_ops.cast(
math_ops.range(start, limit=stop, delta=step), dtype=dtype)
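# A brief usage sketch (added for illustration only; the helper name is
# invented): it shows the dtype inference and the empty-range behaviour that
# the `arange` docstring above describes.
def _arange_example_sketch():
  a = arange(5)             # [0, 1, 2, 3, 4], integer dtype inferred
  b = arange(5, 1)          # empty, since step=1 cannot reach stop=1 from 5
  c = arange(0., 1., 0.25)  # float dtype inferred from the arguments
  return a, b, c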
# Building matrices.
@np_utils.np_doc('diag')
def diag(v, k=0): # pylint: disable=missing-docstring
"""Raises an error if input is not 1- or 2-d."""
v = asarray(v)
v_rank = array_ops.rank(v)
v.shape.with_rank_at_most(2)
# TODO(nareshmodi): Consider a np_utils.Assert version that will fail during
# tracing time if the shape is known.
control_flow_ops.Assert(
np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)),
[v_rank])
def _diag(v, k):
return np_utils.cond(
math_ops.equal(array_ops.size(v), 0),
lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype),
lambda: array_ops.matrix_diag(v, k=k))
def _diag_part(v, k):
v_shape = array_ops.shape(v)
v, k = np_utils.cond(
np_utils.logical_or(
np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)),
np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)),
), lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
result = array_ops.matrix_diag_part(v, k=k)
return result
result = np_utils.cond(
math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
return result
@np_utils.np_doc('diagonal')
def diagonal(a, offset=0, axis1=0, axis2=1): # pylint: disable=missing-docstring
a = asarray(a)
maybe_rank = a.shape.rank
if maybe_rank is not None and offset == 0 and (
axis1 == maybe_rank - 2 or axis1 == -2) and (axis2 == maybe_rank - 1 or
axis2 == -1):
return array_ops.matrix_diag_part(a)
a = moveaxis(a, (axis1, axis2), (-2, -1))
a_shape = array_ops.shape(a)
def _zeros(): # pylint: disable=missing-docstring
return (array_ops.zeros(
array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype), 0)
# All zeros since diag_part doesn't handle all possible k (aka offset).
# Written this way since cond will run shape inference on both branches,
# and diag_part shape inference will fail when offset is out of bounds.
a, offset = np_utils.cond(
np_utils.logical_or(
np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)),
np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)),
), _zeros, lambda: (a, offset))
a = array_ops.matrix_diag_part(a, k=offset)
return a
@np_utils.np_doc('diagflat')
def diagflat(v, k=0):
v = asarray(v)
return diag(array_ops.reshape(v, [-1]), k)
def _promote_dtype(*arrays):
dtype = np_utils.result_type(*arrays)
def _fast_asarray(a):
if isinstance(a, np_arrays.ndarray) and dtype == a.dtype.as_numpy_dtype:
return a
return _array_internal(a, dtype=dtype, copy=False)
return [_fast_asarray(a) for a in arrays]
def _promote_dtype_binary(t1, t2):
dtype = np_utils._result_type_binary(t1, t2) # pylint: disable=protected-access
if not(
isinstance(t1, np_arrays.ndarray) and dtype == t1.dtype.as_numpy_dtype):
t1 = _array_internal(t1, dtype=dtype, copy=False)
if not(
isinstance(t2, np_arrays.ndarray) and dtype == t2.dtype.as_numpy_dtype):
t2 = _array_internal(t2, dtype=dtype, copy=False)
return t1, t2
@np_utils.np_doc('all')
def all(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
a = asarray(a, dtype=bool)
return math_ops.reduce_all(input_tensor=a, axis=axis, keepdims=keepdims)
@np_utils.np_doc('any')
def any(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
a = asarray(a, dtype=bool)
return math_ops.reduce_any(input_tensor=a, axis=axis, keepdims=keepdims)
@np_utils.np_doc('compress')
def compress(condition, a, axis=None): # pylint: disable=redefined-outer-name,missing-function-docstring
condition = asarray(condition, dtype=bool)
a = asarray(a)
if condition.ndim != 1:
raise ValueError('condition must be a 1-d array.')
# `np.compress` treats scalars as 1-d arrays.
if a.ndim == 0:
a = ravel(a)
if axis is None:
a = ravel(a)
axis = 0
if axis < 0:
axis += a.ndim
assert axis >= 0 and axis < a.ndim
# `tf.boolean_mask` requires the first dimensions of array and condition to
# match. `np.compress` pads condition with False when it is shorter.
condition_t = condition
a_t = a
if condition.shape[0] < a.shape[axis]:
padding = array_ops.fill([a.shape[axis] - condition.shape[0]], False)
condition_t = array_ops.concat([condition_t, padding], axis=0)
return array_ops.boolean_mask(tensor=a_t, mask=condition_t, axis=axis)
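# Minimal sketch of the padding rule noted above (added for illustration; the
# helper name is hypothetical): a condition shorter than the selected axis is
# padded with False, matching np.compress.
def _compress_padding_sketch():
  # condition [True, False] is padded to [True, False, False, False] -> [1]
  return compress([True, False], asarray([1, 2, 3, 4]), axis=0)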
@np_utils.np_doc('copy')
def copy(a):
return array(a, copy=True)
def _maybe_promote_to_int(a):
if dtypes.as_dtype(a.dtype).is_integer:
# If a is an integer type and its precision is less than that of `int`,
# the output type will be `int`.
a_numpy_dtype = a.dtype.as_numpy_dtype
output_type = np.promote_types(a_numpy_dtype, int)
if output_type != a_numpy_dtype:
a = asarray(a, dtype=output_type)
return a
@np_utils.np_doc('cumprod')
def cumprod(a, axis=None, dtype=None): # pylint: disable=missing-docstring
a = asarray(a, dtype=dtype)
if dtype is None:
a = _maybe_promote_to_int(a)
# If axis is None, the input is flattened.
if axis is None:
a = ravel(a)
axis = 0
elif axis < 0:
axis += array_ops.rank(a)
return math_ops.cumprod(a, axis)
@np_utils.np_doc('cumsum')
def cumsum(a, axis=None, dtype=None): # pylint: disable=missing-docstring
a = asarray(a, dtype=dtype)
if dtype is None:
a = _maybe_promote_to_int(a)
# If axis is None, the input is flattened.
if axis is None:
a = ravel(a)
axis = 0
elif axis < 0:
axis += array_ops.rank(a)
return math_ops.cumsum(a, axis)
@np_utils.np_doc('imag')
def imag(val):
val = asarray(val)
# TODO(srbs): np.imag returns a scalar if `val` is a scalar, whereas we always
# return an ndarray.
return math_ops.imag(val)
_TO_INT_ = 0
_TO_FLOAT = 1
def _reduce(tf_fn,
a,
axis=None,
dtype=None,
keepdims=None,
promote_int=_TO_INT_,
tf_bool_fn=None,
preserve_bool=False):
"""A general reduction function.
Args:
tf_fn: the TF reduction function.
a: the array to be reduced.
axis: (optional) the axis along which to do the reduction. If None, all
dimensions are reduced.
dtype: (optional) the dtype of the result.
keepdims: (optional) whether to keep the reduced dimension(s).
promote_int: how to promote integer and bool inputs. There are three
choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2)
`_TO_FLOAT` always promotes them to a float type (determined by
dtypes.default_float_type); (3) None: don't promote.
tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype
is `np.bool_` and `preserve_bool` is True.
preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
is `np.bool_` (some reductions such as np.sum convert bools to integers,
      while others such as np.max preserve bools).
Returns:
An ndarray.
"""
if dtype:
dtype = np_utils.result_type(dtype)
if keepdims is None:
keepdims = False
a = asarray(a, dtype=dtype)
if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_) and
tf_bool_fn is not None):
return tf_bool_fn(input_tensor=a, axis=axis, keepdims=keepdims)
if dtype is None:
dtype = a.dtype.as_numpy_dtype
if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
if promote_int == _TO_INT_:
      # If a is an integer/bool type whose bit width is less than np.int_,
# numpy up-casts it to np.int_ based on the documentation at
# https://numpy.org/doc/1.18/reference/generated/numpy.sum.html
if dtype == np.bool_:
is_signed = True
width = 8 # We can use any number here that is less than 64
else:
is_signed = np.issubdtype(dtype, np.signedinteger)
width = np.iinfo(dtype).bits
# Numpy int_ and uint are defined as 'long' and 'unsigned long', so
# should have the same bit width.
if width < np.iinfo(np.int_).bits:
if is_signed:
dtype = np.int_
else:
dtype = np.uint
a = math_ops.cast(a, dtype)
elif promote_int == _TO_FLOAT:
a = math_ops.cast(a, np_dtypes.default_float_type())
if isinstance(axis, ops.Tensor) and axis.dtype not in (
dtypes.int32, dtypes.int64):
axis = math_ops.cast(axis, dtypes.int64)
return tf_fn(input_tensor=a, axis=axis, keepdims=keepdims)
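# Sketch of the integer-promotion rules documented in `_reduce` (added for
# illustration, not part of the original module; the helper name is invented):
# np.sum-style reductions promote small integer dtypes to np.int_, while
# np.max-style reductions keep the input dtype.
def _reduce_promotion_sketch():
  small = asarray([1, 2, 3], dtype=np.int8)
  promoted = sum(small)    # goes through promote_int=_TO_INT_, becomes np.int_
  preserved = amax(small)  # promote_int=None, stays int8
  return promoted, preserved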
# TODO (DarrenZhang01): Add `axis` support to the `size` API.
@np_utils.np_doc('size')
def size(x, axis=None): # pylint: disable=missing-docstring
if axis is not None:
raise NotImplementedError('axis argument is not supported in the current '
'`np.size` implementation')
if isinstance(x, (int, float, np.int32, np.int64, np.float32, np.float64)):
return 1
x = asarray(x)
if x.shape.is_fully_defined():
return np.prod(x.shape.as_list(), dtype=int)
else:
return array_ops.size_v2(x)
@np_utils.np_doc('sum')
def sum(a, axis=None, dtype=None, keepdims=None): # pylint: disable=redefined-builtin
return _reduce(
math_ops.reduce_sum,
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
tf_bool_fn=math_ops.reduce_any)
@np_utils.np_doc('prod')
def prod(a, axis=None, dtype=None, keepdims=None):
return _reduce(
math_ops.reduce_prod,
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
tf_bool_fn=math_ops.reduce_all)
@np_utils.np_doc('mean', unsupported_params=['out'])
def mean(a, axis=None, dtype=None, out=None, keepdims=None):
if out is not None:
raise ValueError('Setting out is not supported.')
return _reduce(
math_ops.reduce_mean,
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
promote_int=_TO_FLOAT)
@np_utils.np_doc('amax', unsupported_params=['out'])
def amax(a, axis=None, out=None, keepdims=None):
if out is not None:
raise ValueError('Setting out is not supported.')
return _reduce(
math_ops.reduce_max,
a,
axis=axis,
dtype=None,
keepdims=keepdims,
promote_int=None,
tf_bool_fn=math_ops.reduce_any,
preserve_bool=True)
@np_utils.np_doc('amin', unsupported_params=['out'])
def amin(a, axis=None, out=None, keepdims=None):
if out is not None:
raise ValueError('Setting out is not supported.')
return _reduce(
math_ops.reduce_min,
a,
axis=axis,
dtype=None,
keepdims=keepdims,
promote_int=None,
tf_bool_fn=math_ops.reduce_all,
preserve_bool=True)
@np_utils.np_doc('var')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None): # pylint: disable=missing-docstring
if dtype:
working_dtype = np_utils.result_type(a, dtype)
else:
working_dtype = None
if out is not None:
raise ValueError('Setting out is not supported.')
if ddof != 0:
# TF reduce_variance doesn't support ddof, so calculate it using raw ops.
def reduce_fn(input_tensor, axis, keepdims):
means = math_ops.reduce_mean(input_tensor, axis=axis, keepdims=True)
centered = input_tensor - means
if input_tensor.dtype in (dtypes.complex64, dtypes.complex128):
centered = math_ops.cast(
math_ops.real(centered * math_ops.conj(centered)),
input_tensor.dtype)
else:
centered = math_ops.square(centered)
squared_deviations = math_ops.reduce_sum(
centered, axis=axis, keepdims=keepdims)
if axis is None:
n = array_ops.size(input_tensor)
else:
if axis < 0:
axis += array_ops.rank(input_tensor)
n = math_ops.reduce_prod(
array_ops.gather(array_ops.shape(input_tensor), axis))
n = math_ops.cast(n - ddof, input_tensor.dtype)
return math_ops.cast(math_ops.divide(squared_deviations, n), dtype)
else:
reduce_fn = math_ops.reduce_variance
result = _reduce(
reduce_fn,
a,
axis=axis,
dtype=working_dtype,
keepdims=keepdims,
promote_int=_TO_FLOAT)
if dtype:
result = math_ops.cast(result, dtype)
return result
@np_utils.np_doc('std')
def std(a, axis=None, keepdims=None): # pylint: disable=missing-function-docstring
return _reduce(
math_ops.reduce_std,
a,
axis=axis,
dtype=None,
keepdims=keepdims,
promote_int=_TO_FLOAT)
@np_utils.np_doc('ravel')
def ravel(a): # pylint: disable=missing-docstring
a = asarray(a)
return array_ops.reshape(a, [-1])
@np_utils.np_doc('real')
def real(val):
val = asarray(val)
# TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
# return an ndarray.
return math_ops.real(val)
@np_utils.np_doc('repeat')
def repeat(a, repeats, axis=None): # pylint: disable=missing-docstring
a = asarray(a)
original_shape = a._shape_as_list() # pylint: disable=protected-access
# Best effort recovery of the shape.
known_shape = original_shape is not None and None not in original_shape
if known_shape:
if not original_shape:
original_shape = (repeats,)
else:
repeats_np = np.ravel(np.array(repeats))
if repeats_np.size == 1:
repeats_np = repeats_np.item()
if axis is None:
original_shape = (repeats_np * np.prod(original_shape),)
else:
original_shape[axis] = repeats_np * original_shape[axis]
else:
if axis is None:
original_shape = (repeats_np.sum(),)
else:
original_shape[axis] = repeats_np.sum()
repeats = asarray(repeats)
result = array_ops.repeat(a, repeats, axis)
if known_shape:
result.set_shape(original_shape)
return result
@np_utils.np_doc('around')
def around(a, decimals=0): # pylint: disable=missing-docstring
a = asarray(a)
dtype = a.dtype.as_numpy_dtype
factor = math.pow(10, decimals)
if np.issubdtype(dtype, np.inexact):
factor = math_ops.cast(factor, dtype)
else:
# Use float as the working dtype when a.dtype is exact (e.g. integer),
# because `decimals` can be negative.
float_dtype = np_dtypes.default_float_type()
a = a.astype(float_dtype)
factor = math_ops.cast(factor, float_dtype)
a = math_ops.multiply(a, factor)
a = math_ops.round(a)
a = math_ops.divide(a, factor)
return a.astype(dtype)
setattr(np_arrays.ndarray, '__round__', around)
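# Usage sketch for `around` (an illustrative addition; the helper name is made
# up): floating inputs are rounded in their own dtype, while integer inputs
# are routed through a float working dtype so that a negative `decimals`
# (rounding to tens, hundreds, ...) can be expressed, then cast back.
def _around_example_sketch():
  floats = around(asarray([1.234, 5.678]), decimals=2)
  ints = around(asarray([1234, 5678]), decimals=-2)
  return floats, ints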
@np_utils.np_doc('reshape')
def reshape(a, newshape, order='C'):
"""order argument can only b 'C' or 'F'."""
if order not in {'C', 'F'}:
raise ValueError('Unsupported order argument {}'.format(order))
a = asarray(a)
if isinstance(newshape, int):
newshape = [newshape]
if order == 'F':
r = array_ops.transpose(
array_ops.reshape(array_ops.transpose(a), newshape[::-1]))
else:
r = array_ops.reshape(a, newshape)
return r
def _reshape_method_wrapper(a, *newshape, **kwargs):
order = kwargs.pop('order', 'C')
if kwargs:
raise ValueError('Unsupported arguments: {}'.format(kwargs.keys()))
if len(newshape) == 1 and not isinstance(newshape[0], int):
newshape = newshape[0]
return reshape(a, newshape, order=order)
@np_utils.np_doc('expand_dims')
def expand_dims(a, axis):
a = asarray(a)
return array_ops.expand_dims(a, axis=axis)
@np_utils.np_doc('squeeze')
def squeeze(a, axis=None):
a = asarray(a)
return array_ops.squeeze(a, axis)
@np_utils.np_doc('transpose')
def transpose(a, axes=None):
a = asarray(a)
if axes is not None:
axes = asarray(axes)
return array_ops.transpose(a=a, perm=axes)
@np_utils.np_doc('swapaxes')
def swapaxes(a, axis1, axis2): # pylint: disable=missing-docstring
a = asarray(a)
def adjust_axes(axes, rank):
def f(x):
if isinstance(x, int):
if x < 0:
x = x + rank
else:
x = array_ops.where_v2(x < 0, np_utils.add(x, a_rank), x)
return x
return nest.map_structure(f, axes)
if (a.shape.rank is not None and
isinstance(axis1, int) and isinstance(axis2, int)):
# This branch makes sure `perm` is statically known, to avoid a
# not-compile-time-constant XLA error.
a_rank = a.shape.rank
axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
perm = list(range(a_rank))
perm[axis1] = axis2
perm[axis2] = axis1
else:
a_rank = array_ops.rank(a)
axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
perm = math_ops.range(a_rank)
perm = array_ops.tensor_scatter_update(perm, [[axis1], [axis2]],
[axis2, axis1])
a = array_ops.transpose(a, perm)
return a
@np_utils.np_doc('moveaxis')
def moveaxis(a, source, destination): # pylint: disable=missing-docstring
"""Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
if not source and not destination:
return a
a = asarray(a)
if isinstance(source, int):
source = (source,)
if isinstance(destination, int):
destination = (destination,)
if len(source) != len(destination):
    raise ValueError('The lengths of source and destination must be equal')
a_rank = np_utils._maybe_static(array_ops.rank(a)) # pylint: disable=protected-access
def _correct_axis(axis, rank):
if axis < 0:
return axis + rank
return axis
source = tuple(_correct_axis(axis, a_rank) for axis in source)
destination = tuple(_correct_axis(axis, a_rank) for axis in destination)
if a.shape.rank is not None:
perm = [i for i in range(a_rank) if i not in source]
for dest, src in sorted(zip(destination, source)):
assert dest <= len(perm)
perm.insert(dest, src)
else:
r = math_ops.range(a_rank)
def _remove_indices(a, b):
"""Remove indices (`b`) from `a`."""
items = array_ops.unstack(sort_ops.sort(array_ops.stack(b)), num=len(b))
i = 0
result = []
for item in items:
result.append(a[i:item])
i = item + 1
result.append(a[i:])
return array_ops.concat(result, 0)
minus_sources = _remove_indices(r, source)
minus_dest = _remove_indices(r, destination)
perm = array_ops.scatter_nd(
array_ops.expand_dims(minus_dest, 1), minus_sources, [a_rank])
perm = array_ops.tensor_scatter_update(
perm, array_ops.expand_dims(destination, 1), source)
a = array_ops.transpose(a, perm)
return a
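# Quick sketch of `moveaxis` (added for illustration only; the helper name is
# invented): moving the leading axis to the end turns shape (2, 3, 4) into
# (3, 4, 2), using the statically-known-rank branch above.
def _moveaxis_example_sketch():
  a = zeros([2, 3, 4])
  return moveaxis(a, 0, -1)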
@np_utils.np_doc('pad')
def pad(array, pad_width, mode, **kwargs): # pylint: disable=redefined-outer-name
"""Only supports modes 'constant', 'reflect' and 'symmetric' currently."""
constant_values = kwargs.get('constant_values', 0)
if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'):
raise ValueError('Unsupported padding mode: ' + mode)
mode = mode.upper()
array = asarray(array)
pad_width = asarray(pad_width, dtype=dtypes.int32)
return array_ops.pad(
tensor=array,
paddings=pad_width,
mode=mode,
constant_values=constant_values)
@np_utils.np_doc('take')
def take(a, indices, axis=None, out=None, mode='clip'):
"""out argument is not supported, and default mode is clip."""
if out is not None:
raise ValueError('out argument is not supported in take.')
if mode not in {'raise', 'clip', 'wrap'}:
raise ValueError("Invalid mode '{}' for take".format(mode))
a = asarray(a)
indices = asarray(indices)
if axis is None:
a = array_ops.reshape(a, [-1])
axis = 0
axis_size = array_ops.shape(a, out_type=indices.dtype)[axis]
if mode == 'clip':
indices = clip_ops.clip_by_value(indices, 0, axis_size - 1)
elif mode == 'wrap':
indices = math_ops.floormod(indices, axis_size)
else:
raise ValueError("The 'raise' mode to take is not supported.")
return array_ops.gather(a, indices, axis=axis)
@np_utils.np_doc_only('where')
def where(condition, x=None, y=None):
"""Raises ValueError if exactly one of x or y is not None."""
condition = asarray(condition, dtype=np.bool_)
if x is None and y is None:
return nonzero(condition)
elif x is not None and y is not None:
x, y = _promote_dtype(x, y)
return array_ops.where_v2(condition, x, y)
raise ValueError('Both x and y must be ndarrays, or both must be None.')
@np_utils.np_doc('select')
def select(condlist, choicelist, default=0): # pylint: disable=missing-docstring
if len(condlist) != len(choicelist):
msg = 'condlist must have length equal to choicelist ({} vs {})'
raise ValueError(msg.format(len(condlist), len(choicelist)))
if not condlist:
raise ValueError('condlist must be non-empty')
choices = _promote_dtype(default, *choicelist)
choicelist = choices[1:]
output = choices[0]
# The traversal is in reverse order so we can return the first value in
# choicelist where condlist is True.
for cond, choice in zip(condlist[::-1], choicelist[::-1]):
output = where(cond, choice, output)
return output
@np_utils.np_doc('shape', link=np_utils.Link(
'https://numpy.org/doc/1.18/reference/generated/numpy.shape.html'))
def shape(a):
a = asarray(a)
return a.shape
@np_utils.np_doc('ndim', link=np_utils.NoLink())
def ndim(a):
a = asarray(a)
return a.ndim
@np_utils.np_doc('isscalar')
def isscalar(num):
return ndim(num) == 0
def _boundaries_to_sizes(a, boundaries, axis):
"""Converting boundaries of splits to sizes of splits.
Args:
a: the array to be split.
boundaries: the boundaries, as in np.split.
axis: the axis along which to split.
Returns:
A list of sizes of the splits, as in tf.split.
"""
if axis >= len(a.shape):
raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))
total_size = a.shape[axis]
sizes = []
sizes_sum = 0
prev = 0
for i, b in enumerate(boundaries):
size = b - prev
if size < 0:
raise ValueError('The %s-th boundary %s is smaller than the previous '
'boundary %s' % (i, b, prev))
size = min(size, max(0, total_size - sizes_sum))
sizes.append(size)
sizes_sum += size
prev = b
sizes.append(max(0, total_size - sizes_sum))
return sizes
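# Worked example for `_boundaries_to_sizes` (an illustrative addition, not in
# the original module; the helper name is hypothetical): np.split-style
# boundaries are converted into tf.split-style sizes.
def _boundaries_to_sizes_sketch():
  a = zeros([10])
  # boundaries [3, 7] on a length-10 axis yield split sizes [3, 4, 3]
  return _boundaries_to_sizes(a, [3, 7], axis=0)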
@np_utils.np_doc('split')
def split(ary, indices_or_sections, axis=0):
ary = asarray(ary)
if not isinstance(indices_or_sections, six.integer_types):
indices_or_sections = _boundaries_to_sizes(ary, indices_or_sections, axis)
return array_ops.split(ary, indices_or_sections, axis=axis)
def _split_on_axis(np_fun_name, axis):
@np_utils.np_doc(np_fun_name)
def f(ary, indices_or_sections):
return split(ary, indices_or_sections, axis=axis)
return f
vsplit = _split_on_axis('vsplit', axis=0)
hsplit = _split_on_axis('hsplit', axis=1)
dsplit = _split_on_axis('dsplit', axis=2)
@np_utils.np_doc('broadcast_to')
def broadcast_to(array, shape): # pylint: disable=redefined-outer-name
return full(shape, array)
@np_utils.np_doc('stack')
def stack(arrays, axis=0): # pylint: disable=missing-function-docstring
if isinstance(arrays, (np_arrays.ndarray, ops.Tensor)):
arrays = asarray(arrays)
if axis == 0:
return arrays
else:
return swapaxes(arrays, 0, axis)
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
return asarray(array_ops.stack(unwrapped_arrays, axis))
@np_utils.np_doc('hstack')
def hstack(tup):
arrays = [atleast_1d(a) for a in tup]
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
rank = array_ops.rank(unwrapped_arrays[0])
  return np_utils.cond(
      math_ops.equal(rank, 1),
      lambda: array_ops.concat(unwrapped_arrays, axis=0),
      lambda: array_ops.concat(unwrapped_arrays, axis=1))
@np_utils.np_doc('vstack')
def vstack(tup):
arrays = [atleast_2d(a) for a in tup]
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
return array_ops.concat(unwrapped_arrays, axis=0)
@np_utils.np_doc('dstack')
def dstack(tup):
arrays = [atleast_3d(a) for a in tup]
arrays = _promote_dtype(*arrays) # pylint: disable=protected-access
unwrapped_arrays = [
a if isinstance(a, np_arrays.ndarray) else a for a in arrays
]
return array_ops.concat(unwrapped_arrays, axis=2)
def _pad_left_to(n, old_shape):
old_shape = asarray(old_shape, dtype=np.int32)
new_shape = array_ops.pad(
old_shape, [[math_ops.maximum(n - array_ops.size(old_shape), 0), 0]],
constant_values=1)
return asarray(new_shape)
def _atleast_nd(n, new_shape, *arys):
"""Reshape arrays to be at least `n`-dimensional.
Args:
n: The minimal rank.
new_shape: a function that takes `n` and the old shape and returns the
desired new shape.
*arys: ndarray(s) to be reshaped.
Returns:
The reshaped array(s).
"""
def f(x):
# pylint: disable=g-long-lambda
x = asarray(x)
return asarray(
np_utils.cond(
np_utils.greater(n, array_ops.rank(x)),
lambda: reshape(x, new_shape(n, array_ops.shape(x))),
lambda: x))
arys = list(map(f, arys))
if len(arys) == 1:
return arys[0]
else:
return arys
@np_utils.np_doc('atleast_1d')
def atleast_1d(*arys):
return _atleast_nd(1, _pad_left_to, *arys)
@np_utils.np_doc('atleast_2d')
def atleast_2d(*arys):
return _atleast_nd(2, _pad_left_to, *arys)
@np_utils.np_doc('atleast_3d')
def atleast_3d(*arys): # pylint: disable=missing-docstring
def new_shape(_, old_shape):
# pylint: disable=g-long-lambda
ndim_ = array_ops.size(old_shape)
    return np_utils.cond(
        math_ops.equal(ndim_, 0),
        lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
        lambda: np_utils.cond(
            math_ops.equal(ndim_, 1),
            lambda: array_ops.pad(old_shape, [[1, 1]], constant_values=1),
            lambda: array_ops.pad(old_shape, [[0, 1]], constant_values=1)))
return _atleast_nd(3, new_shape, *arys)
@np_utils.np_doc('nonzero')
def nonzero(a):
a = atleast_1d(a)
if a.shape.rank is None:
raise ValueError("The rank of `a` is unknown, so we can't decide how many "
'arrays to return.')
return array_ops.unstack(
array_ops.where_v2(math_ops.cast(a, dtypes.bool)),
a.shape.rank,
axis=1)
@np_utils.np_doc('diag_indices')
def diag_indices(n, ndim=2): # pylint: disable=missing-docstring,redefined-outer-name
if n < 0:
raise ValueError(
'n argument to diag_indices must be nonnegative, got {}'.format(n))
if ndim < 0:
raise ValueError(
'ndim argument to diag_indices must be nonnegative, got {}'.format(
ndim))
return (math_ops.range(n),) * ndim
@np_utils.np_doc('tri')
def tri(N, M=None, k=0, dtype=None): # pylint: disable=invalid-name,missing-docstring
M = M if M is not None else N
if dtype is not None:
dtype = np_utils.result_type(dtype)
else:
dtype = np_dtypes.default_float_type()
if k < 0:
lower = -k - 1
if lower > N:
r = array_ops.zeros([N, M], dtype)
else:
# Keep as tf bool, since we create an upper triangular matrix and invert
# it.
o = array_ops.ones([N, M], dtype=dtypes.bool)
r = math_ops.cast(
math_ops.logical_not(array_ops.matrix_band_part(o, lower, -1)), dtype)
else:
o = array_ops.ones([N, M], dtype)
if k > M:
r = o
else:
r = array_ops.matrix_band_part(o, -1, k)
return r
@np_utils.np_doc('tril')
def tril(m, k=0): # pylint: disable=missing-docstring
m = asarray(m)
if m.shape.ndims is None:
raise ValueError('Argument to tril should have known rank')
m_shape = m.shape.as_list()
if len(m_shape) < 2:
raise ValueError('Argument to tril must have rank at least 2')
if m_shape[-1] is None or m_shape[-2] is None:
raise ValueError('Currently, the last two dimensions of the input array '
'need to be known.')
z = constant_op.constant(0, m.dtype)
mask = tri(*m_shape[-2:], k=k, dtype=bool)
return array_ops.where_v2(
array_ops.broadcast_to(mask, array_ops.shape(m)), m, z)
@np_utils.np_doc('triu')
def triu(m, k=0): # pylint: disable=missing-docstring
m = asarray(m)
if m.shape.ndims is None:
raise ValueError('Argument to triu should have known rank')
m_shape = m.shape.as_list()
if len(m_shape) < 2:
raise ValueError('Argument to triu must have rank at least 2')
if m_shape[-1] is None or m_shape[-2] is None:
raise ValueError('Currently, the last two dimensions of the input array '
'need to be known.')
z = constant_op.constant(0, m.dtype)
mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
return array_ops.where_v2(
array_ops.broadcast_to(mask, array_ops.shape(m)), z, m)
@np_utils.np_doc('flip')
def flip(m, axis=None): # pylint: disable=missing-docstring
m = asarray(m)
if axis is None:
return array_ops.reverse(m, math_ops.range(array_ops.rank(m)))
axis = np_utils._canonicalize_axis(axis, array_ops.rank(m)) # pylint: disable=protected-access
return array_ops.reverse(m, [axis])
@np_utils.np_doc('flipud')
def flipud(m): # pylint: disable=missing-docstring
return flip(m, 0)
@np_utils.np_doc('fliplr')
def fliplr(m): # pylint: disable=missing-docstring
return flip(m, 1)
@np_utils.np_doc('roll')
def roll(a, shift, axis=None): # pylint: disable=missing-docstring
a = asarray(a)
if axis is not None:
return manip_ops.roll(a, shift, axis)
# If axis is None, the roll happens as a 1-d tensor.
original_shape = array_ops.shape(a)
a = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0)
return array_ops.reshape(a, original_shape)
@np_utils.np_doc('rot90')
def rot90(m, k=1, axes=(0, 1)): # pylint: disable=missing-docstring
m_rank = array_ops.rank(m)
ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank) # pylint: disable=protected-access
k = k % 4
if k == 0:
return m
elif k == 2:
return flip(flip(m, ax1), ax2)
else:
perm = math_ops.range(m_rank)
perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1])
if k == 1:
return transpose(flip(m, ax2), perm)
else:
return flip(transpose(m, perm), ax2)
@np_utils.np_doc('vander')
def vander(x, N=None, increasing=False): # pylint: disable=missing-docstring,invalid-name
x = asarray(x)
x_shape = array_ops.shape(x)
N = N or x_shape[0]
N_temp = np_utils.get_static_value(N) # pylint: disable=invalid-name
if N_temp is not None:
N = N_temp
if N < 0:
raise ValueError('N must be nonnegative')
else:
control_flow_ops.Assert(N >= 0, [N])
rank = array_ops.rank(x)
rank_temp = np_utils.get_static_value(rank)
if rank_temp is not None:
rank = rank_temp
if rank != 1:
raise ValueError('x must be a one-dimensional array')
else:
control_flow_ops.Assert(math_ops.equal(rank, 1), [rank])
if increasing:
start = 0
limit = N
delta = 1
else:
start = N - 1
limit = -1
delta = -1
x = array_ops.expand_dims(x, -1)
return math_ops.pow(
x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype))
@np_utils.np_doc('ix_')
def ix_(*args): # pylint: disable=missing-docstring
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a)
a_rank = array_ops.rank(a)
a_rank_temp = np_utils.get_static_value(a_rank)
if a_rank_temp is not None:
a_rank = a_rank_temp
if a_rank != 1:
raise ValueError('Arguments must be 1-d, got arg {} of rank {}'.format(
i, a_rank))
else:
control_flow_ops.Assert(math_ops.equal(a_rank, 1), [a_rank])
new_shape = [1] * n
new_shape[i] = -1
dtype = a.dtype
if dtype == dtypes.bool:
output.append(array_ops.reshape(nonzero(a)[0], new_shape))
elif dtype.is_integer:
output.append(array_ops.reshape(a, new_shape))
else:
raise ValueError(
'Only integer and bool dtypes are supported, got {}'.format(dtype))
return output
@np_utils.np_doc('broadcast_arrays')
def broadcast_arrays(*args, **kwargs): # pylint: disable=missing-docstring
subok = kwargs.pop('subok', False)
if subok:
raise ValueError('subok=True is not supported.')
if kwargs:
raise ValueError('Received unsupported arguments {}'.format(kwargs.keys()))
args = [asarray(arg) for arg in args]
return np_utils.tf_broadcast(*args)
@np_utils.np_doc_only('sign')
def sign(x, out=None, where=None, **kwargs): # pylint: disable=missing-docstring,redefined-outer-name
  if out:
    raise ValueError('tf.numpy does not support setting out.')
  if where:
    raise ValueError('tf.numpy does not support setting where.')
  if kwargs:
    raise ValueError(
        'tf.numpy does not support setting {}'.format(kwargs.keys()))
x = asarray(x)
dtype = x.dtype.as_numpy_dtype
if np.issubdtype(dtype, np.complexfloating):
result = math_ops.cast(math_ops.sign(math_ops.real(x)), dtype)
else:
result = math_ops.sign(x)
return result
# Note that np.take_along_axis may not be present in some supported versions of
# numpy.
@np_utils.np_doc('take_along_axis')
def take_along_axis(arr, indices, axis): # pylint: disable=missing-docstring
arr = asarray(arr)
indices = asarray(indices)
if axis is None:
return take_along_axis(arr.ravel(), indices, 0)
rank = array_ops.rank(arr)
axis = axis + rank if axis < 0 else axis
# Broadcast shapes to match, ensure that the axis of interest is not
# broadcast.
arr_shape_original = array_ops.shape(arr)
indices_shape_original = array_ops.shape(indices)
arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1])
indices_shape = array_ops.tensor_scatter_update(indices_shape_original,
[[axis]], [1])
broadcasted_shape = array_ops.broadcast_dynamic_shape(arr_shape,
indices_shape)
arr_shape = array_ops.tensor_scatter_update(broadcasted_shape, [[axis]],
[arr_shape_original[axis]])
indices_shape = array_ops.tensor_scatter_update(
broadcasted_shape, [[axis]], [indices_shape_original[axis]])
arr = array_ops.broadcast_to(arr, arr_shape)
indices = array_ops.broadcast_to(indices, indices_shape)
# Save indices shape so we can restore it later.
possible_result_shape = indices.shape
# Correct indices since gather doesn't correctly handle negative indices.
indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices)
swapaxes_ = lambda t: swapaxes(t, axis, -1)
dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1))
arr = np_utils.cond(dont_move_axis_to_end, lambda: arr,
lambda: swapaxes_(arr))
indices = np_utils.cond(dont_move_axis_to_end, lambda: indices,
lambda: swapaxes_(indices))
arr_shape = array_ops.shape(arr)
arr = array_ops.reshape(arr, [-1, arr_shape[-1]])
indices_shape = array_ops.shape(indices)
indices = array_ops.reshape(indices, [-1, indices_shape[-1]])
result = array_ops.gather(arr, indices, batch_dims=1)
result = array_ops.reshape(result, indices_shape)
result = np_utils.cond(dont_move_axis_to_end, lambda: result,
lambda: swapaxes_(result))
result.set_shape(possible_result_shape)
return result
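# Usage sketch for `take_along_axis` (added for illustration; the helper name
# is invented): `indices` broadcasts against `arr` on every axis except the
# selected one, following the np.take_along_axis convention. int32 indices are
# used so they match the int32 shape tensors computed above.
def _take_along_axis_sketch():
  arr = asarray([[10, 30, 20], [60, 40, 50]])
  idx = asarray([[0], [2]], dtype=np.int32)  # one column index per row
  return take_along_axis(arr, idx, axis=1)   # -> [[10], [50]]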
_SLICE_ERROR = (
'only integers, slices (`:`), ellipsis (`...`), '
'numpy.newaxis (`None`) and integer or boolean arrays are valid indices')
def _as_index(idx, need_scalar=True):
"""Helper function to parse idx as an index.
Args:
idx: index
need_scalar: If idx needs to be a scalar value.
Returns:
    A pair, (index, bool). First one is the parsed index and can be a tensor,
or scalar integer / Dimension. Second one is True if rank is known to be 0.
Raises:
IndexError: For incorrect indices.
"""
if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
return idx, True
data = asarray(idx)
if data.dtype == dtypes.bool:
if data.shape.ndims != 1:
# TODO(agarwal): handle higher rank boolean masks.
raise NotImplementedError('Need rank 1 for bool index %s' % idx)
data = array_ops.where_v2(data)
data = array_ops.reshape(data, [-1])
if need_scalar and data.shape.rank not in (None, 0):
    raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
np_dtype = data.dtype.as_numpy_dtype
if not np.issubdtype(np_dtype, np.integer):
    raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
if data.dtype not in (dtypes.int64, dtypes.int32):
# TF slicing can only handle int32/int64. So we need to cast.
promoted_dtype = np.promote_types(np.int32, np_dtype)
if promoted_dtype == np.int32:
data = math_ops.cast(data, dtypes.int32)
elif promoted_dtype == np.int64:
data = math_ops.cast(data, dtypes.int64)
else:
      raise IndexError(_SLICE_ERROR + ', got {!r}'.format(idx))
return data, data.shape.rank == 0
class _UpdateMethod(enum.Enum):
UPDATE = 0
ADD = 1
MIN = 2
MAX = 3
def _slice_helper(tensor, slice_spec, update_method=None, updates=None):
"""Helper function for __getitem__ and _with_index_update_helper.
This function collects the indices in `slice_spec` into two buckets, which we
can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2
`gather`. They also correspond to "basic indices" and "advanced indices" in
numpy. This function supports both reading and writing at the indices. The
reading path can be summarized as `gather(stride_slice(tensor, idx1),
idx2)`. The writing path can be summarized as `strided_slice_update(tensor,
idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`. (`gather` here
means `tf.gather` or `tf.gather_nd`; `scatter` here means
`tf.tensor_scatter_update`.) The writing path is inefficient because it needs
to first read out a portion (probably much larger than `updates`) of `tensor`
using `strided_slice`, update it, and then write the portion back. An
alternative approach is to only use `scatter`, which amounts to using the
indexing mechanism of gather/scatter to implement
strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter
because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but
not TF gather/scatter because they don't support spans (except those that
cover entire dimensions, i.e. `:`). If we materialize spans into individual
indices, the size of the index tensor would explode. (Note that XLA
Gather/Scatter have a similar problem for stride > 1 because they don't
support strides. Indices such as `1:2:8` will need to be materialized into
individual indices such as [1, 3, 5, 7].)
Args:
tensor: the tensor to be read from or write into.
slice_spec: the indices.
update_method: (optional) a member of `_UpdateMethod`, indicating how to
update the values (replacement, add, etc.). `None` indicates just reading.
updates: (optional) the new values to write into `tensor`. It must have the
same dtype as `tensor`.
Returns:
The result of reading (if `update_method` is `None`) or the updated `tensor`
after writing.
"""
begin, end, strides = [], [], []
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
advanced_indices = []
shrink_indices = []
for index, s in enumerate(slice_spec):
if isinstance(s, slice):
if s.start is not None:
begin.append(_as_index(s.start)[0])
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None:
end.append(_as_index(s.stop)[0])
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
strides.append(_as_index(s.step)[0])
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is array_ops.newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
s, is_scalar = _as_index(s, False)
if is_scalar:
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
shrink_indices.append(index)
else:
begin.append(0)
end.append(0)
strides.append(1)
begin_mask |= (1 << index)
end_mask |= (1 << index)
advanced_indices.append((index, s, ellipsis_mask != 0))
  # `stack` possibly involves no tensors, so we must use op_scope to select
  # the correct graph.
with ops.name_scope(
None,
'strided_slice', [tensor] + begin + end + strides,
skip_on_eager=False) as name:
if begin:
packed_begin, packed_end, packed_strides = (array_ops.stack(begin),
array_ops.stack(end),
array_ops.stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant_op.constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
if update_method == _UpdateMethod.UPDATE and not advanced_indices:
return array_ops.tensor_strided_slice_update(
tensor,
packed_begin,
packed_end,
packed_strides,
updates,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name)
else:
# TODO(b/164251540): Find a better way to support update that does not
# involve one read + two writes.
if updates is not None:
original_tensor = tensor
# TODO(agarwal): set_shape on tensor to set rank.
tensor = array_ops.strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name)
if not advanced_indices:
if update_method is None:
return tensor
assert update_method != _UpdateMethod.UPDATE
# TF lacks TensorStridedSliceAdd and alike, so we need to do
# read+add+update.
if update_method == _UpdateMethod.ADD:
update_op = math_ops.add
elif update_method == _UpdateMethod.MIN:
update_op = math_ops.minimum
elif update_method == _UpdateMethod.MAX:
update_op = math_ops.maximum
return array_ops.tensor_strided_slice_update(
original_tensor,
packed_begin,
packed_end,
packed_strides,
update_op(tensor, updates),
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name + '_2')
advanced_indices_map = {}
for index, data, had_ellipsis in advanced_indices:
if had_ellipsis:
num_shrink = len([x for x in shrink_indices if x > index])
dim = index - len(slice_spec) + num_shrink
else:
num_shrink = len([x for x in shrink_indices if x < index])
dim = index - num_shrink
advanced_indices_map[dim] = data
dims = sorted(advanced_indices_map.keys())
dims_contiguous = True
if len(dims) > 1:
if dims[0] < 0 and dims[-1] >= 0: # not all same sign
dims_contiguous = False
else:
for i in range(len(dims) - 1):
if dims[i] + 1 != dims[i + 1]:
dims_contiguous = False
break
indices = [advanced_indices_map[x] for x in dims]
indices = _promote_dtype(*indices)
indices = np_utils.tf_broadcast(*indices)
stacked_indices = array_ops.stack(indices, axis=-1)
# Skip the contiguous-dims optimization for update because there is no
# tf.*scatter* op that supports the `axis` argument.
if not dims_contiguous or updates is not None:
if range(len(dims)) != dims:
tensor = moveaxis(tensor, dims, range(len(dims)))
tensor_shape_prefix = array_ops.shape(
tensor, out_type=stacked_indices.dtype)[:len(dims)]
stacked_indices = array_ops.where_v2(
stacked_indices < 0, stacked_indices + tensor_shape_prefix,
stacked_indices)
if updates is None:
return array_ops.gather_nd(tensor, stacked_indices)
else:
      # We only need to move-axis `updates` in the contiguous case because
# only in this case the result dimensions of advanced indexing are in
# the middle of `updates`. In the non-contiguous case, those dimensions
# are always at the front.
if dims_contiguous:
# TODO(wangpeng): Support unknown rank (e.g. by partially flattening
# `updates`)
if stacked_indices.shape.rank is None:
raise NotImplementedError(
'Rank of the advanced indices must currently be known')
batch_size = stacked_indices.shape.rank - 1
batch_start = dims[0]
if batch_start < 0:
batch_start += len(dims) - batch_size
def range_(start, length):
return range(start, start + length)
updates = moveaxis(updates, range_(batch_start, batch_size),
range(batch_size))
if update_method == _UpdateMethod.UPDATE:
update_op = array_ops.tensor_scatter_update
elif update_method == _UpdateMethod.ADD:
update_op = array_ops.tensor_scatter_add
elif update_method == _UpdateMethod.MIN:
update_op = array_ops.tensor_scatter_min
elif update_method == _UpdateMethod.MAX:
update_op = array_ops.tensor_scatter_max
tensor = update_op(
tensor, stacked_indices, updates)
if range(len(dims)) != dims:
tensor = moveaxis(tensor, range(len(dims)), dims)
return array_ops.tensor_strided_slice_update(
original_tensor,
packed_begin,
packed_end,
packed_strides,
tensor,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
name=name + '_2')
# Note that gather_nd does not support gathering from inside the array.
# To avoid shuffling data back and forth, we transform the indices and
# do a gather instead.
rank = np_utils._maybe_static(array_ops.rank(tensor)) # pylint: disable=protected-access
dims = [(x + rank if x < 0 else x) for x in dims]
shape_tensor = array_ops.shape(tensor)
dim_sizes = array_ops.gather(shape_tensor, dims)
if len(dims) == 1:
stacked_indices = indices[0]
stacked_indices = math_ops.cast(stacked_indices, dtypes.int32)
stacked_indices = array_ops.where_v2(stacked_indices < 0,
stacked_indices + dim_sizes,
stacked_indices)
axis = dims[0]
if len(dims) > 1:
index_scaling = math_ops.cumprod(
dim_sizes, reverse=True, exclusive=True)
def _tensordot(a, b):
# TODO(b/168657656): This function should be replaced by
# tensordot(axis=1) once MatMul has int32 XLA kernel.
b = array_ops.broadcast_to(b, array_ops.shape(a))
return math_ops.reduce_sum(a * b, axis=-1)
stacked_indices = _tensordot(stacked_indices, index_scaling)
flat_shape = array_ops.concat(
[shape_tensor[:axis], [-1], shape_tensor[axis + len(dims):]],
axis=0)
tensor = array_ops.reshape(tensor, flat_shape)
return array_ops.gather(tensor, stacked_indices, axis=axis)
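# Read-path sketch for `_slice_helper` (an illustrative addition; the helper
# name is made up): a basic slice and an advanced (array) index are split into
# the strided_slice and gather stages described in the docstring above.
def _slice_helper_read_sketch():
  x = asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
  # equivalent to x[1:, [0, 2]] -> [[4, 6], [7, 9]]
  return _slice_helper(x, (slice(1, None), asarray([0, 2])))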
def _as_spec_tuple(slice_spec):
"""Convert slice_spec to tuple."""
if isinstance(slice_spec,
(list, tuple)) and not isinstance(slice_spec, np.ndarray):
is_index = True
for s in slice_spec:
if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)):
is_index = False
break
elif isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0:
is_index = False
break
if not is_index:
return tuple(slice_spec)
return (slice_spec,)
def _getitem(self, slice_spec):
"""Implementation of ndarray.__getitem__."""
if (isinstance(slice_spec, bool) or (isinstance(slice_spec, ops.Tensor) and
slice_spec.dtype == dtypes.bool) or
(isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and
slice_spec.dtype == np.bool_)):
return array_ops.boolean_mask(tensor=self, mask=slice_spec)
if not isinstance(slice_spec, tuple):
slice_spec = _as_spec_tuple(slice_spec)
result_t = _slice_helper(self, slice_spec)
return result_t
def _with_index_update_helper(update_method, a, slice_spec, updates):
"""Implementation of ndarray._with_index_*."""
if (isinstance(slice_spec, bool) or (isinstance(slice_spec, ops.Tensor) and
slice_spec.dtype == dtypes.bool) or
(isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and
slice_spec.dtype == np.bool_)):
slice_spec = nonzero(slice_spec)
if not isinstance(slice_spec, tuple):
slice_spec = _as_spec_tuple(slice_spec)
a_dtype = a.dtype
a, updates = _promote_dtype_binary(a, updates)
result_t = _slice_helper(a, slice_spec, update_method, updates)
return result_t.astype(a_dtype)
setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem)
setattr(np_arrays.ndarray, '_with_index_update',
functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE))
setattr(np_arrays.ndarray, '_with_index_add',
functools.partial(_with_index_update_helper, _UpdateMethod.ADD))
setattr(np_arrays.ndarray, '_with_index_min',
functools.partial(_with_index_update_helper, _UpdateMethod.MIN))
setattr(np_arrays.ndarray, '_with_index_max',
functools.partial(_with_index_update_helper, _UpdateMethod.MAX))
|
frreiss/tensorflow-fred
|
tensorflow/python/ops/numpy_ops/np_array_ops.py
|
Python
|
apache-2.0
| 60,984
|
from SubjectNodes import SubjectNode
class MultiClassSubjectNode(SubjectNode):
def __init__(self):
        SubjectNode.__init__(self, numClasses=1)
    def __changeClassificationAttributes__(self, attributesList):
pass
|
camallen/aggregation
|
experimental/graphicalClassification/MultiClassSubjectNode.py
|
Python
|
apache-2.0
| 227
|
'''Tests for helpers_views'''
from mysite import settings
from mysite.helpers.db_access import DBAccess
from mysite.helpers import testhelpers as th
from heatmap.helpers.table_view import (TableForm, QueryForm)
from heatmap.helpers import test_helpers_views as thv
from django.test import TestCase # Provides mocks for client interactions
import os
import sqlite3
import unittest  # needed for the __main__ entry point below
class TestTableViews(TestCase):
def setUp(self):
self.assertTrue(th.SetupTestDB())
self.db_path = th.TEST_DB_PATH
self.dba = DBAccess(db_path=self.db_path)
self.conn = sqlite3.connect(self.db_path)
self.cursor = self.conn.cursor()
def tearDown(self):
th.TearDownTestDB()
def testmaketable(self):
CLIENT_URL = '/heatmap/maketable/'
TABLE_LIST = ['data_rev', 'data']
response = self.client.get(CLIENT_URL)
self.assertEqual(response.status_code, 200)
form_data = {'numrows': 10, 'lastrow': 1}
form = TableForm(data=form_data)
post_dict = {'display_form': form,
'tablename': TABLE_LIST[0],
'table_list': TABLE_LIST}
response = self.client.post(CLIENT_URL, post_dict)
self.assertEqual(response.status_code, 200)
def testDeletetable(self):
# Put in the file to delete
CLIENT_URL = '/heatmap/upload/'
post_dict = {'filename': thv.TEST_DATA_FILENAME}
response = self.client.post(CLIENT_URL, post_dict)
# Delete the file
CLIENT_URL = '/heatmap/deletetable/'
response = self.client.get(CLIENT_URL)
self.assertEqual(response.status_code, 200)
post_dict = {'tablename': thv.TEST_DATA_TABLENAME}
response = self.client.post(CLIENT_URL, post_dict)
self.assertEqual(response.status_code, 200)
def testQuery(self):
CLIENT_URL = '/heatmap/query/'
TABLE_LIST = ['data_rev', 'data']
TABLE_NAME = "testQuery"
response = self.client.get(CLIENT_URL)
self.assertEqual(response.status_code, 200)
# Test the post
th.CreateTableWithData(TABLE_NAME, self.conn)
query_string = "SELECT * from %s" % TABLE_NAME
form = QueryForm(data={'query_string': query_string})
post_dict = {'form': form,
'table_list': TABLE_LIST}
response = self.client.post(CLIENT_URL, post_dict)
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
ScienceStacks/JViz
|
mysite/heatmap/helpers/test_table_view.py
|
Python
|
apache-2.0
| 2,324
|
from __future__ import unicode_literals
from .service import Service # noqa:flake8
__version__ = '1.0.1'
|
bcicen/fig
|
fig/__init__.py
|
Python
|
apache-2.0
| 107
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from helpers import unittest
from datetime import timedelta
import luigi
import luigi.date_interval
import luigi.interface
import luigi.notifications
from helpers import with_config
from luigi.mock import MockTarget, MockFileSystem
from luigi.parameter import ParameterException
from worker_test import email_patch
luigi.notifications.DEBUG = True
class A(luigi.Task):
p = luigi.IntParameter()
class WithDefault(luigi.Task):
x = luigi.Parameter(default='xyz')
class Foo(luigi.Task):
bar = luigi.Parameter()
p2 = luigi.IntParameter()
multi = luigi.Parameter(is_list=True)
not_a_param = "lol"
class Bar(luigi.Task):
multibool = luigi.BoolParameter(is_list=True)
def run(self):
Bar._val = self.multibool
class Baz(luigi.Task):
bool = luigi.BoolParameter()
def run(self):
Baz._val = self.bool
class ForgotParam(luigi.Task):
param = luigi.Parameter()
def run(self):
pass
class ForgotParamDep(luigi.Task):
def requires(self):
return ForgotParam()
def run(self):
pass
class HasGlobalParam(luigi.Task):
x = luigi.Parameter()
global_param = luigi.IntParameter(is_global=True, default=123) # global parameters need default values
global_bool_param = luigi.BoolParameter(is_global=True, default=False)
def run(self):
self.complete = lambda: True
def complete(self):
return False
class HasGlobalParamDep(luigi.Task):
x = luigi.Parameter()
def requires(self):
return HasGlobalParam(self.x)
_shared_global_param = luigi.Parameter(is_global=True, default='123')
class SharedGlobalParamA(luigi.Task):
shared_global_param = _shared_global_param
class SharedGlobalParamB(luigi.Task):
shared_global_param = _shared_global_param
class BananaDep(luigi.Task):
x = luigi.Parameter()
y = luigi.Parameter(default='def')
def output(self):
return MockTarget('banana-dep-%s-%s' % (self.x, self.y))
def run(self):
self.output().open('w').close()
class Banana(luigi.Task):
x = luigi.Parameter()
y = luigi.Parameter()
style = luigi.Parameter(default=None)
def requires(self):
if self.style is None:
return BananaDep() # will fail
elif self.style == 'x-arg':
return BananaDep(self.x)
elif self.style == 'y-kwarg':
return BananaDep(y=self.y)
elif self.style == 'x-arg-y-arg':
return BananaDep(self.x, self.y)
else:
raise Exception('unknown style')
def output(self):
return MockTarget('banana-%s-%s' % (self.x, self.y))
def run(self):
self.output().open('w').close()
class MyConfig(luigi.Config):
mc_p = luigi.IntParameter()
mc_q = luigi.IntParameter(default=73)
class MyConfigWithoutSection(luigi.Config):
use_cmdline_section = False
mc_r = luigi.IntParameter()
mc_s = luigi.IntParameter(default=99)
class NoopTask(luigi.Task):
pass
class ParameterTest(unittest.TestCase):
def setUp(self):
super(ParameterTest, self).setUp()
        # Need to restore some defaults for the global params since they are overridden
HasGlobalParam.global_param.set_global(123)
HasGlobalParam.global_bool_param.set_global(False)
def test_default_param(self):
self.assertEqual(WithDefault().x, 'xyz')
def test_missing_param(self):
def create_a():
return A()
self.assertRaises(luigi.parameter.MissingParameterException, create_a)
def test_unknown_param(self):
def create_a():
return A(p=5, q=4)
self.assertRaises(luigi.parameter.UnknownParameterException, create_a)
def test_unknown_param_2(self):
def create_a():
return A(1, 2, 3)
self.assertRaises(luigi.parameter.UnknownParameterException, create_a)
def test_duplicated_param(self):
def create_a():
return A(5, p=7)
self.assertRaises(luigi.parameter.DuplicateParameterException, create_a)
def test_parameter_registration(self):
self.assertEqual(len(Foo.get_params()), 3)
def test_task_creation(self):
f = Foo("barval", p2=5, multi=('m1', 'm2'))
self.assertEqual(len(f.get_params()), 3)
self.assertEqual(f.bar, "barval")
self.assertEqual(f.p2, 5)
self.assertEqual(f.multi, ('m1', 'm2'))
self.assertEqual(f.not_a_param, "lol")
def test_multibool(self):
luigi.run(['--local-scheduler', '--no-lock', 'Bar', '--multibool', 'true', '--multibool', 'false'])
self.assertEqual(Bar._val, (True, False))
def test_multibool_empty(self):
luigi.run(['--local-scheduler', '--no-lock', 'Bar'])
self.assertEqual(Bar._val, tuple())
def test_bool_false(self):
luigi.run(['--local-scheduler', '--no-lock', 'Baz'])
self.assertEqual(Baz._val, False)
def test_bool_true(self):
luigi.run(['--local-scheduler', '--no-lock', 'Baz', '--bool'])
self.assertEqual(Baz._val, True)
def test_forgot_param(self):
self.assertRaises(luigi.parameter.MissingParameterException, luigi.run, ['--local-scheduler', '--no-lock', 'ForgotParam'],)
@email_patch
def test_forgot_param_in_dep(self, emails):
# A programmatic missing parameter will cause an error email to be sent
luigi.run(['--local-scheduler', '--no-lock', 'ForgotParamDep'])
self.assertNotEquals(emails, [])
def test_default_param_cmdline(self):
luigi.run(['--local-scheduler', '--no-lock', 'WithDefault'])
self.assertEqual(WithDefault().x, 'xyz')
def test_global_param_defaults(self):
h = HasGlobalParam(x='xyz')
self.assertEqual(h.global_param, 123)
self.assertEqual(h.global_bool_param, False)
def test_global_param_cmdline(self):
luigi.run(['--local-scheduler', '--no-lock', 'HasGlobalParam', '--x', 'xyz', '--global-param', '124'])
h = HasGlobalParam(x='xyz')
self.assertEqual(h.global_param, 124)
self.assertEqual(h.global_bool_param, False)
def test_global_param_cmdline_flipped(self):
luigi.run(['--local-scheduler', '--no-lock', '--global-param', '125', 'HasGlobalParam', '--x', 'xyz'])
h = HasGlobalParam(x='xyz')
self.assertEqual(h.global_param, 125)
self.assertEqual(h.global_bool_param, False)
def test_global_param_override(self):
h1 = HasGlobalParam(x='xyz', global_param=124)
h2 = HasGlobalParam(x='xyz')
self.assertEquals(h1.global_param, 124)
self.assertEquals(h2.global_param, 123)
def test_global_param_dep_cmdline(self):
luigi.run(['--local-scheduler', '--no-lock', 'HasGlobalParamDep', '--x', 'xyz', '--global-param', '124'])
h = HasGlobalParam(x='xyz')
self.assertEqual(h.global_param, 124)
self.assertEqual(h.global_bool_param, False)
def test_global_param_dep_cmdline_optparse(self):
luigi.run(['--local-scheduler', '--no-lock', '--task', 'HasGlobalParamDep', '--x', 'xyz', '--global-param', '124'], use_optparse=True)
h = HasGlobalParam(x='xyz')
self.assertEqual(h.global_param, 124)
self.assertEqual(h.global_bool_param, False)
def test_global_param_dep_cmdline_bool(self):
luigi.run(['--local-scheduler', '--no-lock', 'HasGlobalParamDep', '--x', 'xyz', '--global-bool-param'])
h = HasGlobalParam(x='xyz')
self.assertEqual(h.global_param, 123)
self.assertEqual(h.global_bool_param, True)
def test_global_param_shared(self):
luigi.run(['--local-scheduler', '--no-lock', 'SharedGlobalParamA', '--shared-global-param', 'abc'])
b = SharedGlobalParamB()
self.assertEqual(b.shared_global_param, 'abc')
def test_insignificant_parameter(self):
class InsignificantParameterTask(luigi.Task):
foo = luigi.Parameter(significant=False, default='foo_default')
bar = luigi.Parameter()
t1 = InsignificantParameterTask(foo='x', bar='y')
self.assertEqual(t1.task_id, 'InsignificantParameterTask(bar=y)')
t2 = InsignificantParameterTask('u', 'z')
self.assertEqual(t2.foo, 'u')
self.assertEqual(t2.bar, 'z')
self.assertEqual(t2.task_id, 'InsignificantParameterTask(bar=z)')
def test_local_significant_param(self):
""" Obviously, if anything should be positional, so should local
significant parameters """
class MyTask(luigi.Task):
# This could typically be "--label-company=disney"
x = luigi.Parameter(significant=True)
MyTask('arg')
self.assertRaises(luigi.parameter.MissingParameterException,
lambda: MyTask())
def test_local_insignificant_param(self):
""" Ensure we have the same behavior as in before a78338c """
class MyTask(luigi.Task):
# This could typically be "--num-threads=True"
x = luigi.Parameter(significant=False)
MyTask('arg')
self.assertRaises(luigi.parameter.MissingParameterException,
lambda: MyTask())
class TestNewStyleGlobalParameters(unittest.TestCase):
def setUp(self):
super(TestNewStyleGlobalParameters, self).setUp()
MockTarget.fs.clear()
BananaDep.y.reset_global()
def expect_keys(self, expected):
self.assertEquals(set(MockTarget.fs.get_all_data().keys()), set(expected))
def test_x_arg(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg'])
self.expect_keys(['banana-foo-bar', 'banana-dep-foo-def'])
def test_x_arg_override(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg', '--BananaDep-y', 'xyz'])
self.expect_keys(['banana-foo-bar', 'banana-dep-foo-xyz'])
def test_x_arg_override_stupid(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg', '--BananaDep-x', 'blabla'])
self.expect_keys(['banana-foo-bar', 'banana-dep-foo-def'])
def test_x_arg_y_arg(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg-y-arg'])
self.expect_keys(['banana-foo-bar', 'banana-dep-foo-bar'])
def test_x_arg_y_arg_override(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg-y-arg', '--BananaDep-y', 'xyz'])
self.expect_keys(['banana-foo-bar', 'banana-dep-foo-bar'])
def test_x_arg_y_arg_override_all(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'x-arg-y-arg', '--BananaDep-y', 'xyz', '--BananaDep-x', 'blabla'])
self.expect_keys(['banana-foo-bar', 'banana-dep-foo-bar'])
def test_y_arg_override(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'y-kwarg', '--BananaDep-x', 'xyz'])
self.expect_keys(['banana-foo-bar', 'banana-dep-xyz-bar'])
def test_y_arg_override_both(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--x', 'foo', '--y', 'bar', '--style', 'y-kwarg', '--BananaDep-x', 'xyz', '--BananaDep-y', 'blah'])
self.expect_keys(['banana-foo-bar', 'banana-dep-xyz-bar'])
def test_y_arg_override_banana(self):
luigi.run(['--local-scheduler', '--no-lock', 'Banana', '--y', 'bar', '--style', 'y-kwarg', '--BananaDep-x', 'xyz', '--Banana-x', 'baz'])
self.expect_keys(['banana-baz-bar', 'banana-dep-xyz-bar'])
class TestRemoveGlobalParameters(unittest.TestCase):
def setUp(self):
super(TestRemoveGlobalParameters, self).setUp()
MyConfig.mc_p.reset_global()
MyConfig.mc_q.reset_global()
MyConfigWithoutSection.mc_r.reset_global()
MyConfigWithoutSection.mc_s.reset_global()
def run_and_check(self, args):
run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
self.assertTrue(run_exit_status)
return run_exit_status
def test_use_config_class_1(self):
self.run_and_check(['--MyConfig-mc-p', '99', '--mc-r', '55', 'NoopTask'])
self.assertEqual(MyConfig().mc_p, 99)
self.assertEqual(MyConfig().mc_q, 73)
self.assertEqual(MyConfigWithoutSection().mc_r, 55)
self.assertEqual(MyConfigWithoutSection().mc_s, 99)
def test_use_config_class_2(self):
self.run_and_check(['NoopTask', '--MyConfig-mc-p', '99', '--mc-r', '55'])
self.assertEqual(MyConfig().mc_p, 99)
self.assertEqual(MyConfig().mc_q, 73)
self.assertEqual(MyConfigWithoutSection().mc_r, 55)
self.assertEqual(MyConfigWithoutSection().mc_s, 99)
def test_use_config_class_more_args(self):
self.run_and_check(['--MyConfig-mc-p', '99', '--mc-r', '55', 'NoopTask', '--mc-s', '123', '--MyConfig-mc-q', '42'])
self.assertEqual(MyConfig().mc_p, 99)
self.assertEqual(MyConfig().mc_q, 42)
self.assertEqual(MyConfigWithoutSection().mc_r, 55)
self.assertEqual(MyConfigWithoutSection().mc_s, 123)
@with_config({"MyConfig": {"mc_p": "666", "mc_q": "777"}})
def test_use_config_class_with_configuration(self):
self.run_and_check(['--mc-r', '555', 'NoopTask'])
self.assertEqual(MyConfig().mc_p, 666)
self.assertEqual(MyConfig().mc_q, 777)
self.assertEqual(MyConfigWithoutSection().mc_r, 555)
self.assertEqual(MyConfigWithoutSection().mc_s, 99)
@with_config({"MyConfigWithoutSection": {"mc_r": "999", "mc_s": "888"}})
def test_use_config_class_with_configuration_2(self):
self.run_and_check(['NoopTask', '--MyConfig-mc-p', '222', '--mc-r', '555'])
self.assertEqual(MyConfig().mc_p, 222)
self.assertEqual(MyConfig().mc_q, 73)
self.assertEqual(MyConfigWithoutSection().mc_r, 555)
self.assertEqual(MyConfigWithoutSection().mc_s, 888)
def test_misc_1(self):
class Dogs(luigi.Config):
n_dogs = luigi.IntParameter()
class CatsWithoutSection(luigi.Config):
use_cmdline_section = False
n_cats = luigi.IntParameter()
self.run_and_check(['--n-cats', '123', '--Dogs-n-dogs', '456', 'WithDefault'])
self.assertEqual(Dogs().n_dogs, 456)
self.assertEqual(CatsWithoutSection().n_cats, 123)
self.run_and_check(['WithDefault', '--n-cats', '321', '--Dogs-n-dogs', '654'])
self.assertEqual(Dogs().n_dogs, 654)
self.assertEqual(CatsWithoutSection().n_cats, 321)
def test_global_significant_param(self):
""" We don't want any kind of global param to be positional """
class MyTask(luigi.Task):
# This could typically be called "--test-dry-run"
x_g1 = luigi.Parameter(default='y', is_global=True, significant=True)
self.assertRaises(luigi.parameter.UnknownParameterException,
lambda: MyTask('arg'))
def test_global_insignificant_param(self):
""" We don't want any kind of global param to be positional """
class MyTask(luigi.Task):
# This could typically be "--yarn-pool=development"
x_g2 = luigi.Parameter(default='y', is_global=True, significant=False)
self.assertRaises(luigi.parameter.UnknownParameterException,
lambda: MyTask('arg'))
def test_mixed_params(self):
""" Essentially for what broke in a78338c and was reported in #738 """
class MyTask(luigi.Task):
# This could typically be "--num-threads=True"
x_g3 = luigi.Parameter(default='y', is_global=True)
local_param = luigi.Parameter()
MyTask('setting_local_param')
def test_mixed_params_inheritence(self):
""" A slightly more real-world like test case """
class TaskWithOneGlobalParam(luigi.Task):
non_positional_param = luigi.Parameter(default='y', is_global=True)
class TaskWithOnePositionalParam(TaskWithOneGlobalParam):
""" Try to mess with positional parameters by subclassing """
only_positional_param = luigi.Parameter()
def complete(self):
return True
class PositionalParamsRequirer(luigi.Task):
def requires(self):
return TaskWithOnePositionalParam('only_positional_value')
def run(self):
pass
self.run_and_check(['PositionalParamsRequirer'])
self.run_and_check(['PositionalParamsRequirer', '--non-positional-param', 'z'])
class TestParamWithDefaultFromConfig(unittest.TestCase):
def testNoSection(self):
self.assertRaises(ParameterException, lambda: luigi.Parameter(config_path=dict(section="foo", name="bar")).value)
@with_config({"foo": {}})
def testNoValue(self):
self.assertRaises(ParameterException, lambda: luigi.Parameter(config_path=dict(section="foo", name="bar")).value)
@with_config({"foo": {"bar": "baz"}})
def testDefault(self):
class A(luigi.Task):
p = luigi.Parameter(config_path=dict(section="foo", name="bar"))
self.assertEqual("baz", A().p)
self.assertEqual("boo", A(p="boo").p)
@with_config({"foo": {"bar": "2001-02-03T04"}})
def testDateHour(self):
p = luigi.DateHourParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(datetime.datetime(2001, 2, 3, 4, 0, 0), p.value)
@with_config({"foo": {"bar": "2001-02-03"}})
def testDate(self):
p = luigi.DateParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(datetime.date(2001, 2, 3), p.value)
@with_config({"foo": {"bar": "123"}})
def testInt(self):
p = luigi.IntParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(123, p.value)
@with_config({"foo": {"bar": "true"}})
def testBool(self):
p = luigi.BoolParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(True, p.value)
@with_config({"foo": {"bar": "2001-02-03-2001-02-28"}})
def testDateInterval(self):
p = luigi.DateIntervalParameter(config_path=dict(section="foo", name="bar"))
expected = luigi.date_interval.Custom.parse("2001-02-03-2001-02-28")
self.assertEqual(expected, p.value)
@with_config({"foo": {"bar": "1 day"}})
def testTimeDelta(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(days=1), p.value)
@with_config({"foo": {"bar": "2 seconds"}})
def testTimeDeltaPlural(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(seconds=2), p.value)
@with_config({"foo": {"bar": "3w 4h 5m"}})
def testTimeDeltaMultiple(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(weeks=3, hours=4, minutes=5), p.value)
@with_config({"foo": {"bar": "P4DT12H30M5S"}})
def testTimeDelta8601(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(days=4, hours=12, minutes=30, seconds=5), p.value)
@with_config({"foo": {"bar": "P5D"}})
def testTimeDelta8601NoTimeComponent(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(days=5), p.value)
@with_config({"foo": {"bar": "P5W"}})
def testTimeDelta8601Weeks(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(weeks=5), p.value)
@with_config({"foo": {"bar": "P3Y6M4DT12H30M5S"}})
def testTimeDelta8601YearMonthNotSupported(self):
def f():
return luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")).value
self.assertRaises(luigi.parameter.ParameterException, f) # ISO 8601 durations with years or months are not supported
@with_config({"foo": {"bar": "PT6M"}})
def testTimeDelta8601MAfterT(self):
p = luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar"))
self.assertEqual(timedelta(minutes=6), p.value)
@with_config({"foo": {"bar": "P6M"}})
def testTimeDelta8601MBeforeT(self):
def f():
return luigi.TimeDeltaParameter(config_path=dict(section="foo", name="bar")).value
self.assertRaises(luigi.parameter.ParameterException, f) # ISO 8601 durations with months are not supported
def testHasDefaultNoSection(self):
        self.assertFalse(luigi.Parameter(config_path=dict(section="foo", name="bar")).has_value)
@with_config({"foo": {}})
def testHasDefaultNoValue(self):
self.assertFalse(luigi.Parameter(config_path=dict(section="foo", name="bar")).has_value)
@with_config({"foo": {"bar": "baz"}})
def testHasDefaultWithBoth(self):
self.assertTrue(luigi.Parameter(config_path=dict(section="foo", name="bar")).has_value)
@with_config({"foo": {"bar": "one\n\ttwo\n\tthree\n"}})
def testDefaultList(self):
p = luigi.Parameter(is_list=True, config_path=dict(section="foo", name="bar"))
self.assertEqual(('one', 'two', 'three'), p.value)
@with_config({"foo": {"bar": "1\n2\n3"}})
def testDefaultIntList(self):
p = luigi.IntParameter(is_list=True, config_path=dict(section="foo", name="bar"))
self.assertEqual((1, 2, 3), p.value)
@with_config({"foo": {"bar": "baz"}})
def testWithDefault(self):
p = luigi.Parameter(config_path=dict(section="foo", name="bar"), default='blah')
self.assertEqual('baz', p.value) # config overrides default
def testWithDefaultAndMissing(self):
p = luigi.Parameter(config_path=dict(section="foo", name="bar"), default='blah')
self.assertEqual('blah', p.value)
@with_config({"foo": {"bar": "baz"}})
def testGlobal(self):
p = luigi.Parameter(config_path=dict(section="foo", name="bar"), is_global=True, default='blah')
self.assertEqual('baz', p.value)
p.set_global('meh')
self.assertEqual('meh', p.value)
def testGlobalAndMissing(self):
p = luigi.Parameter(config_path=dict(section="foo", name="bar"), is_global=True, default='blah')
self.assertEqual('blah', p.value)
p.set_global('meh')
self.assertEqual('meh', p.value)
@with_config({"A": {"p": "p_default"}})
def testDefaultFromTaskName(self):
class A(luigi.Task):
p = luigi.Parameter()
self.assertEqual("p_default", A().p)
self.assertEqual("boo", A(p="boo").p)
@with_config({"A": {"p": "999"}})
def testDefaultFromTaskNameInt(self):
class A(luigi.Task):
p = luigi.IntParameter()
self.assertEqual(999, A().p)
self.assertEqual(777, A(p=777).p)
@with_config({"A": {"p": "p_default"}, "foo": {"bar": "baz"}})
def testDefaultFromConfigWithTaskNameToo(self):
class A(luigi.Task):
p = luigi.Parameter(config_path=dict(section="foo", name="bar"))
self.assertEqual("p_default", A().p)
self.assertEqual("boo", A(p="boo").p)
@with_config({"A": {"p": "p_default_2"}})
def testDefaultFromTaskNameWithDefault(self):
class A(luigi.Task):
p = luigi.Parameter(default="banana")
self.assertEqual("p_default_2", A().p)
self.assertEqual("boo_2", A(p="boo_2").p)
@with_config({"MyClass": {"p_wohoo": "p_default_3"}})
def testWithLongParameterName(self):
class MyClass(luigi.Task):
p_wohoo = luigi.Parameter(default="banana")
self.assertEqual("p_default_3", MyClass().p_wohoo)
self.assertEqual("boo_2", MyClass(p_wohoo="boo_2").p_wohoo)
@with_config({"RangeDaily": {"days_back": "123"}})
def testSettingOtherMember(self):
class A(luigi.Task):
pass
self.assertEqual(123, luigi.tools.range.RangeDaily(of=A).days_back)
self.assertEqual(70, luigi.tools.range.RangeDaily(of=A, days_back=70).days_back)
class OverrideEnvStuff(unittest.TestCase):
def setUp(self):
env_params_cls = luigi.interface.core
env_params_cls.scheduler_port.reset_global()
@with_config({"core": {"default-scheduler-port": '6543'}})
def testOverrideSchedulerPort(self):
env_params = luigi.interface.core()
self.assertEqual(env_params.scheduler_port, 6543)
@with_config({"core": {"scheduler-port": '6544'}})
def testOverrideSchedulerPort2(self):
env_params = luigi.interface.core()
self.assertEqual(env_params.scheduler_port, 6544)
@with_config({"core": {"scheduler_port": '6545'}})
def testOverrideSchedulerPort3(self):
env_params = luigi.interface.core()
self.assertEqual(env_params.scheduler_port, 6545)
if __name__ == '__main__':
luigi.run(use_optparse=True)
|
17zuoye/luigi
|
test/parameter_test.py
|
Python
|
apache-2.0
| 25,868
|
import copy
from threading import Lock
from .metrics_core import Metric
class CollectorRegistry(object):
"""Metric collector registry.
Collectors must have a no-argument method 'collect' that returns a list of
Metric objects. The returned metrics should be consistent with the Prometheus
exposition formats.
"""
def __init__(self, auto_describe=False):
self._collector_to_names = {}
self._names_to_collectors = {}
self._auto_describe = auto_describe
self._lock = Lock()
def register(self, collector):
"""Add a collector to the registry."""
with self._lock:
names = self._get_names(collector)
duplicates = set(self._names_to_collectors).intersection(names)
if duplicates:
raise ValueError(
'Duplicated timeseries in CollectorRegistry: {0}'.format(
duplicates))
for name in names:
self._names_to_collectors[name] = collector
self._collector_to_names[collector] = names
def unregister(self, collector):
"""Remove a collector from the registry."""
with self._lock:
for name in self._collector_to_names[collector]:
del self._names_to_collectors[name]
del self._collector_to_names[collector]
def _get_names(self, collector):
"""Get names of timeseries the collector produces."""
desc_func = None
# If there's a describe function, use it.
try:
desc_func = collector.describe
except AttributeError:
pass
# Otherwise, if auto describe is enabled use the collect function.
if not desc_func and self._auto_describe:
desc_func = collector.collect
if not desc_func:
return []
result = []
type_suffixes = {
'counter': ['_total', '_created'],
'summary': ['', '_sum', '_count', '_created'],
'histogram': ['_bucket', '_sum', '_count', '_created'],
'gaugehistogram': ['_bucket', '_gsum', '_gcount'],
'info': ['_info'],
}
for metric in desc_func():
for suffix in type_suffixes.get(metric.type, ['']):
result.append(metric.name + suffix)
return result
def collect(self):
"""Yields metrics from the collectors in the registry."""
collectors = None
with self._lock:
collectors = copy.copy(self._collector_to_names)
for collector in collectors:
for metric in collector.collect():
yield metric
def restricted_registry(self, names):
"""Returns object that only collects some metrics.
Returns an object which upon collect() will return
only samples with the given names.
Intended usage is:
generate_latest(REGISTRY.restricted_registry(['a_timeseries']))
Experimental."""
names = set(names)
collectors = set()
with self._lock:
for name in names:
if name in self._names_to_collectors:
collectors.add(self._names_to_collectors[name])
metrics = []
for collector in collectors:
for metric in collector.collect():
samples = [s for s in metric.samples if s[0] in names]
if samples:
m = Metric(metric.name, metric.documentation, metric.type)
m.samples = samples
metrics.append(m)
class RestrictedRegistry(object):
def collect(self):
return metrics
return RestrictedRegistry()
def get_sample_value(self, name, labels=None):
"""Returns the sample value, or None if not found.
This is inefficient, and intended only for use in unittests.
"""
if labels is None:
labels = {}
for metric in self.collect():
for s in metric.samples:
if s.name == name and s.labels == labels:
return s.value
return None
REGISTRY = CollectorRegistry(auto_describe=True)
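# A minimal usage sketch of the collector contract described in the
# CollectorRegistry docstring above: a collector only needs a no-argument
# collect() method returning Metric objects. GaugeMetricFamily is assumed to
# be importable from prometheus_client.core; QueueDepthCollector is a
# hypothetical example name, not part of this module.
if __name__ == '__main__':
    from prometheus_client.core import GaugeMetricFamily
    class QueueDepthCollector(object):
        """Hypothetical collector exposing the length of an in-memory queue."""
        def __init__(self, queue):
            self._queue = queue
        def collect(self):
            gauge = GaugeMetricFamily('queue_depth', 'Items waiting in the queue')
            gauge.add_metric([], len(self._queue))
            yield gauge
    demo_registry = CollectorRegistry()
    demo_registry.register(QueueDepthCollector([1, 2, 3]))
    # get_sample_value() walks collect() output, so this should print 3.0
    print(demo_registry.get_sample_value('queue_depth'))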
|
cloudera/hue
|
desktop/core/ext-py/prometheus_client-0.7.1/prometheus_client/registry.py
|
Python
|
apache-2.0
| 4,226
|
"""Auto-generated file, do not edit by hand. 882 metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_882 = PhoneMetadata(id='001', country_code=882, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='\\d{9}', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='\\d{9}', example_number='123456789', possible_length=(9,)),
number_format=[NumberFormat(pattern='(\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3')])
|
gencer/python-phonenumbers
|
python/tests/testdata/region_882.py
|
Python
|
apache-2.0
| 527
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "my-django-skeleton",
version = "0.1",
url = 'http://github.com/lemonad/my-django-skeleton',
license = 'BSD',
description = "A skeleton setup for starting a django project with virtualenv and buildout.",
long_description = read('README'),
author = 'Jonas Nockert',
author_email = 'jonasnockert@gmail.com',
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools'],
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
]
)
|
lemonad/my-django-skeleton
|
setup.py
|
Python
|
bsd-2-clause
| 930
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
import six
from happenings.models import Event
@override_settings(CALENDAR_SHOW_LIST=True)
class SetMeUp(TestCase):
@classmethod
def setUpClass(cls):
super(SetMeUp, cls).setUpClass()
cls.user = User.objects.create_user(
'foo', 'bar@example.com', 'secret'
)
html = '">%d</a><a class='
cls.cal_str = lambda self, day: html % day
cls.event_div = '<div class="calendar-event">'
@classmethod
def tearDownClass(cls):
cls.user.delete()
def clean_whitespace(self, response):
"""Remove all newlines and all occurances of multiple spaces."""
if hasattr(response, 'content'):
is_response = True
content = response.content
else:
is_response = False
content = response
if isinstance(content, six.text_type):
content = content.encode('utf-8')
content = content.replace(b'\n', b'')
for num_spaces in range(7, 2, -1):
# reduce all multiple spaces to 2 spaces.
            # Processing only num_spaces=3 would give the same result, but more slowly
while content.find(b' '*num_spaces) >= 0:
content = content.replace(b' '*num_spaces, b' '*2)
content = content.replace(b' '*2, b'')
if is_response:
response.content = content
else:
content = content.decode('utf-8')
return content
def create_event(created_by, title, description, all_day=False,
start_date=None, end_date=None, categories=None, tags=None,
repeat='NEVER', end_repeat=None, full=True, utc=False):
"""
A factory method for creating events. If start_date is supplied,
end_date must also be supplied, and they must both be either lists
or tuples e.g. start_date=[2014, 2, 2], end_date=[2014, 2, 3].
"""
if start_date and end_date:
# Set the start and end dates to local tz
if utc:
val = timezone.utc
else:
val = timezone.get_default_timezone()
start_date = timezone.make_aware(datetime.datetime(*start_date), val)
end_date = timezone.make_aware(datetime.datetime(*end_date), val)
elif start_date and not end_date or end_date and not start_date:
raise ValueError("Both start_date and end_date must be supplied or not"
" supplied at all when using create_event")
else:
start_date = timezone.now()
end_date = timezone.now()
event = Event.objects.create(
start_date=start_date,
end_date=end_date,
all_day=all_day,
created_by=created_by,
title=title,
description=description,
repeat=repeat,
end_repeat=end_repeat
)
if categories:
for category in categories:
event.categories.create(title=category)
if tags:
for tag in tags:
event.tags.create(name=tag)
if full:
event.full_clean()
event.save()
return event
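# A minimal usage sketch of the create_event factory above, following the
# docstring's contract that start_date and end_date are passed together as
# year/month/day lists. A configured Django environment with the happenings
# app installed is assumed; the user, title and tag values are hypothetical.
if __name__ == '__main__':
    demo_user = User.objects.create_user('demo', 'demo@example.com', 'secret')
    demo_event = create_event(
        created_by=demo_user,
        title='Spring meetup',
        description='An example event built through the factory.',
        start_date=[2014, 2, 2],
        end_date=[2014, 2, 3],
        categories=['community'],
        tags=['demo'],
        repeat='NEVER',
    )
    print(demo_event.title, demo_event.start_date, demo_event.end_date)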
|
imposeren/django-happenings
|
tests/integration_tests/event_factory.py
|
Python
|
bsd-2-clause
| 3,323
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import char
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc6402
class BackwardCompatibilityTestCase(unittest.TestCase):
pem_text = """\
MIIEJQYJKoZIhvcNAQcCoIIEFjCCBBICAQMxCzAJBgUrDgMCGgUAMIIDAgYIKwYBBQUHDAKgggL0
BIIC8DCCAuwweDB2AgECBgorBgEEAYI3CgoBMWUwYwIBADADAgEBMVkwVwYJKwYBBAGCNxUUMUow
SAIBBQwZcGl0dWNoYTEuZW1lYS5ocHFjb3JwLm5ldAwMRU1FQVxwaXR1Y2hhDBpDTUNSZXFHZW5l
cmF0b3IudnNob3N0LmV4ZTCCAmqgggJmAgEBMIICXzCCAcgCAQAwADCBnzANBgkqhkiG9w0BAQEF
AAOBjQAwgYkCgYEA0jm7SSSm2wyEAzuNKtFZFJKo91SrJq9wQwEhEKHDavZwMQOm1rZ2PF8NWCEb
PqrhToQ7rtiGLSZa4dF4bzgmBqQ9aoSfEX4jISt31Vy+skHidXjHHpbsjT24NPhrZgANivL7CxD6
Ft+s7qS1gL4HRm2twQkqSwOLrE/q2QeXl2UCAwEAAaCCAR0wGgYKKwYBBAGCNw0CAzEMFgo2LjIu
OTIwMC4yMD4GCSqGSIb3DQEJDjExMC8wHQYDVR0OBBYEFMW2skn88gxhONWZQA4sWGBDb68yMA4G
A1UdDwEB/wQEAwIHgDBXBgkrBgEEAYI3FRQxSjBIAgEFDBlwaXR1Y2hhMS5lbWVhLmhwcWNvcnAu
bmV0DAxFTUVBXHBpdHVjaGEMGkNNQ1JlcUdlbmVyYXRvci52c2hvc3QuZXhlMGYGCisGAQQBgjcN
AgIxWDBWAgECHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABv
AGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIDAQAwDQYJKoZIhvcNAQEFBQADgYEAJZlu
mxjtCxSOQi27jsVdd3y8NSIlzNv0b3LqmzvAly6L+CstXcnuG2MPQqPH9R7tbJonGUniBQO9sQ7C
KhYWj2gfhiEkSID82lV5chINVUFKoUlSiEhWr0tPGgvOaqdsKQcrHfzrsBbFkhDqrFSVy7Yivbnh
qYszKrOjJKiiCPMwADAAMYH5MIH2AgEDgBTFtrJJ/PIMYTjVmUAOLFhgQ2+vMjAJBgUrDgMCGgUA
oD4wFwYJKoZIhvcNAQkDMQoGCCsGAQUFBwwCMCMGCSqGSIb3DQEJBDEWBBTFTkK/OifaFjwqHiJu
xM7qXcg/VzANBgkqhkiG9w0BAQEFAASBgKfC6jOi1Wgy4xxDCQVK9+e5tktL8wE/j2cb9JSqq+aU
5UxEgXEw7q7BoYZCAzcxMRriGzakXr8aXHcgkRJ7XcFvLPUjpmGg9SOZ2sGW4zQdWAwImN/i8loc
xicQmJP+VoMHo/ZpjFY9fYCjNZUArgKsEwK/s+p9yrVVeB1Nf8Mn
"""
def testDerCodec(self):
layers = { }
layers.update(rfc5652.cmsContentTypesMap)
getNextLayer = {
rfc5652.id_ct_contentInfo: lambda x: x['contentType'],
rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContentType'],
rfc6402.id_cct_PKIData: lambda x: None
}
getNextSubstrate = {
rfc5652.id_ct_contentInfo: lambda x: x['content'],
rfc5652.id_signedData: lambda x: x['encapContentInfo']['eContent'],
rfc6402.id_cct_PKIData: lambda x: None
}
substrate = pem.readBase64fromText(self.pem_text)
next_layer = rfc5652.id_ct_contentInfo
while next_layer:
asn1Object, rest = der_decoder(substrate, asn1Spec=layers[next_layer])
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
substrate = getNextSubstrate[next_layer](asn1Object)
next_layer = getNextLayer[next_layer](asn1Object)
def testOpenTypes(self):
class ClientInformation(univ.Sequence):
pass
ClientInformation.componentType = namedtype.NamedTypes(
namedtype.NamedType('clientId', univ.Integer()),
namedtype.NamedType('MachineName', char.UTF8String()),
namedtype.NamedType('UserName', char.UTF8String()),
namedtype.NamedType('ProcessName', char.UTF8String())
)
class EnrollmentCSP(univ.Sequence):
pass
EnrollmentCSP.componentType = namedtype.NamedTypes(
namedtype.NamedType('KeySpec', univ.Integer()),
namedtype.NamedType('Name', char.BMPString()),
namedtype.NamedType('Signature', univ.BitString())
)
openTypeMap = {
# attributes
univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'): char.IA5String(),
univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.2'): EnrollmentCSP(),
univ.ObjectIdentifier('1.3.6.1.4.1.311.21.20'): ClientInformation(),
# algorithm identifier parameters
univ.ObjectIdentifier('1.2.840.113549.1.1.1'): univ.Null(""),
univ.ObjectIdentifier('1.2.840.113549.1.1.5'): univ.Null(""),
univ.ObjectIdentifier('1.2.840.113549.1.1.11'): univ.Null(""),
}
openTypeMap.update(rfc5652.cmsAttributesMap)
openTypeMap.update(rfc6402.cmcControlAttributesMap)
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
eci = asn1Object['content']['encapContentInfo']
self.assertEqual(rfc6402.id_cct_PKIData, eci['eContentType'])
substrate = eci['eContent']
asn1Object, rest = der_decoder(
substrate, asn1Spec=rfc6402.PKIData(), openTypes=openTypeMap,
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
for req in asn1Object['reqSequence']:
cr = req['tcr']['certificationRequest']
sig_alg = cr['signatureAlgorithm']
self.assertIn(sig_alg['algorithm'], openTypeMap)
self.assertEqual(univ.Null(""), sig_alg['parameters'])
cri = cr['certificationRequestInfo']
spki_alg = cri['subjectPublicKeyInfo']['algorithm']
self.assertIn(spki_alg['algorithm'], openTypeMap)
self.assertEqual(univ.Null(""), spki_alg['parameters'])
attrs = cr['certificationRequestInfo']['attributes']
for attr in attrs:
self.assertIn( attr['attrType'], openTypeMap)
if attr['attrType'] == univ.ObjectIdentifier('1.3.6.1.4.1.311.13.2.3'):
self.assertEqual("6.2.9200.2", attr['attrValues'][0])
else:
self.assertTrue(attr['attrValues'][0].hasValue())
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
etingof/pyasn1-modules
|
tests/test_rfc6402.py
|
Python
|
bsd-2-clause
| 6,395
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import scipy
import scipy.stats  # scipy.stats.ks_2samp is used below; "import scipy" alone may not expose the stats submodule
import numpy
import pmagpy.pmagplotlib as pmagplotlib
def main():
"""
NAME
plot_2cdfs.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_2cdfs.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE1 FILE2
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
"""
fmt='svg'
title=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
X=numpy.loadtxt(file)
file=sys.argv[ind+2]
X2=numpy.loadtxt(file)
# else:
# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)
else:
print('-f option required')
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-t' in sys.argv:
ind=sys.argv.index('-t')
title=sys.argv[ind+1]
CDF={'X':1}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_cdf(CDF['X'],X,'','r','')
pmagplotlib.plot_cdf(CDF['X'],X2,title,'b','')
D,p=scipy.stats.ks_2samp(X,X2)
if p>=.05:
print(D,p,' not rejected at 95%')
else:
print(D,p,' rejected at 95%')
pmagplotlib.draw_figs(CDF)
ans= input('S[a]ve plot, <Return> to quit ')
if ans=='a':
files={'X':'CDF_.'+fmt}
pmagplotlib.save_plots(CDF,files)
if __name__ == "__main__":
main()
|
lfairchild/PmagPy
|
programs/plot_2cdfs.py
|
Python
|
bsd-3-clause
| 1,652
|
import unittest
from distutils.errors import CompileError
from pythran.tests import TestFromDir
import os
import pythran
from pythran.syntax import PythranSyntaxError
from pythran.spec import Spec
class TestOpenMP(TestFromDir):
path = os.path.join(os.path.dirname(__file__), "openmp")
class TestOpenMP4(TestFromDir):
path = os.path.join(os.path.dirname(__file__), "openmp.4")
@staticmethod
def interface(name, file=None):
return Spec({name: []})
@staticmethod
def extract_runas(name, filepath):
return ['#runas {}()'.format(name)]
class TestOpenMPLegacy(TestFromDir):
'''
Test old style OpenMP constructs, not using comments but strings
and relying on function-scope locals
'''
path = os.path.join(os.path.dirname(__file__), "openmp.legacy")
@staticmethod
def interface(name, file=None):
return Spec({name: []})
@staticmethod
def extract_runas(name, filepath):
return ['#runas {}()'.format(name)]
# only activate OpenMP tests if the underlying compiler supports OpenMP
try:
pythran.compile_cxxcode("omp", '#include <omp.h>',
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'])
import omp
if '-fopenmp' in pythran.config.cfg.get('compiler', 'ldflags'):
TestOpenMP4.populate(TestOpenMP4)
TestOpenMP.populate(TestOpenMP)
TestOpenMPLegacy.populate(TestOpenMPLegacy)
except PythranSyntaxError:
raise
except (CompileError, ImportError):
pass
if __name__ == '__main__':
unittest.main()
|
serge-sans-paille/pythran
|
pythran/tests/test_openmp.py
|
Python
|
bsd-3-clause
| 1,597
|
import csv
import os
from datetime import datetime
import logging
import re
from dipper.sources.PostgreSQLSource import PostgreSQLSource
from dipper.models.assoc.Association import Assoc
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.models.Model import Model
from dipper import config
from dipper.models.GenomicFeature import Feature, makeChromID
LOG = logging.getLogger(__name__)
class MGI(PostgreSQLSource):
"""
This is the
[Mouse Genome Informatics](http://www.informatics.jax.org/) resource,
from which we process genotype and phenotype data about laboratory mice.
Genotypes leverage the GENO genotype model.
Here, we connect to their public database, and download a subset of
tables/views to get specifically at the geno-pheno data,
then iterate over the tables. We end up effectively performing joins
when adding nodes to the graph.
In order to use this parser, you will need to have user/password connection
details in your conf.yaml file, like:
dbauth : {'mgi' : {'user' : '<username>', 'password' : '<password>'}}
You can request access by contacting mgi-help@jax.org
"""
# CONSIDER IF WE NEED:
# mgi_organism_acc_view:
# Consider using this for the taxon mapping instead of
# the hashmap encoded below
# mgi_reference_allele_view:
# Don't believe this view is used in either
    #   the genotype or phenotype view
# all_allele_cellline_view: When we want to start dealing with cell lines
# mgi_note_strain_view: prose descriptions of strains.
# prb_strain_summary_view:
# Don't believe this view is used in
    #   either the genotype or phenotype view
# prb_strain_marker_view:
# eventually i think we want this because
# it has other relevant markers that are affected
resources = {
'query_map': [
{
'query': '../../resources/sql/mgi/mgi_dbinfo.sql',
'outfile': 'mgi_dbinfo',
'Force': True
},
{
'query': '../../resources/sql/mgi/gxd_genotype_view.sql',
'outfile': 'gxd_genotype_view'
},
{
'query': '../../resources/sql/mgi/gxd_genotype_summary_view.sql',
'outfile': 'gxd_genotype_summary_view'
},
{
'query': '../../resources/sql/mgi/gxd_allelepair_view.sql',
'outfile': 'gxd_allelepair_view'
},
{
'query': '../../resources/sql/mgi/all_summary_view.sql',
'outfile': 'all_summary_view'
},
{
'query': '../../resources/sql/mgi/all_allele_view.sql',
'outfile': 'all_allele_view'
},
{
'query': '../../resources/sql/mgi/all_allele_mutation_view.sql',
'outfile': 'all_allele_mutation_view'
},
{
'query': '../../resources/sql/mgi/mrk_marker_view.sql',
'outfile': 'mrk_marker_view'
},
{
'query': '../../resources/sql/mgi/voc_annot_view.sql',
'outfile': 'voc_annot_view'
},
{
'query': '../../resources/sql/mgi/evidence.sql',
'outfile': 'evidence_view'
},
{
'query': '../../resources/sql/mgi/bib_acc_view.sql',
'outfile': 'bib_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_view.sql',
'outfile': 'prb_strain_view'
},
{
'query': '../../resources/sql/mgi/mrk_summary_view.sql',
'outfile': 'mrk_summary_view'
},
{
'query': '../../resources/sql/mgi/mrk_acc_view.sql',
'outfile': 'mrk_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_acc_view.sql',
'outfile': 'prb_strain_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_genotype_view.sql',
'outfile': 'prb_strain_genotype_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_vocevidence_view.sql',
'outfile': 'mgi_note_vocevidence_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_allele_view.sql',
'outfile': 'mgi_note_allele_view'
},
{
'query': '../../resources/sql/mgi/mrk_location_cache.sql',
'outfile': 'mrk_location_cache' # gene locations
}
],
'test_keys': '../../resources/mgi_test_keys.yaml'
}
# with an existing set of (fresh) files in the shell; we can get a head start with:
# for v in raw/mgi/*;do echo -e "\t\t'${v##*/}': \
# {\n\t\t\t'columns': [";head -1 $v|tr '\t' '\n'|sed "s/\(.*\)/\t\t\t\t'\1',/";done
tables = {
'all_allele_mutation_view': {
'columns': [
'_allele_key',
'mutation']},
'all_allele_view': {
'columns': [
'_allele_key',
'_marker_key',
'_strain_key',
'symbol',
'name',
'iswildtype']},
'all_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'description',
'short_description']},
'bib_acc_view': {
'columns': [
'accid',
'prefixpart',
'numericpart',
'_object_key',
'logicaldb',
'_logicaldb_key']},
'evidence_view': {
'columns': [
'_annotevidence_key',
'_annot_key',
'evidencecode',
'jnumid',
'term',
'value',
'annottype']},
'gxd_allelepair_view': {
'columns': [
'_allelepair_key',
'_genotype_key',
'_allele_key_1',
'_allele_key_2',
'allele1',
'allele2',
'allelestate']},
'gxd_genotype_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'gxd_genotype_view': {
'columns': [
'_genotype_key',
'_strain_key',
'strain',
'mgiid']},
'mgi_note_allele_view': {
'columns': [
'_object_key',
'notetype',
'note',
'sequencenum']},
'mgi_note_vocevidence_view': {
'columns': [
'_object_key',
'note']},
'mgi_relationship_transgene_genes': {
'columns': [
'rel_key',
'object_1',
'allele_id',
'allele_label',
'category_key',
'category_name',
'property_key',
'property_name',
'property_value']},
'mrk_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred',
'_organism_key']},
'mrk_location_cache': {
'columns': [
'_marker_key',
'_organism_key',
'chromosome',
'startcoordinate',
'endcoordinate',
'strand',
'version']},
'mrk_marker_view': {
'columns': [
'_marker_key',
'_organism_key',
'_marker_status_key',
'symbol',
'name',
'latinname',
'markertype']},
'mrk_summary_view': {
'columns': [
'accid',
'_logicaldb_key',
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'prb_strain_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred']},
'prb_strain_genotype_view': {
'columns': [
'_strain_key',
'_genotype_key']},
'prb_strain_view': {
'columns': [
'_strain_key',
'strain',
'species']},
'voc_annot_view': {
'columns': [
'_annot_key',
'annottype',
'_object_key',
'_term_key',
'_qualifier_key',
'qualifier',
'term',
'accid']},
}
# For ambiguous/undefined taxa terms that will
# conflict with seq alt_type portion of local tt
unknown_taxa = [
'Not Applicable',
'Not Specified',
]
# for testing purposes, this is a list of internal db keys
# to match and select only portions of the source
def __init__(
self,
graph_type,
are_bnodes_skolemized,
data_release_version=None
):
super().__init__(
graph_type=graph_type,
are_bnodes_skolemized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='mgi',
ingest_title='Mouse Genome Informatics',
ingest_url='http://www.informatics.jax.org/',
ingest_logo="source-mgi.png",
license_url=None,
data_rights='http://www.informatics.jax.org/mgihome/other/copyright.shtml',
file_handle=None)
        # So that we don't have to deal with BNodes, we create hash lookups
        # for the internal identifiers: each hash maps the type-specific
        # object keys to MGI public identifiers. Subsequent views of the
        # tables then look up identifiers in the hash, which lets us do the
        # 'joining' on the fly.
self.idhash = {
'allele': {}, 'marker': {}, 'publication': {}, 'strain': {},
'genotype': {}, 'annot': {}, 'notes': {}, 'seqalt': {}}
# to store if a marker is a class or indiv
self.markers = {
'classes': [], 'indiv': []}
# use this to store internally generated labels for various features
self.label_hash = {}
# use this to store the genotype strain ids
# for building genotype labels
self.geno_bkgd = {}
self.strain_to_genotype_map = {}
self.wildtype_alleles = set()
# also add the gene ids from the test_ids
# in order to capture transgenes of the test set
if 'gene' in self.all_test_ids:
self.test_ids = self.all_test_ids['gene']
else:
LOG.warning("not configured with gene test ids.")
self.test_ids = []
self.test_keys = self.open_and_parse_yaml(self.resources['test_keys'])
def fetch(self, is_dl_forced=False):
"""
For the MGI resource, we connect to the remote database,
and pull the tables into local files.
We'll check the local table versions against the remote version
:return:
"""
# check if config exists; if it doesn't, error out and let user know
if 'dbauth' not in config.get_config() and 'mgi' \
not in config.get_config()['dbauth']:
LOG.error("not configured with PG user/password.")
# create the connection details for MGI
cxn = config.get_config()['dbauth']['mgi']
pg_iri = ''.join((
'jdbc:postgresql://', cxn['host'], ':', str(cxn['port']), '/',
cxn['database']))
self.dataset.set_ingest_source(pg_iri)
self.dataset.set_ingest_source_file_version_retrieved_on(
pg_iri,
datetime.today().strftime('%Y-%m-%d'))
# process the tables
# self.fetch_from_pgdb(self.tables, cxn, 100) # for testing only
# self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced)
for query_map in self.resources['query_map']:
query_fh = open(os.path.join(
os.path.dirname(__file__), query_map['query']), 'r')
query = query_fh.read()
# force = False
# if 'Force' in query_map: # unused
# force = query_map['Force']
self.fetch_query_from_pgdb(
query_map['outfile'], query, None, cxn)
        # always get this - it has the version info
self.fetch_transgene_genes_from_db(cxn)
datestamp = ver = None
# get the resource version information from
# table mgi_dbinfo, already fetched above
outfile = '/'.join((self.rawdir, 'mgi_dbinfo'))
if os.path.exists(outfile):
with open(outfile, 'r') as reader:
reader.readline() # read the header row; skip
info = reader.readline()
cols = info.split('\t')
ver = cols[0] # col 0 is public_version
ver = ver.replace('MGI ', '') # MGI 5.20 --> 5.20
# MGI has a datestamp for the data within the database;
# use it instead of the download date
# datestamp in the table: 2014-12-23 00:14:20[.12345]
# modification date without micro seconds
dat = cols[1].strip().split('.')[0]
datestamp = datetime.strptime(
dat, "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
self.dataset.set_ingest_source_file_version_num(pg_iri, ver)
self.dataset.set_ingest_source_file_version_date(pg_iri, datestamp)
def parse(self, limit=None):
"""
We process each of the postgres tables in turn.
The order of processing is important here, as we build
        up a hashmap of internal vs external identifiers
(unique keys by type to MGI id). These include allele, marker (gene),
publication, strain, genotype, annotation (association),
and descriptive notes.
:param limit: Only parse this many rows in each table
:return:
"""
if limit is not None:
LOG.info("Only parsing first %d rows of each file", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
# the following will provide us the hash-lookups
# These must be processed in a specific order
self._process_prb_strain_acc_view(limit)
self._process_mrk_acc_view()
self._process_all_summary_view(limit)
self._process_bib_acc_view(limit)
self._process_gxd_genotype_summary_view(limit)
# The following will use the hash populated above
# to lookup the ids when filling in the graph
self._process_prb_strain_view(limit)
# self._process_prb_strain_genotype_view(limit)
self._process_gxd_genotype_view(limit)
self._process_mrk_marker_view(limit)
self._process_mrk_acc_view_for_equiv(limit)
self._process_mrk_summary_view(limit)
self._process_all_allele_view(limit)
self._process_all_allele_mutation_view(limit)
self._process_gxd_allele_pair_view(limit)
self._process_voc_annot_view(limit)
self._process_evidence_view(limit)
self._process_mgi_note_vocevidence_view(limit)
self._process_mrk_location_cache(limit)
self.process_mgi_relationship_transgene_genes(limit)
self.process_mgi_note_allele_view(limit)
LOG.info("Finished parsing.")
LOG.info("Loaded %d nodes", len(self.graph))
def fetch_transgene_genes_from_db(self, cxn):
"""
This is a custom query to fetch the non-mouse genes that
are part of transgene alleles.
:param cxn:
:return:
"""
query = '''
SELECT r._relationship_key as rel_key,
r._object_key_1 as object_1,
a.accid as allele_id,
alabel.label as allele_label,
rc._category_key as category_key,
rc.name as category_name,
t._term_key as property_key,
t.term as property_name,
rp.value as property_value
FROM mgi_relationship r
JOIN mgi_relationship_category rc ON r._category_key = rc._category_key
JOIN acc_accession a ON r._object_key_1 = a._object_key
AND rc._mgitype_key_1 = a._mgitype_key
AND a._logicaldb_key = 1
JOIN all_label alabel ON a._object_key = alabel._allele_key
AND alabel._label_status_key = 1
AND alabel.priority = 1
JOIN mgi_relationship_property rp ON r._relationship_key = rp._relationship_key
AND rp._propertyname_key = 12948292
JOIN voc_term t ON rp._propertyname_key = t._term_key
WHERE r._category_key = 1004
'''
self.fetch_query_from_pgdb(
'mgi_relationship_transgene_genes', query, None, cxn)
def _process_gxd_genotype_view(self, limit=None):
"""
This table indicates the relationship between a genotype
and it's background strain. It leverages the Genotype class methods
to do this.
Makes these triples:
<MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
<MGI:strainid> a GENO:genomic_background
If the genotype id isn't in the hashmap, it adds it here
(but this shouldn't happen):
<MGI:genotypeid> a GENO:genotype
If the strain isn't in the hashmap, it also adds it here with a
monarchized identifier using the unique key of the strain,
formatted like: :_mgistrainkey12345
:param limit:
:return:
"""
src_key = 'gxd_genotype_view'
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
col = self.tables[src_key]['columns']
raw = '/'.join((self.rawdir, src_key))
LOG.info("getting genotypes and their backgrounds")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
row = line.split('\t')
line_num += 1
genotype_key = row[col.index('_genotype_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
strain = row[col.index('strain',)].strip()
mgiid = row[col.index('mgiid')].strip()
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
if self.idhash['genotype'].get(genotype_key) is None:
# just in case we haven't seen it before,
# catch and add the id mapping here
self.idhash['genotype'][genotype_key] = mgiid
geno.addGenotype(mgiid, None)
# the label is elsewhere...
# need to add the MGI label as a synonym
# if it's in the hash,
# assume that the individual was created elsewhere
strain_id = self.idhash['strain'].get(strain_key)
background_type = self.globaltt['genomic_background']
if strain_id is None or int(strain_key) < 0:
if strain_id is None:
# some of the strains don't have public identifiers!
# so we make one up, and add it to the hash
strain_id = self._make_internal_identifier('strain', strain_key)
self.idhash['strain'].update({strain_key: strain_id})
model.addComment(strain_id, "strain_key:" + strain_key)
elif int(strain_key) < 0:
# these are ones that are unidentified/unknown.
# so add instances of each.
strain_id = self._make_internal_identifier(
'strain', re.sub(r':', '', str(strain_id)))
strain_id += re.sub(r':', '', str(mgiid))
strain_id = re.sub(r'^_', '_:', strain_id)
strain_id = re.sub(r'::', ':', strain_id)
model.addDescription(
strain_id,
"This genomic background is unknown. " +
"This is a placeholder background for " +
mgiid + "."
)
background_type = self.globaltt[
'unspecified_genomic_background']
# add it back to the idhash
LOG.info(
"adding background as internal id: %s %s: %s",
strain_key, strain, strain_id)
geno.addGenomicBackgroundToGenotype(
strain_id, mgiid, background_type)
self.label_hash[strain_id] = strain
# add BG to a hash so we can build the genotype label later
self.geno_bkgd[mgiid] = strain_id
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_gxd_genotype_summary_view(self, limit=None):
"""
Add the genotype internal id to mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap into the genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
src_key = 'gxd_genotype_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
geno_hash = {}
raw = '/'.join((self.rawdir, src_key))
LOG.info("building labels for genotypes")
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
subtype = row[col.index('subtype')].strip()
short_description = row[col.index('short_description')].strip()
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_num > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a label
# (we generate our own label later and add as a synonym)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
model.addComment(
gt, self._make_internal_identifier(
'genotype', genotype.get('key')
)
)
geno.addGenotype(gt, label.strip())
def _process_all_summary_view(self, limit):
"""
Here, we get the allele definitions: id, label, description, type
We also add the id to this source's global idhash for lookup later
<alleleid> a OWL:NamedIndividual
rdfs:label "allele symbol"
dc:description "long allele name"
:param limit:
:return:
"""
src_key = 'all_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
LOG.info(
"alleles with labels and descriptions from all_summary_view")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
# head -1 workspace/build-mgi-ttl/dipper/raw/mgi/all_summary_view|\
# tr '\t' '\n' | grep -n . | \
# awk -F':' '{col=$1;$1="";print $0,",\t #" col}'
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
# bail if the row is malformed
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
LOG.warning(line.format())
continue
# no stray tab in the description column
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
description = row[col.index('description')].strip()
short_description = row[col.index('short_description')].strip()
# NOTE: May want to filter alleles based on the preferred field
# (preferred = 1) or will get duplicates
# (24288, to be exact...
# Reduced to 480 if filtered on preferred = 1)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('allele'):
continue
# we are setting the allele type to None,
# so that we can add the type later
# since we don't actually know
# if it's a reference or altered allele
# altype = None # temporary; we'll assign the type later
# set type to a parent term in case a more specific term is not found
altype = self.globaltt['allele']
# If we want to filter on preferred:
if preferred == '1':
# add the allele key to the hash for later lookup
self.idhash['allele'][object_key] = mgiid
# TODO consider not adding the individuals in this one
model.addIndividualToGraph(
mgiid, short_description.strip(), altype, description.strip()
)
self.label_hash[mgiid] = short_description.strip()
# TODO deal with non-preferreds, are these deprecated?
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_all_allele_view(self, limit):
"""
Add the allele as a variant locus (or reference locus if wild-type).
If the marker is specified, we add the link to the marker.
We assume that the MGI ids are available in the idhash,
added in all_summary_view.
We add the sequence alteration as a BNode here, if there is a marker.
Otherwise, the allele itself is a sequence alteration.
Triples:
<MGI:allele_id> a GENO:variant_locus
OR GENO:reference_locus
OR GENO:sequence_alteration IF no marker_id specified.
[GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
GENO:derived_from <MGI:strain_id>
GENO:has_variant_part <_seq_alt_id>
<_seq_alt_id> a GENO:sequence_alteration
derives_from <strain_id>
:param limit:
:return:
"""
src_key = 'all_allele_view'
# transmission_key -> inheritance? Need to locate related table.
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
LOG.info(
"adding alleles, mapping to markers, extracting their "
"sequence alterations from all_allele_view")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
# bail if the row is malformed
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
LOG.warning(line)
continue
allele_key = row[col.index('_allele_key')].strip()
marker_key = row[col.index('_marker_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
symbol = row[col.index('symbol')].strip()
name = row[col.index('name')].strip()
iswildtype = row[col.index('iswildtype')].strip()
# TODO update processing to use this view better
# including jnums!
if self.test_mode is True and \
int(allele_key) not in self.test_keys.get('allele'):
continue
# so are allele_key ints or not? -TEC
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
LOG.error(
"what to do! can't find allele_id. skipping %s %s",
allele_key, symbol)
continue
marker_id = None
if marker_key is not None and marker_key != '':
# we make the assumption here that the markers
# have already been added to the table
marker_id = self.idhash['marker'].get(marker_key)
if marker_id is None:
LOG.error(
"what to do! can't find marker_id. skipping %s %s",
marker_key, symbol)
continue
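# mint an internal (blank-node style) id for the sequence alteration, since MGI does not publish a public identifier for it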
iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
# for non-wild type alleles:
if iswildtype == '0':
locus_type = self.globaltt['variant_locus']
locus_rel = self.globaltt['is_allele_of']
# for wild type alleles:
elif iswildtype == '1':
locus_type = self.globaltt['reference_locus']
locus_rel = self.globaltt['is_reference_allele_of']
# add the allele to the wildtype set for lookup later
self.wildtype_alleles.add(allele_id)
else:
locus_rel = None
locus_type = None
model.addIndividualToGraph(allele_id, symbol, locus_type)
model.makeLeader(allele_id)
self.label_hash[allele_id] = symbol
self.idhash['seqalt'][allele_key] = iseqalt_id
# HACK - if the label of the allele == marker,
# then make the thing a seq alt
allele_label = self.label_hash.get(allele_id)
marker_label = self.label_hash.get(marker_id)
if allele_label is not None and allele_label == marker_label:
# model.addSameIndividual(allele_id, marker_id)
# this causes disjoint category violations, see
# https://github.com/monarch-initiative/dipper/issues/519
self.idhash['seqalt'][allele_key] = allele_id
model.addComment(
allele_id,
self._make_internal_identifier('allele', allele_key)
)
if marker_id is not None:
# marker_id will be none if the allele
# is not linked to a marker
# (as in, it's not mapped to a locus)
geno.addAlleleOfGene(allele_id, marker_id, locus_rel)
# sequence alteration in strain
if iswildtype == '0':
sa_label = symbol
sa_id = iseqalt_id
if marker_key is not None \
and allele_label != marker_label and marker_key != '':
# sequence alteration has label reformatted(symbol)
if re.match(r".*<.*>.*", symbol):
sa_label = re.sub(r".*<", "<", symbol)
elif re.match(r"\+", symbol):
# TODO: Check to see if this is the proper handling
# as while symbol is just +,
# marker symbol has entries without any <+>.
sa_label = '<+>'
geno.addSequenceAlterationToVariantLocus(iseqalt_id,
allele_id)
else:
# make the sequence alteration == allele
sa_id = allele_id
# else this will end up adding the non-located transgenes
# as sequence alterations also removing the < and > from sa
sa_label = re.sub(r'[\<\>]', '', sa_label)
geno.addSequenceAlteration(sa_id, sa_label, None, name)
self.label_hash[sa_id] = sa_label
strain_id = self.idhash['strain'].get(strain_key)
# scrub out if the strain is "not specified"
if strain_id is not None and \
strain_id not in ['MGI:4867032', 'MGI:5649511']:
geno.addSequenceDerivesFrom(allele_id, strain_id)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_gxd_allele_pair_view(self, limit):
"""
This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return:
"""
src_key = 'gxd_allelepair_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("processing allele pairs (VSLCs) for genotypes")
geno_hash = {}
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
allelepair_key = row[col.index('_allelepair_key')].strip()
genotype_key = row[col.index('_genotype_key')].strip()
allele_key_1 = row[col.index('_allele_key_1')].strip()
allele_key_2 = row[col.index('_allele_key_2')].strip()
allele1 = row[col.index('allele1')].strip()
allele2 = row[col.index('allele2')].strip()
allelestate = row[col.index('allelestate')].strip()
# NOTE: symbol = gene/marker,
# allele1 + allele2 = VSLC,
# allele1/allele2 = variant locus,
# allelestate = zygosity
# FIXME Need to handle alleles not in the *<*> format,
# incl gene traps, induced mut, & transgenics
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id is None:
LOG.error(
"genotype_id not found for key %s; skipping", genotype_key)
continue
if genotype_id not in geno_hash:
geno_hash[genotype_id] = set()
allele1_id = self.idhash['allele'].get(allele_key_1)
allele2_id = self.idhash['allele'].get(allele_key_2)
# Need to map the allelestate to a zygosity term
zygosity_id = self.resolve(allelestate.strip())
ivslc_id = self._make_internal_identifier('vslc', allelepair_key)
geno_hash[genotype_id].add(ivslc_id)
# TODO: VSLC label likely needs processing similar to
# the processing in the all_allele_view
# FIXME: handle null alleles
vslc_label = allele1 + '/'
if allele2_id is None:
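# no second allele recorded: pick a label placeholder from zygosity ('0' null for hemizygous, '+' for heterozygous, '?' when indeterminate or unhandled)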
if zygosity_id in [
self.globaltt['hemizygous insertion-linked'],
self.globaltt['hemizygous-x'],
self.globaltt['hemizygous-y'],
self.globaltt['hemizygous'],
]:
vslc_label += '0'
elif zygosity_id == self.globaltt['heterozygous']:
vslc_label += '+'
elif zygosity_id == self.globaltt['indeterminate']:
vslc_label += '?'
elif zygosity_id == self.globaltt['heteroplasmic']:
vslc_label += '?' # todo is there anything else to add here?
elif zygosity_id == self.globaltt['homoplasmic']:
vslc_label += '?' # todo is there anything else to add here?
elif zygosity_id == self.globaltt['homozygous']:
# we shouldn't get here, but for testing this is handy
vslc_label += allele1
else:
LOG.info(
"A different kind of zygosity found is: %s",
self.globaltcid[zygosity_id])
vslc_label += '?'
else:
vslc_label += allele2
model.addIndividualToGraph(
ivslc_id,
vslc_label,
self.globaltt['variant single locus complement']
)
self.label_hash[ivslc_id] = vslc_label
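# wild-type alleles contribute the reference part of the VSLC; all others are variant parts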
rel1 = rel2 = self.globaltt['has_variant_part']
if allele1_id in self.wildtype_alleles:
rel1 = self.globaltt['has_reference_part']
if allele2_id in self.wildtype_alleles:
rel2 = self.globaltt['has_reference_part']
geno.addPartsToVSLC(
ivslc_id, allele1_id, allele2_id, zygosity_id, rel1, rel2
)
# if genotype_id not in geno_hash:
# geno_hash[genotype_id] = [vslc_label]
# else:
# geno_hash[genotype_id] += [vslc_label]
if not self.test_mode and limit is not None and line_num > limit:
break
# build the gvc and the genotype label
for gt in geno_hash.keys():
if gt is None: # defensive; missing genotype ids are skipped when the hash is built
continue
vslcs = sorted(list(geno_hash[gt]))
gvc_label = None
if len(vslcs) > 1:
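# build a deterministic internal id for the GVC from its sorted member VSLC ids; strip '_' and ':' so the concatenation is clean before make_id turns it into an identifier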
gvc_id = re.sub(r'_', '', ('-'.join(vslcs)))
gvc_id = re.sub(r':', '', gvc_id)
gvc_id = self.make_id(gvc_id, '_')
vslc_labels = []
for v in vslcs:
vslc_labels.append(self.label_hash[v])
gvc_label = '; '.join(vslc_labels)
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
self.label_hash[gvc_id] = gvc_label
for v in vslcs:
geno.addParts(v, gvc_id, self.globaltt['has_variant_part'])
geno.addVSLCtoParent(v, gvc_id)
geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
elif len(vslcs) == 1:
gvc_id = vslcs[0]
gvc_label = self.label_hash[gvc_id]
# type the VSLC as also a GVC
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement']
)
geno.addVSLCtoParent(gvc_id, gt)
else:
LOG.info("No VSLCs for %s", gt)
# make the genotype label = gvc + background
bkgd_id = self.geno_bkgd.get(gt)
if bkgd_id is not None:
bkgd_label = self.label_hash.get(bkgd_id)
if bkgd_label is None:
bkgd_label = bkgd_id # just in case
else:
bkgd_label = 'unspecified background'
if gvc_label is not None:
genotype_label = gvc_label + ' [' + bkgd_label + ']'
else:
genotype_label = '[' + bkgd_label + ']'
self.label_hash[gt] = genotype_label
def _process_all_allele_mutation_view(self, limit):
"""
This fetches the mutation type for the alleles,
and maps them to the sequence alteration.
Note that we create a BNode for the sequence alteration because
it isn't publicly identified.
<sequence alteration id> a <SO:mutation_type>
:param limit:
:return:
"""
src_key = 'all_allele_mutation_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting mutation types for sequence alterations")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
allele_key = row[col.index('_allele_key')].strip()
mutation = row[col.index('mutation')].strip()
iseqalt_id = self.idhash['seqalt'].get(allele_key)
if iseqalt_id is None:
continue
# nothing will ever connect w/these 350k bnode "individuals"
# iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
if self.test_mode and int(allele_key) \
not in self.test_keys.get('allele'):
continue
# TODO we might need to map the seq alteration to the MGI id
# for unlocated things; need to use hashmap
# map the sequence_alteration_type
seq_alt_type_id = self.resolve(mutation, mandatory=False)
if seq_alt_type_id == mutation:
LOG.error("No mappjng found for seq alt '%s'", mutation)
LOG.info("Defaulting to 'sequence_alteration'")
seq_alt_type_id = self.globaltt['sequence_alteration']
# HACK - if the seq alteration is a transgene,
# then make sure it is a transgenic insertion
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is not None:
allele_label = self.label_hash.get(allele_id)
if allele_label is not None and re.search(r'Tg\(', allele_label):
LOG.info(
"Found a transgenic insertion for %s", allele_label)
# transgenic_insertion, instead of plain old insertion
seq_alt_type_id = self.globaltt["transgenic_insertion"]
model.addIndividualToGraph(iseqalt_id, None, seq_alt_type_id)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_voc_annot_view(self, limit):
"""
This MGI table represents associations between things.
We add the internal annotation id to the idhashmap.
It is expected that the genotypes have already been added to the idhash
:param limit:
:return:
"""
# TODO also get Strain/Attributes (annottypekey = 1000)
# TODO what is Phenotype (Derived) vs
# non-derived? (annottypekey = 1015)
# TODO is evidence in this table? what is the evidence vocab key?
src_key = 'voc_annot_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
LOG.info("getting G2P associations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip('\n').split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
row = line.rstrip('\n').split('\t')
annot_key = row[col.index('_annot_key')]
annot_type = row[col.index('annottype')]
object_key = row[col.index('_object_key')]
term_key = row[col.index('_term_key')]
qualifier_key = row[col.index('_qualifier_key')]
qualifier = row[col.index('qualifier')]
# term,
accid = row[col.index('accid')]
if self.test_mode is True:
if int(annot_key) not in self.test_keys.get('annot'):
continue
# qualifier of "norm" means the phenotype was measured but
# was normal, since we don't have negation or normal phenotypes
# modelled just yet, skip the row
if qualifier == 'norm':
continue
# iassoc_id = self._make_internal_identifier('annot', annot_key)
# assoc_id = self.make_id(iassoc_id)
assoc_id = None
# Mammalian Phenotype/Genotype are curated G2P assoc
if annot_type == 'Mammalian Phenotype/Genotype':
line_num += 1
# We expect the label for the phenotype
# to be taken care of elsewhere
model.addClassToGraph(accid, None)
genotype_id = self.idhash['genotype'].get(object_key)
if genotype_id is None:
LOG.error(
"can't find genotype id for %s", object_key)
else:
# add the association
assoc = G2PAssoc(graph, self.name, genotype_id, accid)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
# DO/Genotype annotations are disease models
elif annot_type == 'DO/Genotype':
# skip NOT annotations for now FIXME
if qualifier_key == '1614157':
continue
genotype_id = self.idhash['genotype'].get(object_key)
if genotype_id is None:
LOG.error("can't find genotype id for %s", object_key)
else:
# add the association
assoc = Assoc(graph, self.name)
# TODO PYLINT
# Redefinition of assoc type from
# dipper.models.assoc.G2PAssoc.G2PAssoc to
# dipper.models.assoc.Association.Assoc
assoc.set_subject(genotype_id)
assoc.set_object(accid)
assoc.set_relationship(self.globaltt['is model of'])
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
elif annot_type == 'MCV/Marker':
# marker category == type
marker_id = self.idhash['marker'].get(object_key)
if str(term_key).strip() in self.localtt:
# check "Not Applicable": "reference_locus"
term_id = self.resolve(str(term_key).strip())
else:
term_id = None
LOG.warning('No type mapping for: %s', term_key)
# note that the accid here is an internal mouse cv term,
# and we don't use it.
if term_id is not None and marker_id is not None:
# do something special for transgenics -
# make sure these are transgenic insertions
model.addType(marker_id, term_id)
elif annot_type == 'DO/Allele': # allele/Disease
allele_id = self.idhash['allele'].get(object_key)
if allele_id is None:
LOG.error("can't find genotype id for %s", object_key)
else:
# add the association
assoc = Assoc(graph, self.name)
assoc.set_subject(allele_id)
assoc.set_object(accid)
assoc.set_relationship(self.globaltt['is model of'])
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
if assoc_id is not None:
# add the assoc to the hashmap (using the monarch id)
self.idhash['annot'][annot_key] = assoc_id
model.addComment(assoc_id, "annot_key:" + annot_key)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_evidence_view(self, limit):
"""
Here we fetch the evidence (code and publication) for the associations.
The evidence codes are mapped from the standard GO codes to ECO.
J numbers are added for publications.
We will only add the evidence if the annotation is in our idhash.
We also pull in evidence qualifiers, as of June 2018 they are
Data Interpretation Center (eg IMPC)
external ref (eg UniProtKB:Q9JHI2-3 for Proteoform/Marker assoc)
Phenotyping Center (eg WTSI)
Resource Name (eg MGP)
MP-Sex-Specificity (eg NA, M, F)
Triples:
<annot_id> dc:evidence <evidence_id>
<pub_id> a owl:NamedIndividual
<annot_id> dc:source <pub_id>
:param limit:
:return:
"""
src_key = 'evidence_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
LOG.info("getting evidence and pubs for annotations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
line = reader.readline()
line = line.rstrip("\n")
row = line.split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
row = line.split('\t')
line_num += 1
annot_evidence_key = row[col.index('_annotevidence_key')]
annot_key = row[col.index('_annot_key')]
evidence_code = row[col.index('evidencecode')]
jnumid = row[col.index('jnumid')]
qualifier = row[col.index('term')]
qualifier_value = row[col.index('value')]
# annotation_type = row[col.index('annottype')]
if self.test_mode and annot_key not in self.test_keys.get('annot'):
continue
# add the association id to map to the evidence key
# (to attach the right note to the right assn)
self.idhash['notes'][annot_evidence_key] = annot_key
assoc_id = self.idhash['annot'].get(annot_key)
if assoc_id is None:
# assume that we only want to add the evidence/source
# for annots that we have in our db
continue
evidence_id = self.resolve(evidence_code)
reference = Reference(graph, jnumid)
reference.addRefToGraph()
# add the ECO and citation information to the annot
model.addTriple(assoc_id, self.globaltt['has evidence'], evidence_id)
model.addTriple(assoc_id, self.globaltt['Source'], jnumid)
# For Mammalian Phenotype/Genotype annotation types
# MGI adds sex specificity qualifiers here
if qualifier == 'MP-Sex-Specificity' and qualifier_value in ('M', 'F'):
model._addSexSpecificity(assoc_id, self.resolve(qualifier_value))
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_bib_acc_view(self, limit):
"""
This traverses the table twice:
once to look up the internal key to J number mapping
for the id hashmap then again to make the equivalences.
All internal keys have both a J and MGI identifier.
This will make equivalences between the different pub ids
Triples:
<pub_id> a owl:NamedIndividual
<other_pub_id> a owl:NamedIndividual
<pub_id> owl:sameAs <other_pub_id>
:param limit:
:return:
"""
src_key = 'bib_acc_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
# firstpass, get the J number mapping, and add to the global hash
LOG.info('populating pub id hash')
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(filereader)
if not self.check_fileheader(col, row, src_key):
pass
for row in filereader:
accid = row[col.index('accid')]
prefixpart = row[col.index('prefixpart')]
# 'numericpart'
object_key = int(row[col.index('_object_key')]) # likely unstable
# logicaldb = row[col.index('logicaldb')]
# logicaldb_key = row[col.index('_logicaldb_key')]
if self.test_mode and object_key not in self.test_keys.get('pub'):
continue
# we use the J number here because
# it is the externally-accessible identifier
if prefixpart != 'J:':
continue
self.idhash['publication'][object_key] = accid
reference = Reference(graph, accid)
reference.addRefToGraph()
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
# 2nd pass, look up the MGI identifier in the hash
LOG.info("getting pub equivalent ids")
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(filereader) # header already checked
for row in filereader:
accid = row[col.index('accid')]
prefixpart = row[col.index('prefixpart')]
# 'numericpart'
object_key = int(row[col.index('_object_key')])
logicaldb = row[col.index('logicaldb')].strip()
logicaldb_key = row[col.index('_logicaldb_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('pub'):
continue
jid = self.idhash['publication'].get(object_key)
pub_id = None
if logicaldb_key == '29': # pubmed
pub_id = 'PMID:' + accid
elif logicaldb_key == '1' and prefixpart[:4] == 'MGI:':
# don't get the J numbers,
# because we don't need to make the equiv to itself.
pub_id = accid
elif logicaldb == 'Journal Link':
# some DOIs seem to have spaces
# FIXME MGI needs to FIX THESE UPSTREAM!!!!
# we'll scrub them here for the time being
accid = re.sub(r'\s+', '', accid)
# some DOIs have un-urlencoded brackets <>
accid = re.sub(r'<', '%3C', accid)
accid = re.sub(r'>', '%3E', accid)
pub_id = 'DOI:' + accid
elif logicaldb_key == '1' and re.match(r'J:', prefixpart):
# we can skip the J numbers
continue
if pub_id is not None:
# only add these to the graph if
# it's mapped to something we understand
reference = Reference(graph, pub_id)
# make the assumption that if it is a PMID, it is a journal
if re.match(r'PMID', pub_id):
reference.setType(self.globaltt['journal article'])
model.makeLeader(pub_id)
reference.addRefToGraph()
model.addSameIndividual(jid, pub_id)
else:
LOG.warning(
"Publication from (%s) not mapped for %s",
logicaldb, object_key)
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
def _process_prb_strain_view(self, limit):
"""
Process a table to get strains (with internal ids), and their labels.
These strains are created as instances of the species that they are.
Triples:
<strain id> a GENO:intrinsic_genotype
rdfs:label "strain label"
RO:in_taxon <NCBI taxon id>
:param limit:
:return:
"""
src_key = 'prb_strain_view'
# Only 9 strain types if we want to map them
# recombinant congenci, inbred strain, NA,
# congenic, consomic, coisogenic,
# recombinant inbred, NS, conplastic
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting strains and adding their taxa")
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
strain_key = row[col.index('_strain_key')].strip()
strain = row[col.index('strain')].strip()
species = row[col.index('species')].strip()
if self.test_mode is True:
if int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is not None:
self.label_hash[strain_id] = strain
# add the species to the graph as a class
species = species.strip()
sp = self.resolve(species, False)
if sp == species:
LOG.error("No taxon mapping for " + species)
# they may tag a geo name on house mouse
if species[:17] == 'M. m. domesticus ':
LOG.warning("defaulting to Mus musculus")
sp = self.globaltt['Mus musculus']
else:
LOG.warning("defaulting to genus 'Mus'")
sp = self.globaltt['Mus']
elif species in MGI.unknown_taxa:
LOG.warning("defaulting to genus 'Mus'")
sp = self.globaltt['Mus']
model.addClassToGraph(sp, None)
geno.addTaxon(sp, strain_id)
model.addIndividualToGraph(strain_id, strain, sp)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _process_mrk_marker_view(self, limit):
"""
This is the definition of markers
(as in genes, but other genomic loci types as well).
It looks up the identifiers in the hashmap
This includes their labels, specific class, and identifiers
TODO should we use the mrk_mouse_view instead?
Triples:
<marker_id> a owl:Class OR owl:NamedIndividual
GENO:marker_type
rdfs:label <symbol>
RO:in_taxon <NCBITaxon_id>
:param limit:
:return:
"""
src_key = 'mrk_marker_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting markers and assigning types")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
(marker_key,
organism_key,
marker_status_key,
symbol,
name,
latin_name,
marker_type) = line.split('\t')
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# use only non-withdrawn markers
if marker_status_key != '2':
marker_id = self.idhash['marker'].get(marker_key)
# only pull info for mouse genes for now
# other species should come from other dbs
if organism_key != '1':
continue
if marker_id is None:
LOG.error(
"can't find %s %s in the id hash", marker_key, symbol)
# check "Not Applicable" -> "reference_locus"
mapped_marker_type = self.resolve(marker_type.strip())
# if it's unlocated, or is not a gene,
# then don't add it as a class because
# it's not added as a gene.
# everything except for genes are modeled as individuals
if mapped_marker_type in [
self.globaltt['gene'],
self.globaltt['pseudogene']]:
model.addClassToGraph(
marker_id, symbol, mapped_marker_type, name
)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym']
)
self.markers['classes'].append(marker_id)
else:
model.addIndividualToGraph(
marker_id, symbol, mapped_marker_type, name
)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym']
)
self.markers['indiv'].append(marker_id)
self.label_hash[marker_id] = symbol
# add the taxon (default to Mus m.)
# latin_name is not always a proper binomial
if latin_name in MGI.unknown_taxa: # localtt conflict
latin_name = 'Mus'
taxon_id = self.resolve(
latin_name, default=self.globaltt['Mus musculus'])
geno.addTaxon(taxon_id, marker_id)
# make MGI the leader for mouse genes.
if taxon_id == self.globaltt['Mus musculus']:
model.makeLeader(marker_id)
if not self.test_mode and limit is not None \
and line_num > limit:
break
def _process_mrk_summary_view(self, limit):
"""
Here we pull the mgiid of the features, and make equivalent (or sameAs)
associations to referenced ids.
Only adding the ENSEMBL genes and NCBI gene ids.
Will wait on other ids later.
:param limit:
:return:
"""
src_key = 'mrk_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("getting markers and equivalent ids from mrk_summary_view")
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
subtype = row[col.index('subtype')].strip()
short_description = row[col.index('short_description')].strip()
if self.test_mode is True and \
int(object_key) not in self.test_keys.get('marker'):
continue
if preferred == '1':
if self.idhash['marker'].get(object_key) is None:
# can't find the marker in the hash; add it here:
self.idhash['marker'][object_key] = mgiid
LOG.error(
"this marker hasn't been seen before %s %s",
mgiid, short_description)
if accid == mgiid:
# don't need to make equivalences to itself
continue
mapped_id = None
if logicaldb_key == '60':
mapped_id = 'ENSEMBL:' + accid
elif logicaldb_key == '1':
# don't need to add the equivalence to itself.
continue
elif logicaldb_key == '55':
mapped_id = 'NCBIGene:' + accid
if mapped_id is not None:
if mgiid in self.markers['classes'] \
or subtype in ['Gene', 'Pseudogene']:
model.addClassToGraph(mapped_id, None)
model.addEquivalentClass(mgiid, mapped_id)
elif mgiid in self.markers['indiv']:
model.addIndividualToGraph(mapped_id, None)
model.addSameIndividual(mgiid, mapped_id)
# could parse the "subtype" string
# to get the kind of thing the marker is
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_mrk_acc_view(self):
"""
Use this table to create the idmap between the internal marker id and
the public mgiid.
No triples are produced in this pass;
a second pass (_process_mrk_acc_view_for_equiv) adds the equivalence triples.
:return:
"""
src_key = 'mrk_acc_view'
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_num = 0
LOG.info("mapping markers to internal identifiers")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip('\n')
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefixpart')]
logicaldb_key = row[col.index('_logicaldb_key')]
object_key = row[col.index('_object_key')]
preferred = row[col.index('preferred')]
# = row[col.index('_organism_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
self.idhash['marker'][object_key] = accid
def _process_mrk_acc_view_for_equiv(self, limit):
"""
Add the equivalences, either sameAs or equivalentClass,
depending on the nature of the marker.
We only process the ENSEMBL genes and NCBI gene ids.
:param limit:
:return:
"""
src_key = 'mrk_acc_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
# pass through the file again,
# and make the equivalence statements to a subset of the idspaces.
# TODO verify the difference between what the
# mrk_acc_view vs mrk_summary_view buys us here.
# if nothing, then we should remove one or the other.
LOG.info("mapping marker equivalent identifiers in mrk_acc_view")
line_num = 0
col = self.tables[src_key]['columns']
with open('/'.join((self.rawdir, src_key)), 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefixpart')]
logicaldb_key = row[col.index('_logicaldb_key')]
object_key = row[col.index('_object_key')]
preferred = row[col.index('preferred')]
organism_key = row[col.index('_organism_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# right now not caring about other organisms
if organism_key != '1':
continue
mgiid = self.idhash['marker'].get(object_key)
if mgiid is None:
# presumably we've already added the relevant MGI ids,
# so skip those that we can't find
LOG.debug("can't find mgiid for %s", object_key)
continue
marker_id = None
if preferred == '1': # TODO what does it mean if it's 0?
if logicaldb_key == '55': # entrez/ncbi
marker_id = 'NCBIGene:' + accid
elif logicaldb_key == '1' and prefix_part != 'MGI:':
marker_id = accid
elif logicaldb_key == '60':
marker_id = 'ENSEMBL:' + accid
# TODO get non-preferred ids==deprecated?
if marker_id is not None:
if mgiid in self.markers['classes']:
model.addClassToGraph(marker_id, None)
model.addEquivalentClass(mgiid, marker_id)
elif mgiid in self.markers['indiv']:
model.addIndividualToGraph(marker_id, None)
model.addSameIndividual(mgiid, marker_id)
else:
LOG.error("mgiid not in class or indiv hash %s", mgiid)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_prb_strain_acc_view(self, limit):
"""
Use this table to create the idmap between
the internal strain key and the public MGI strain id.
Also, add equivalence statements between MGI strain ids and
external repository ids (JAX, MMRRC, EMMA, etc.)
Triples:
<strain_id> a GENO:intrinsic genotype
<other_strain_id> a GENO:intrinsic_genotype
<strain_id> owl:sameAs <other_strain_id>
:param limit:
:return:
"""
src_key = 'prb_strain_acc_view'
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("mapping strains to internal identifiers")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
tax_id = self.globaltt["Mus musculus"]
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
prefixpart = row[col.index('prefixpart')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('strain'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefixpart == 'MGI:' and preferred == '1':
self.idhash['strain'][object_key] = accid
model.addIndividualToGraph(
accid, self.globaltt['intrinsic genotype'], tax_id)
# The following are the stock centers for the strains
# (asterisk indicates complete)
# *1 MGI Mouse Genome Informatics
# *22 JAX Registry (null)
# *37 EMMA European Mutant Mouse Archive
# *38 MMRRC Mutant Mouse Regional Resource Center
# 39 Harwell Mammalian Genome Unit Stock List
# *40 ORNL Oak Ridge National Lab mutant resource
# *54 NCIMR NCI Mouse Repository
# *56 NMICE Neuromice.org, a consortium of three NIH-sponsored
# mutagenesis projects designed to search for
# neurological mutations
# 57 CARD Center for Animal Resources and Development @ Kumamoto U
# *70 RIKEN BRC RIKEN BioResource Center
# *71 CMMR Canadian Mouse Mutant Resource
# 84 JPGA The Center for New Mouse Models of
# Heart, Lung, BLood and Sleep Disorders,
# JAX-PGA at The Jackson Laboratory
# *87 MUGEN Network of Excellence in Integrated Functional Genomics
# in Mutant Mouse Models as Tools to Investigate the
# Complexity of Human Immunological Disease
# *90 APB Australian Phenomics Bank
# ? 91 EMS Elizabeth M. Simpson
# ? 93 NIG National Institute of Genetics,
# Mammalian Genetics Laboratory, Japan
# 94 TAC Taconic
# 154 OBS Oriental BioService , Inc.
# 161 RMRC-NLAC National Applied Research Laboratories,Taiwan, R.O.C.
# pass through the file again,
# and make the equivalence statements to a subset of the idspaces
LOG.info("mapping strain equivalent identifiers")
line_num = 0
with open(raw, 'r') as reader:
reader.readline() # read the header row; skip
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
prefixpart = row[col.index('prefixpart')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('strain'):
continue
mgiid = self.idhash['strain'].get(object_key)
if mgiid is None:
# presumably we've already added the relevant MGI ids,
# so skip those that we can't find
# LOG.info("can't find mgiid for %s",object_key)
continue
strain_id = None
deprecated = False
comment = None
if preferred == '1': # what does it mean if it's 0?
if logicaldb_key == '22': # JAX
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid).strip()
strain_id = 'JAX:' + accid
elif logicaldb_key == '38': # MMRRC
strain_id = accid
if not re.match(r'MMRRC:', strain_id):
strain_id = 'MMRRC:' + strain_id
elif logicaldb_key == '37': # EMMA
# replace EM: prefix with EMMA:, or for accid's
# with bare digits (e.g. 06335) prepend 'EMMA:'
strain_id = re.sub(r'^(EM:)*', 'EMMA:', accid)
elif logicaldb_key == '90': # APB
strain_id = 'APB:' + accid # Check
elif logicaldb_key == '40': # ORNL
# ORNL is not in existence any more.
# these are deprecated, and we will prefix with JAX
strain_id = 'JAX:' + accid
comment = "Originally from ORNL."
deprecated = True
# add these as synonyms of the MGI mouse
model.addSynonym(mgiid, accid)
elif logicaldb_key == '54': # NCIMR
strain_id = 'NCIMR:' + accid
# CMMR not great - doesn't resolve well
# elif logicaldb_key == '71':
# strain_id = 'CMMR:'+accid
elif logicaldb_key == '56': # neuromice
# neuromice.org doesn't exist any more.
# but all these are actually MGI ids
strain_id = accid
elif logicaldb_key == '70': # RIKEN
# like
# http://www2.brc.riken.jp/lab/animal/detail.php?brc_no=RBRC00160
strain_id = 'RBRC:RBRC' + accid
elif logicaldb_key == '87':
strain_id = 'MUGEN:' + accid
# I can't figure out how to get to some of the strains
# TODO get non-preferred ids==deprecated?
# TODO make these strains, rather than instance of taxon?
if strain_id is not None:
model.addIndividualToGraph(strain_id, None, tax_id)
if deprecated:
model.addDeprecatedIndividual(strain_id, [mgiid])
model.addSynonym(mgiid, accid)
else:
model.addSameIndividual(mgiid, strain_id)
if re.match(r'MMRRC', strain_id):
model.makeLeader(strain_id)
if comment is not None:
model.addComment(strain_id, comment)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_mgi_note_vocevidence_view(self, limit):
"""
Here we fetch the free text descriptions of the phenotype associations.
Triples:
<annot_id> dc:description "description text"
:param limit:
:return:
"""
src_key = 'mgi_note_vocevidence_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("getting free text descriptions for annotations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
object_key = row[col.index('_object_key')].strip()
note = row[col.index('note')].strip()
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('notes'):
continue
# object_key == evidence._annotevidence_key
annotkey = self.idhash['notes'].get(object_key)
annot_id = self.idhash['annot'].get(annotkey)
# only add the description for the annotations
# we have captured through processing
if annot_id is not None:
model.addDescription(annot_id, note.strip())
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _process_mrk_location_cache(self, limit):
src_key = 'mrk_location_cache'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting marker locations")
raw = '/'.join((self.rawdir, src_key))
geno = Genotype(graph)
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
marker_key = row[col.index('_marker_key')].strip()
organism_key = row[col.index('_organism_key')].strip()
chromosome = row[col.index('chromosome')].strip()
startcoordinate = row[col.index('startcoordinate')].strip()
endcoordinate = row[col.index('endcoordinate')].strip()
strand = row[col.index('strand')].strip()
version = row[col.index('version')].strip()
# only get the location information for mouse
if str(organism_key) != '1' or str(chromosome) == 'UN':
continue
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# make the chromosome, and the build-instance
chrom_id = makeChromID(chromosome, 'NCBITaxon:10090', 'CHR')
if version is not None and version != '' and version != '(null)':
# switch on maptype or mapkey
assembly = version
build_id = 'NCBIGenome:' + assembly
geno.addChromosomeInstance(
chromosome, build_id, assembly, chrom_id)
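# when an assembly version is present, locate features on the build-specific chromosome rather than the generic taxon chromosome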
chrom_id = makeChromID(chromosome, build_id, 'MONARCH')
if marker_key in self.idhash['marker']:
gene_id = self.idhash['marker'][marker_key]
feature = Feature(graph, gene_id, None, None)
if strand == '(null)' or strand == '':
strand = None
if startcoordinate == '(null)' or startcoordinate == '':
startcoordinate = None
if endcoordinate == '(null)' or endcoordinate == '':
endcoordinate = None
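# coordinates may be missing ('(null)'); a missing start is still recorded but flagged as a FuzzyPosition, a missing end is simply omitted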
if startcoordinate is not None:
feature.addFeatureStartLocation(
int(float(startcoordinate)), chrom_id, strand)
else:
feature.addFeatureStartLocation(
startcoordinate, chrom_id, strand,
[self.globaltt['FuzzyPosition']])
if endcoordinate is not None:
feature.addFeatureEndLocation(
int(float(endcoordinate)), chrom_id, strand)
# note we don't add the uncertain end coordinate,
# because we don't know what it is.
add_as_class = False
if gene_id in self.markers['classes']:
add_as_class = True
feature.addFeatureToGraph(True, None, add_as_class)
else:
LOG.warning('marker key %s not in idhash', str(marker_key))
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def process_mgi_relationship_transgene_genes(self, limit=None):
"""
Here, we have the relationship between MGI transgene alleles,
and the non-mouse gene ids that are part of them.
We augment the allele with the transgene parts.
:param limit:
:return:
"""
src_key = 'mgi_relationship_transgene_genes'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting transgene genes")
raw = '/'.join((self.rawdir, src_key))
geno = Genotype(graph)
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
# rel_key = row[col.index('rel_key')].strip()
allele_key = int(row[col.index('object_1')])
allele_id = row[col.index('allele_id')]
# allele_label = row[col.index('allele_label')].strip()
# category_key = row[col.index('category_key')].strip()
# category_name = row[col.index('category_name')].strip()
# property_key = row[col.index('property_key')].strip()
# property_name = row[col.index('property_name')].strip()
gene_num = int(row[col.index('property_value')])
if self.test_mode and allele_key not in self.test_keys.get('allele') \
and gene_num not in self.test_ids:
continue
gene_id = 'NCBIGene:' + str(gene_num)
# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
seqalt_id = self.idhash['seqalt'].get(allele_key)
if seqalt_id is None:
seqalt_id = allele_id
geno.addSequenceDerivesFrom(seqalt_id, gene_id)
if not self.test_mode and limit is not None and \
reader.line_num > limit:
break
def process_mgi_note_allele_view(self, limit=None):
"""
These are the descriptive notes about the alleles.
Note that these notes have embedded HTML -
should we do anything about that?
:param limit:
:return:
"""
src_key = 'mgi_note_allele_view'
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Assembling notes on alleles")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
notehash = {}
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
object_key = row[col.index('_object_key')].strip()
notetype = row[col.index('notetype')].strip()
note = row[col.index('note')].strip()
sequencenum = row[col.index('sequencenum')].strip()
# read all the notes into a hash to concatenate
if object_key not in notehash:
notehash[object_key] = {}
if notetype not in notehash[object_key]:
notehash[object_key][notetype] = []
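# notes arrive split across rows; sequencenum is a 1-based position, so pad the list out before slotting this chunk into place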
if len(notehash[object_key][notetype]) < int(sequencenum):
for i in range(
len(notehash[object_key][notetype]),
int(sequencenum)
):
notehash[object_key][notetype].append('')
notehash[object_key][notetype][int(sequencenum) - 1] = note.strip()
# finish iteration over notes
line_num = 0
for allele_key in notehash:
line_num += 1
if self.test_mode is True:
if int(allele_key) not in self.test_keys.get('allele'):
continue
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
continue
for n in notehash[allele_key]:
# pretty chatty for expected behavior
# LOG.info(
# "found %d %s notes for %s",
# len(notehash[allele_key]), n, allele_id)
notes = ''.join(notehash[allele_key][n])
notes += ' [' + n + ']'
model.addDescription(allele_id, notes)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_prb_strain_genotype_view(self, limit=None):
"""
Here we link each strain to the genotype that MGI records for it,
caching the mapping in strain_to_genotype_map.
Triples:
<strain_id> GENO:has_genotype <genotype_id>
:param limit:
:return:
"""
src_key = 'prb_strain_genotype_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("Getting genotypes for strains")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
strain_key = row[col.index('_strain_key')].strip()
genotype_key = row[col.index('_genotype_key')].strip()
if self.test_mode is True and \
int(genotype_key) not in self.test_keys.get('genotype') \
and int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is None:
strain_id = self._make_internal_identifier(
'strain', strain_key)
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id is None:
genotype_id = self._make_internal_identifier(
'genotype', genotype_key)
if strain_id is not None and genotype_id is not None:
self.strain_to_genotype_map[strain_id] = genotype_id
graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)
# TODO
# verify if this should be contingent on the exactness or not
# if qualifier == 'Exact':
# gu.addTriple(
# graph, strain_id,
# self.globaltt['has_genotype'],
# genotype_id)
# else:
# gu.addXref(graph, strain_id, genotype_id)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _make_internal_identifier(self, prefix, key):
"""
This is a special MGI-to-MONARCH-ism.
MGI tables have unique keys that we use here, but don't want to
necessarily re-distribute those internal identifiers.
Therefore, we make them into keys in a consistent way here.
:param prefix: the object type to prefix the key with,
since the numbers themselves are not unique across tables
:param key: the number
:return:
"""
# these are just more blank node identifiers
iid = self.make_id('mgi' + prefix + 'key' + key, '_')
return iid
# def _querysparql(self):
#
# #load the graph
# vg = Graph()
# vg.parse(self.outfile, format="turtle")
#
# qres = g.query(
# """SELECT DISTINCT ?aname ?bname
# WHERE {
# ?a foaf:knows ?b .
# ?a foaf:name ?aname .
# ?b foaf:name ?bname .
# }""")
#
# for row in qres:
# print("%s knows %s" % row)
|
TomConlin/dipper
|
dipper/sources/MGI.py
|
Python
|
bsd-3-clause
| 99,120
|
from pygraz_website import filters
class TestFilters(object):
def test_url_detection(self):
"""
Test that urls are found correctly.
"""
no_urls_string = '''This is a test without any urls in it.'''
urls_string = '''This string has one link in it: http://pygraz.org . But it also has some text after it :D'''
assert filters.urlize(no_urls_string) == no_urls_string
assert filters.urlize(urls_string) == '''This string has one link in it: <a href="http://pygraz.org">http://pygraz.org</a> . But it also has some text after it :D'''
assert filters.urlize(urls_string, True).matches == {'urls': set(['http://pygraz.org'])}
assert filters.urlize(None) == u''
assert filters.urlize("'http://test.com'") == """'<a href="http://test.com">http://test.com</a>'"""
def test_namehandles(self):
"""
Tests the discovery of linkable names.
"""
string_with_handles = 'Hallo @pygraz.'
assert filters.urlize(string_with_handles) == 'Hallo <a href="http://twitter.com/pygraz">@pygraz</a>.'
assert filters.urlize(string_with_handles, True).matches == {'handles': set(['pygraz'])}
def test_hashtags(self):
string_with_tags = 'This is a #test for #hashtags'
assert filters.urlize(string_with_tags) == 'This is a <a href="http://twitter.com/search?q=%23test">#test</a> for <a href="http://twitter.com/search?q=%23hashtags">#hashtags</a>'
assert filters.urlize(string_with_tags, True).matches == {'hashtags': set(['test', 'hashtags'])}
|
pygraz/old-flask-website
|
pygraz_website/tests/test_filters.py
|
Python
|
bsd-3-clause
| 1,577
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Script example of tissue classification
"""
from __future__ import print_function # Python 2/3 compatibility
import numpy as np
from nipy import load_image, save_image
from nipy.core.image.image_spaces import (make_xyz_image,
xyz_affine)
from nipy.externals.argparse import ArgumentParser
from nipy.algorithms.segmentation import BrainT1Segmentation
def fuzzy_dice(gold_ppm, ppm, mask):
"""
Fuzzy dice index.
"""
dices = np.zeros(3)
if gold_ppm is None:
return dices
for k in range(3):
pk = gold_ppm[mask][:, k]
qk = ppm[mask][:, k]
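# fuzzy Dice for class k: 2 * sum(sqrt(p_k * q_k)) / (sum(p_k) + sum(q_k))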
PQ = np.sum(np.sqrt(np.maximum(pk * qk, 0)))
P = np.sum(pk)
Q = np.sum(qk)
dices[k] = 2 * PQ / float(P + Q)
return dices
# Parse command line
description = 'Perform brain tissue classification from skull stripped T1 \
image in CSF, GM and WM. If no mask image is provided, the mask is defined by \
thresholding the input image above zero (strictly).'
parser = ArgumentParser(description=description)
parser.add_argument('img', metavar='img', nargs='+', help='input image')
parser.add_argument('--mask', dest='mask', help='mask image')
parser.add_argument('--niters', dest='niters',
help='number of iterations (default=%d)' % 25)
parser.add_argument('--beta', dest='beta',
help='Markov random field beta parameter (default=%f)' % 0.5)
parser.add_argument('--ngb_size', dest='ngb_size',
help='Markov random field neighborhood system (default=%d)' % 6)
parser.add_argument('--probc', dest='probc', help='csf probability map')
parser.add_argument('--probg', dest='probg',
help='gray matter probability map')
parser.add_argument('--probw', dest='probw',
help='white matter probability map')
args = parser.parse_args()
def get_argument(dest, default):
val = args.__getattribute__(dest)
if val is None:
return default
else:
return val
# Input image
img = load_image(args.img[0])
# Input mask image
mask_img = get_argument('mask', None)
if mask_img is None:
mask_img = img
else:
mask_img = load_image(mask_img)
# Other optional arguments
niters = int(get_argument('niters', 25))
beta = float(get_argument('beta', 0.5))
ngb_size = int(get_argument('ngb_size', 6))
# Perform tissue classification
mask = mask_img.get_data() > 0
S = BrainT1Segmentation(img.get_data(), mask=mask, model='5k',
niters=niters, beta=beta, ngb_size=ngb_size)
# Save label image
outfile = 'hard_classif.nii'
save_image(make_xyz_image(S.label, xyz_affine(img), 'scanner'),
outfile)
print('Label image saved in: %s' % outfile)
# Compute fuzzy Dice indices if a 3-class fuzzy model is provided
if args.probc is not None and \
args.probg is not None and \
args.probw is not None:
print('Computing Dice index')
gold_ppm = np.zeros(S.ppm.shape)
gold_ppm_img = (args.probc, args.probg, args.probw)
for k in range(3):
img = load_image(gold_ppm_img[k])
gold_ppm[..., k] = img.get_data()
d = fuzzy_dice(gold_ppm, S.ppm, np.where(mask_img.get_data() > 0))
print('Fuzzy Dice indices: %s' % d)
|
bthirion/nipy
|
examples/tissue_classification.py
|
Python
|
bsd-3-clause
| 3,308
|
>>> import random
>>> random.sample(range(10), 5)
[7, 6, 3, 5, 1]
>>> all(a < b for a, b in zip(_,_[1:]))
False
>>>
|
gusyussh/learntosolveit
|
languages/python/algorithm_is_sorted.py
|
Python
|
bsd-3-clause
| 116
|
import logging
import sys
import traceback
from collections import namedtuple
import numpy as np
import pandas as pd
from scipy.stats import chisquare
from . import categorizer as cat
from . import draw
from .ipf.ipf import calculate_constraints
from .ipu.ipu import household_weights
logger = logging.getLogger("synthpop")
FitQuality = namedtuple(
'FitQuality',
('people_chisq', 'people_p'))
BlockGroupID = namedtuple(
'BlockGroupID', ('state', 'county', 'tract', 'block_group'))
def enable_logging():
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def synthesize(h_marg, p_marg, h_jd, p_jd, h_pums, p_pums,
marginal_zero_sub=.01, jd_zero_sub=.001, hh_index_start=0):
# this is the zero marginal problem
h_marg = h_marg.replace(0, marginal_zero_sub)
p_marg = p_marg.replace(0, marginal_zero_sub)
# zero cell problem
h_jd.frequency = h_jd.frequency.replace(0, jd_zero_sub)
p_jd.frequency = p_jd.frequency.replace(0, jd_zero_sub)
# ipf for households
logger.info("Running ipf for households")
h_constraint, _ = calculate_constraints(h_marg, h_jd.frequency)
h_constraint.index = h_jd.cat_id
logger.debug("Household constraint")
logger.debug(h_constraint)
logger.debug(h_constraint.sum())
# ipf for persons
logger.info("Running ipf for persons")
p_constraint, _ = calculate_constraints(p_marg, p_jd.frequency)
p_constraint.index = p_jd.cat_id
logger.debug("Person constraint")
logger.debug(p_constraint)
logger.debug(p_constraint.sum())
# make frequency tables that the ipu expects
household_freq, person_freq = cat.frequency_tables(p_pums, h_pums,
p_jd.cat_id,
h_jd.cat_id)
# do the ipu to match person marginals
logger.info("Running ipu")
import time
t1 = time.time()
best_weights, fit_quality, iterations = household_weights(household_freq,
person_freq,
h_constraint,
p_constraint)
logger.info("Time to run ipu: %.3fs" % (time.time()-t1))
logger.debug("IPU weights:")
logger.debug(best_weights.describe())
logger.debug(best_weights.sum())
logger.debug("Fit quality:")
logger.debug(fit_quality)
logger.debug("Number of iterations:")
logger.debug(iterations)
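# number of households to draw: each top-level marginal category sums (roughly) to the geography's household total, so take the mean across categories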
num_households = int(h_marg.groupby(level=0).sum().mean())
print "Drawing %d households" % num_households
best_chisq = np.inf
return draw.draw_households(
num_households, h_pums, p_pums, household_freq, h_constraint,
p_constraint, best_weights, hh_index_start=hh_index_start)
def synthesize_all(recipe, num_geogs=None, indexes=None,
marginal_zero_sub=.01, jd_zero_sub=.001):
"""
Parameters
----------
write_households_csv, write_persons_csv : str
Name of households and persons csv file to write.
Pass None to return these rather than write.
Returns
-------
households, people : pandas.DataFrame
Only returns these if `write_households_csv` and `write_persons_csv`
are None.
fit_quality : dict of FitQuality
Keys are geographic IDs, values are namedtuples with attributes
``.household_chisq``, ``household_p``, ``people_chisq``,
and ``people_p``.
"""
print "Synthesizing at geog level: '{}' (number of geographies is {})".\
format(recipe.get_geography_name(), recipe.get_num_geographies())
if indexes is None:
indexes = recipe.get_available_geography_ids()
hh_list = []
people_list = []
cnt = 0
fit_quality = {}
hh_index_start = 0
# TODO will parallelization work here?
for geog_id in indexes:
print "Synthesizing geog id:\n", geog_id
h_marg = recipe.get_household_marginal_for_geography(geog_id)
logger.debug("Household marginal")
logger.debug(h_marg)
p_marg = recipe.get_person_marginal_for_geography(geog_id)
logger.debug("Person marginal")
logger.debug(p_marg)
h_pums, h_jd = recipe.\
get_household_joint_dist_for_geography(geog_id)
logger.debug("Household joint distribution")
logger.debug(h_jd)
p_pums, p_jd = recipe.get_person_joint_dist_for_geography(geog_id)
logger.debug("Person joint distribution")
logger.debug(p_jd)
try:
households, people, people_chisq, people_p = \
synthesize(
h_marg, p_marg, h_jd, p_jd, h_pums, p_pums,
marginal_zero_sub=marginal_zero_sub, jd_zero_sub=jd_zero_sub,
hh_index_start=hh_index_start)
if not recipe.write_households(geog_id, households):
hh_list.append(households)
if not recipe.write_persons(geog_id, people):
people_list.append(people)
key = tuple(geog_id.values)
# key = BlockGroupID(
# geog_id['state'], geog_id['county'], geog_id['tract'],
# geog_id['block group'])
fit_quality[key] = FitQuality(people_chisq, people_p)
cnt += 1
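            # carry the household index forward so ids stay unique across geographies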
if len(households) > 0:
hh_index_start = households.index.values[-1] + 1
if num_geogs is not None and cnt >= num_geogs:
break
except Exception as e:
print "Exception caught: ", sys.exc_info()[0]
print traceback.format_exc()
# continue
return (pd.concat(hh_list) if len(hh_list) > 0 else None,
pd.concat(people_list, ignore_index=True) if len(people_list) > 0 else None,
fit_quality)
|
sfcta/synthpop
|
synthpop/synthesizer.py
|
Python
|
bsd-3-clause
| 6,015
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
router = DefaultRouter()
router.register(r'minion', views.MinionViewSet, 'minion')
router.register(r'data', views.MinionDataViewSet, 'data')
urlpatterns = [
url(
r'^',
include(router.urls)
),
]
|
Farforr/overlord
|
overlord/minions/api/v1/urls.py
|
Python
|
bsd-3-clause
| 478
|
# -*- coding: utf-8 -*-
import httplib
import pprint
import json
import sys
import logging
import datetime
import os
import os.path
import codecs
class cmdb( object ):
def __init__( self, args , info=None ):
self.res = {}
self.result = {}
self.info = info
self.args = args
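        # the CMDB stores device_type in Chinese: '机架服务器' means "rack server"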
self.device_type = '机架服务器'
self.conn = httplib.HTTPConnection( self.args['host'],self.args['port'] , timeout=10 )
def search( self, manifest, total ,start , limit , conditions ):
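        # when `total` is truthy only the record count is wanted: the limit is
        # forced to 1 and rs['total'] is returned instead of the result rows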
cond = ''
for x in conditions:
cond += '%20and%20'+x['name']+x['tag']+x['value']
if total :
limit = 1
rr = {}
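        # build the CMDB API query: credentials, paging (start/num) and the
        # manifest plus any extra conditions are passed as URL parameters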
url = (self.args['baseURL']+'username='+self.args['user']
+ '&auth='+self.args['key']
+ '&num='+str(limit)+'&return_total=1&start='+ str(start)
+ '&q=manifest=='+manifest+cond
)
self.conn.connect()
self.conn.request( 'GET', url ,'',self.args['headers'] )
res = self.conn.getresponse( )
if res.status == 200 :
rs = json.loads( res.read())
try:
if len( rs['result'] ) != 0 :
if total :
rr = rs['total']
else:
rr = rs['result']
else:
                    self.logger('info', 'Search: no %s records matching the query were found in cmdb' % manifest)
except:
pass
else:
            self.logger('info', 'Search: bad request for manifest ' + manifest)
self.conn.close()
return rr
def update( self ):
pass
def logger( self, level , loginfo ):
dt = datetime.datetime.now()
ds = dt.strftime('%Y%m%d%H%M%S')
logfile = ds + self.args['logfile']
logging.basicConfig( filename = os.path.join(os.getcwd()+self.args['logPath'],logfile),
level = logging.WARN,
filemode = 'w',
format = '%(asctime)s - %(levelname)s: %(message)s'
)
if level == 'info': logging.info( loginfo )
if level == 'warn' :logging.warning( loginfo )
if level == 'error' :logging.error( loginfo )
def dataFormat( self,data,cmdb_node ):
rr = {}
if cmdb_node != {}:
rr['id'] = cmdb_node['.id']
rr['manifest'] = cmdb_node['.manifest']
rr['value'] = data
else:
rr['id'] = ''
rr['manifest'] = ''
rr['value'] = data
return rr
if __name__ == '__main__':
import conf.cmdb_config as conf
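    # query values are stored in Chinese in the CMDB: rack matches '永丰'
    # (Yongfeng, presumably a site name) and state matches '在线' ("online")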
conditions = [
{'name':'rack','tag':'~','value':r'永丰'},
{'name':'state','tag':'~','value':r'在线'}
]
num = 100
cmdb = cmdb( args=conf.CMDBAPI , info=None )
total = cmdb.search( 'rack_server' , True, 0, 1 , conditions )
if total % num == 0 :
times = total / num
else:
times = total / num + 1
print 'servers total is ' + str(total) + ', run '+ str(times) + '.'
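    # NOTE: WorkProfile is assumed to come from a helper module that is not
    # part of this file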
wfile = WorkProfile( )
start = 0
for i in range( times ) :
print 'run time ' + str(i+1)
res = cmdb.search( 'rack_server' , False, start, num , conditions )
start = start + num
content = ''
for r in res :
content += r['asset_number'] +"\t"+ r['sn'] +"\t"+ r['rack'].split('.')[0].strip() +"\t"+ r['ips'] + "\n"
wfile.writeFile( None , 'servers.txt', content )
|
hackshel/metaCollecter
|
src/metaManager/modules/cmdbServer.py
|
Python
|
bsd-3-clause
| 3,610
|
# -*- coding: utf-8 -*-
from django.core.files.storage import FileSystemStorage
import os
from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.timezone import utc
from autofixture.compat import get_GenericForeignKey
from autofixture.compat import get_GenericRelation
try:
from django.db.models import GenericIPAddressField as IPAddressField
except ImportError:
    from django.db.models import IPAddressField
filepath = os.path.dirname(os.path.abspath(__file__))
def y2k():
return datetime(2000, 1, 1).replace(tzinfo=utc)
class SimpleModel(models.Model):
name = models.CharField(max_length=50)
class OtherSimpleModel(models.Model):
name = models.CharField(max_length=50)
class UniqueNullFieldModel(models.Model):
name = models.CharField(max_length=15, null=True, blank=True, unique=True)
class UniqueTogetherNullFieldModel(models.Model):
field_one = models.CharField(max_length=15, null=True, blank=True)
field_two = models.CharField(max_length=15, null=True, blank=True)
class Meta:
unique_together = ['field_one', 'field_two']
class MultipleUniqueTogetherNullFieldModel(models.Model):
field_one = models.CharField(max_length=15, null=True, blank=True)
field_two = models.CharField(max_length=15, null=True, blank=True)
field_three = models.CharField(max_length=15, null=True, blank=True)
field_four = models.CharField(max_length=15, null=True, blank=True)
field_five = models.CharField(max_length=15, null=True, blank=True)
class Meta:
verbose_name = 'Multi unique_together null field'
unique_together = (
['field_one', 'field_two'],
['field_three', 'field_four', 'field_five'],
)
class DeepLinkModel1(models.Model):
related = models.ForeignKey('SimpleModel')
related2 = models.ForeignKey('SimpleModel',
related_name='deeplinkmodel1_rel2',
null=True,
blank=True)
class DeepLinkModel2(models.Model):
related = models.ForeignKey('DeepLinkModel1')
class NullableFKModel(models.Model):
m2m = models.ManyToManyField('SimpleModel', null=True, blank=True)
class BasicModel(models.Model):
chars = models.CharField(max_length=50)
shortchars = models.CharField(max_length=2)
blankchars = models.CharField(max_length=100, blank=True)
nullchars = models.CharField(max_length=100, blank=True, null=True)
slugfield = models.SlugField()
textfield = models.TextField()
blankfloatfield = models.FloatField(null=True, blank=True)
floatfield = models.FloatField()
defaultint = models.IntegerField(default=1)
intfield = models.IntegerField()
pintfield = models.PositiveIntegerField()
sintfield = models.SmallIntegerField()
psintfield = models.PositiveSmallIntegerField()
STRING_CHOICES = (
('a', 'A'),
('b', 'B'),
('c', 'C'),
)
choicefield = models.CharField(choices=STRING_CHOICES, max_length=1)
datefield = models.DateField()
datetimefield = models.DateTimeField()
defaultdatetime = models.DateTimeField(default=y2k)
timefield = models.TimeField()
decimalfield = models.DecimalField(max_digits=10, decimal_places=4)
emailfield = models.EmailField()
ipaddressfield = IPAddressField()
urlfield = models.URLField()
rfilepathfield = models.FilePathField(path=filepath, recursive=True)
filepathfield = models.FilePathField(path=filepath)
mfilepathfield = models.FilePathField(path=filepath, match=r'^.+\.py$')
imgfield = models.ImageField(upload_to='_autofixtures')
class UniqueTestModel(models.Model):
CHOICES = [(i, i) for i in range(10)]
choice1 = models.PositiveIntegerField(choices=CHOICES, unique=True)
class UniqueTogetherTestModel(models.Model):
CHOICES = [(i, i) for i in range(10)]
choice1 = models.PositiveIntegerField(choices=CHOICES)
choice2 = models.PositiveIntegerField(choices=CHOICES)
class Meta:
unique_together = ('choice1', 'choice2')
class RelatedModel(models.Model):
related = models.ForeignKey(BasicModel, related_name='rel1')
limitedfk = models.ForeignKey(SimpleModel,
limit_choices_to={'name__exact': 'foo'},
related_name='rel2',
null=True,
blank=True)
class O2OModel(models.Model):
o2o = models.OneToOneField(SimpleModel)
class O2OPrimaryKeyModel(models.Model):
o2o = models.OneToOneField(SimpleModel, primary_key=True)
class InheritModel(SimpleModel):
extrafloatfield = models.FloatField()
class InheritUniqueTogetherModel(SimpleModel):
extrafloatfield = models.FloatField()
class Meta:
unique_together = ('extrafloatfield', 'simplemodel_ptr')
class SelfReferencingModel(models.Model):
parent_self = models.ForeignKey('self', blank=True, null=True)
class SelfReferencingModelNoNull(models.Model):
parent_self = models.ForeignKey('self')
class M2MModel(models.Model):
m2m = models.ManyToManyField(SimpleModel, related_name='m2m_rel1')
secondm2m = models.ManyToManyField(
OtherSimpleModel, related_name='m2m_rel2', null=True, blank=True)
class ThroughModel(models.Model):
simple = models.ForeignKey('SimpleModel')
other = models.ForeignKey('M2MModelThrough')
class M2MModelThrough(models.Model):
m2m = models.ManyToManyField(
SimpleModel, related_name='m2mthrough_rel1', through=ThroughModel)
class GFKModel(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = get_GenericForeignKey()('content_type', 'object_id')
class GRModel(models.Model):
gr = get_GenericRelation()('GFKModel')
class DummyStorage(FileSystemStorage):
pass
dummy_storage = DummyStorage()
class ImageModel(models.Model):
imgfield = models.ImageField(upload_to='_autofixtures',
storage=dummy_storage)
class RelationWithCustomAutofixtureModel(models.Model):
user = models.ForeignKey('auth.User', related_name='user1+')
users = models.ManyToManyField('auth.User', related_name='user2+')
|
gregmuellegger/django-autofixture
|
autofixture_tests/models.py
|
Python
|
bsd-3-clause
| 6,350
|
from __future__ import absolute_import, print_function
__all__ = ['DummyProvider']
from django.http import HttpResponse
from sentry.identity.base import Provider
from sentry.pipeline import PipelineView
class AskEmail(PipelineView):
def dispatch(self, request, pipeline):
if 'email' in request.POST:
pipeline.bind_state('email', request.POST.get('email'))
return pipeline.next_step()
return HttpResponse(DummyProvider.TEMPLATE)
class DummyProvider(Provider):
name = 'Dummy'
key = 'dummy'
TEMPLATE = '<form method="POST"><input type="email" name="email" /></form>'
def get_pipeline_views(self):
return [AskEmail()]
def build_identity(self, state):
return {
'id': state['email'],
'email': state['email'],
'name': 'Dummy',
}
|
looker/sentry
|
src/sentry/identity/providers/dummy.py
|
Python
|
bsd-3-clause
| 857
|
from eventkit_cloud.settings.prod import * # NOQA
# Override settings here for test purposes.
TESTING = True
CELERY_ALWAYS_EAGER = True
BROKER_BACKEND = "memory"
PASSWORD_HASHERS = ("django.contrib.auth.hashers.MD5PasswordHasher",)
|
venicegeo/eventkit-cloud
|
eventkit_cloud/settings/tests.py
|
Python
|
bsd-3-clause
| 235
|
#!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
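A quick numeric check of the closed form (an illustrative addition): summing
the first 100 terms with sum((2*n - 1) / 2.0**n for n in xrange(1, 101))
already gives ~3.0.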
"""
import matplotlib.pyplot as plt
import numpy as np
## simulate the number of flips before heads
def coin():
tails, num_flips = True, 0
while tails:
num_flips += 1
if np.random.binomial(1,0.5):
tails = False
return num_flips
if __name__ == '__main__':
## simulate
flips = [coin() for k in xrange(10000)]
## get the distribution of counts condition on the number of flips
range_flips = range(1, max(flips) + 1)
counts = np.array([flips.count(k)*1. for k in range_flips])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(range_flips,counts,alpha=0.4)
ax.set_ylabel("counts")
ax.set_xlabel("num flips to win")
#print [int(i) for i in counts]
winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
#print range_flips
print winnings
plt.show()
|
ajrichards/bayesian-examples
|
hypothesis-testing/binomial_prob.py
|
Python
|
bsd-3-clause
| 1,308
|
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import calendar
import datetime
import decimal
from functools import partial
import locale
import math
import re
import time
import dateutil
import numpy as np
import pytest
import pytz
import pandas._libs.json as ujson
from pandas._libs.tslib import Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, u
from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, date_range
import pandas.util.testing as tm
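# On Python 2, json.dumps needs an explicit encoding to handle non-ASCII byte
# strings; json_unicode papers over that PY2/PY3 difference for the tests below.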
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ujson.encode(val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
msg = "Maximum recursion level reached"
with pytest.raises(OverflowError, match=msg):
ujson.encode(_TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
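        # With numpy=True and labelled=True the decoder returns a
        # (values, row_labels, column_labels) triple rather than plain objects.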
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(ujson.decode(ujson.encode(i, orient="values")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="values"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="index")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="index"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
def test_datetime_index(self):
date_unit = "ns"
rng = date_range("1/1/2000", periods=20)
encoded = ujson.encode(rng, date_unit=date_unit)
decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
tm.assert_index_equal(rng, decoded)
ts = Series(np.random.randn(len(rng)), index=rng)
decoded = Series(ujson.decode(ujson.encode(ts, date_unit=date_unit)))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
tm.assert_series_equal(ts, decoded)
@pytest.mark.parametrize("invalid_arr", [
"[31337,]", # Trailing comma.
"[,31337]", # Leading comma.
"[]]", # Unmatched bracket.
"[,]", # Only comma.
])
def test_decode_invalid_array(self, invalid_arr):
with pytest.raises(ValueError):
ujson.decode(invalid_arr)
@pytest.mark.parametrize("arr", [
[], [31337]
])
def test_decode_array(self, arr):
assert arr == ujson.decode(str(arr))
@pytest.mark.parametrize("extreme_num", [
9223372036854775807, -9223372036854775808
])
def test_decode_extreme_numbers(self, extreme_num):
assert extreme_num == ujson.decode(str(extreme_num))
@pytest.mark.parametrize("too_extreme_num", [
"9223372036854775808", "-90223372036854775809"
])
def test_decode_too_extreme_numbers(self, too_extreme_num):
with pytest.raises(ValueError):
ujson.decode(too_extreme_num)
def test_decode_with_trailing_whitespaces(self):
assert {} == ujson.decode("{}\n\t ")
def test_decode_with_trailing_non_whitespaces(self):
with pytest.raises(ValueError):
ujson.decode("{}\n\t a")
def test_decode_array_with_big_int(self):
with pytest.raises(ValueError):
ujson.loads("[18446098363113800555]")
@pytest.mark.parametrize("float_number", [
1.1234567893, 1.234567893, 1.34567893,
1.4567893, 1.567893, 1.67893,
1.7893, 1.893, 1.3,
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_decode_floating_point(self, sign, float_number):
float_number *= sign
tm.assert_almost_equal(float_number,
ujson.loads(str(float_number)),
check_less_precise=15)
def test_encode_big_set(self):
s = set()
for x in range(0, 100000):
s.add(x)
# Make sure no Exception is raised.
ujson.encode(s)
def test_encode_empty_set(self):
assert "[]" == ujson.encode(set())
def test_encode_set(self):
s = {1, 2, 3, 4, 5, 6, 7, 8, 9}
enc = ujson.encode(s)
dec = ujson.decode(enc)
for v in dec:
assert v in s
|
GuessWhoSamFoo/pandas
|
pandas/tests/io/json/test_ujson.py
|
Python
|
bsd-3-clause
| 38,511
|
import numpy as np
from parakeet import jit, testing_helpers
@jit
def true_divided(x):
return True / x
def test_true_divided_bool():
testing_helpers.expect(true_divided, [True], True)
def test_true_divided_int():
testing_helpers.expect(true_divided, [1], 1)
testing_helpers.expect(true_divided, [2], 0)
def test_true_divided_float():
testing_helpers.expect(true_divided, [1.0], 1.0)
testing_helpers.expect(true_divided, [2.0], 0.5)
def test_true_divided_uint8():
testing_helpers.expect(true_divided, [np.uint8(1)], 1)
testing_helpers.expect(true_divided, [np.uint8(2)], 0)
if __name__ == '__main__':
testing_helpers.run_local_tests()
|
pombredanne/parakeet
|
test/core_language/test_div_bool.py
|
Python
|
bsd-3-clause
| 679
|
# coding: utf-8
from __future__ import absolute_import
from django.apps import apps
from django.conf import settings
from sentry.models import Organization, Project, ProjectKey, Team, User
from sentry.receivers.core import create_default_projects, DEFAULT_SENTRY_PROJECT_ID
from sentry.testutils import TestCase
class CreateDefaultProjectsTest(TestCase):
def test_simple(self):
user, _ = User.objects.get_or_create(is_superuser=True, defaults={"username": "test"})
Organization.objects.all().delete()
Team.objects.filter(slug="sentry").delete()
Project.objects.filter(id=settings.SENTRY_PROJECT).delete()
config = apps.get_app_config("sentry")
create_default_projects(config)
project = Project.objects.get(id=settings.SENTRY_PROJECT)
assert project.public is False
assert project.name == "Internal"
assert project.slug == "internal"
team = project.teams.first()
assert team.slug == "sentry"
pk = ProjectKey.objects.get(project=project)
assert not pk.roles.api
assert pk.roles.store
# ensure that we dont hit an error here
create_default_projects(config)
def test_without_user(self):
User.objects.filter(is_superuser=True).delete()
Team.objects.filter(slug="sentry").delete()
Project.objects.filter(id=settings.SENTRY_PROJECT).delete()
config = apps.get_app_config("sentry")
create_default_projects(config)
project = Project.objects.get(id=settings.SENTRY_PROJECT)
assert project.public is False
assert project.name == "Internal"
assert project.slug == "internal"
team = project.teams.first()
assert team.slug == "sentry"
pk = ProjectKey.objects.get(project=project)
assert not pk.roles.api
assert pk.roles.store
# ensure that we dont hit an error here
create_default_projects(config)
def test_no_sentry_project(self):
with self.settings(SENTRY_PROJECT=None):
User.objects.filter(is_superuser=True).delete()
Team.objects.filter(slug="sentry").delete()
Project.objects.filter(id=DEFAULT_SENTRY_PROJECT_ID).delete()
config = apps.get_app_config("sentry")
create_default_projects(config)
project = Project.objects.get(id=DEFAULT_SENTRY_PROJECT_ID)
assert project.public is False
assert project.name == "Internal"
assert project.slug == "internal"
team = project.teams.first()
assert team.slug == "sentry"
pk = ProjectKey.objects.get(project=project)
assert not pk.roles.api
assert pk.roles.store
# ensure that we dont hit an error here
create_default_projects(config)
|
beeftornado/sentry
|
tests/sentry/receivers/test_core.py
|
Python
|
bsd-3-clause
| 2,858
|
# coding=utf-8
"""
Tests for press_dups.
"""
import unittest
import os
from md_utils.md_common import silent_remove, diff_lines
from md_utils.press_dups import avg_rows, compress_dups, main
__author__ = 'mayes'
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
DUPS_DIR = os.path.join(DATA_DIR, 'press_dups')
HEAD_RAW = os.path.join(DUPS_DIR, 'proc_data_all_head0.75.csv')
HEAD_STD = os.path.join(DUPS_DIR, 'std_proc_data_all_head0.75.csv')
HEAD_PRESS = os.path.join(DUPS_DIR, 'pressed_' + 'proc_data_all_head0.75.csv')
# Shared Methods #
class TestAvgRows(unittest.TestCase):
def testThree(self):
data = [{"a": 1.3, "b": 3.0, "c": 8.5}, {"a": 1.3, "b": 1.0, "c": -4.2},
{"a": 1.3, "b": 2.2, "c": 19.0}]
avg = avg_rows(data)
self.assertAlmostEqual(avg['a'], 1.3)
self.assertAlmostEqual(avg['b'], 2.066666666666)
self.assertAlmostEqual(avg['c'], 7.766666666666)
class TestPressDups(unittest.TestCase):
def testThree(self):
data = [{"a": 1.3, "b": 3.0, "c": 8.5}, {"a": 1.3, "b": 1.0, "c": -4.2},
{"a": 1.3, "b": 2.2, "c": 19.0}, {"a": 99, "b": 1.0, "c": -4.2},
{"a": -22, "b": 1.0, "c": -4.2}]
avg = compress_dups(data, "a")
self.assertEqual(3, len(avg))
class TestMainNoOutput(unittest.TestCase):
def testNoArg(self):
main([])
class TestMain(unittest.TestCase):
def testWithHead075Data(self):
try:
main(argv=[HEAD_RAW])
self.assertFalse(diff_lines(HEAD_STD, HEAD_PRESS))
finally:
silent_remove(HEAD_PRESS)
# pass
|
cmayes/md_utils
|
tests/test_press_dups.py
|
Python
|
bsd-3-clause
| 1,638
|
#!/usr/bin/env python
from __future__ import unicode_literals
from prompt_toolkit import prompt
if __name__ == '__main__':
print('If you press meta-! or esc-! at the following prompt, you can enter system commands.')
answer = prompt('Give me some input: ', enable_system_bindings=True)
print('You said: %s' % answer)
|
niklasf/python-prompt-toolkit
|
examples/system-prompt.py
|
Python
|
bsd-3-clause
| 331
|
# -*- coding: utf-8 -*-
"""
Bit Writing Request/Response
------------------------------
TODO write mask request/response
"""
import struct
from pymodbus3.constants import ModbusStatus
from pymodbus3.pdu import ModbusRequest
from pymodbus3.pdu import ModbusResponse
from pymodbus3.pdu import ModbusExceptions
from pymodbus3.utilities import pack_bitstring, unpack_bitstring
from collections import Iterable
# Local Constants
# These are defined in the spec to turn a coil on/off
_turn_coil_on = struct.pack('>H', ModbusStatus.On)
_turn_coil_off = struct.pack('>H', ModbusStatus.Off)
class WriteSingleCoilRequest(ModbusRequest):
"""
This function code is used to write a single output to either ON or OFF
in a remote device.
The requested ON/OFF state is specified by a constant in the request
data field. A value of FF 00 hex requests the output to be ON. A value
of 00 00 requests it to be OFF. All other values are illegal and will
not affect the output.
The Request PDU specifies the address of the coil to be forced. Coils
are addressed starting at zero. Therefore coil numbered 1 is addressed
as 0. The requested ON/OFF state is specified by a constant in the Coil
Value field. A value of 0XFF00 requests the coil to be ON. A value of
0X0000 requests the coil to be off. All other values are illegal and
will not affect the coil.
"""
function_code = 5
_rtu_frame_size = 8
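    # Illustrative example (not part of the original module): forcing coil 5 ON
    # encodes the 16-bit address followed by the 0xFF00 "on" constant, i.e.
    # WriteSingleCoilRequest(5, True).encode() == b'\x00\x05\xff\x00'.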
def __init__(self, address=None, value=None, **kwargs):
""" Initializes a new instance
:param address: The variable address to write
:param value: The value to write at address
"""
ModbusRequest.__init__(self, **kwargs)
self.address = address
self.value = bool(value)
def encode(self):
""" Encodes write coil request
:returns: The byte encoded message
"""
result = struct.pack('>H', self.address)
if self.value:
result += _turn_coil_on
else:
result += _turn_coil_off
return result
def decode(self, data):
""" Decodes a write coil request
:param data: The packet data to decode
"""
self.address, value = struct.unpack('>HH', data)
self.value = (value == ModbusStatus.On)
def execute(self, context):
""" Run a write coil request against a datastore
:param context: The datastore to request from
:returns: The populated response or exception message
"""
'''if self.value not in [ModbusStatus.Off, ModbusStatus.On]:
return self.do_exception(ModbusExceptions.IllegalValue)'''
if not context.validate(self.function_code, self.address, 1):
return self.do_exception(ModbusExceptions.IllegalAddress)
context.set_values(self.function_code, self.address, [self.value])
values = context.get_values(self.function_code, self.address, 1)
return WriteSingleCoilResponse(self.address, values[0])
def __str__(self):
""" Returns a string representation of the instance
:return: A string representation of the instance
"""
return 'WriteCoilRequest({0}, {1}) => '.format(
self.address, self.value
)
class WriteSingleCoilResponse(ModbusResponse):
"""
The normal response is an echo of the request, returned after the coil
state has been written.
"""
function_code = 5
_rtu_frame_size = 8
def __init__(self, address=None, value=None, **kwargs):
""" Initializes a new instance
:param address: The variable address written to
:param value: The value written at address
"""
ModbusResponse.__init__(self, **kwargs)
self.address = address
self.value = value
def encode(self):
""" Encodes write coil response
:return: The byte encoded message
"""
result = struct.pack('>H', self.address)
if self.value:
result += _turn_coil_on
else:
result += _turn_coil_off
return result
def decode(self, data):
""" Decodes a write coil response
:param data: The packet data to decode
"""
self.address, value = struct.unpack('>HH', data)
self.value = (value == ModbusStatus.On)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'WriteCoilResponse({0}) => {1}'.format(self.address, self.value)
class WriteMultipleCoilsRequest(ModbusRequest):
"""
"This function code is used to force each coil in a sequence of coils to
either ON or OFF in a remote device. The Request PDU specifies the coil
references to be forced. Coils are addressed starting at zero. Therefore
coil numbered 1 is addressed as 0.
The requested ON/OFF states are specified by contents of the request
data field. A logical '1' in a bit position of the field requests the
corresponding output to be ON. A logical '0' requests it to be OFF."
"""
function_code = 15
_rtu_byte_count_pos = 6
def __init__(self, address=None, values=None, **kwargs):
""" Initializes a new instance
:param address: The starting request address
:param values: The values to write
"""
ModbusRequest.__init__(self, **kwargs)
self.address = address
if not values:
values = []
elif not isinstance(values, Iterable):
values = [values]
self.values = values
self.byte_count = (len(self.values) + 7) // 8
def encode(self):
""" Encodes write coils request
:returns: The byte encoded message
"""
count = len(self.values)
self.byte_count = (count + 7) // 8
packet = struct.pack('>HHB', self.address, count, self.byte_count)
packet += pack_bitstring(self.values)
return packet
def decode(self, data):
""" Decodes a write coils request
:param data: The packet data to decode
"""
self.address, count, self.byte_count = struct.unpack('>HHB', data[0:5])
values = unpack_bitstring(data[5:])
self.values = values[:count]
def execute(self, context):
""" Run a write coils request against a datastore
:param context: The datastore to request from
:returns: The populated response or exception message
"""
count = len(self.values)
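        # The spec allows forcing between 1 and 0x07B0 (1968) coils per request.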
if not (1 <= count <= 0x07b0):
return self.do_exception(ModbusExceptions.IllegalValue)
if self.byte_count != ((count + 7) // 8):
return self.do_exception(ModbusExceptions.IllegalValue)
if not context.validate(self.function_code, self.address, count):
return self.do_exception(ModbusExceptions.IllegalAddress)
context.set_values(self.function_code, self.address, self.values)
return WriteMultipleCoilsResponse(self.address, count)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'WriteMultipleCoilRequest ({0}) => {1} '.format(
self.address, len(self.values)
)
class WriteMultipleCoilsResponse(ModbusResponse):
"""
The normal response returns the function code, starting address, and
quantity of coils forced.
"""
function_code = 15
_rtu_frame_size = 8
def __init__(self, address=None, count=None, **kwargs):
""" Initializes a new instance
:param address: The starting variable address written to
:param count: The number of values written
"""
ModbusResponse.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self):
""" Encodes write coils response
:returns: The byte encoded message
"""
return struct.pack('>HH', self.address, self.count)
def decode(self, data):
""" Decodes a write coils response
:param data: The packet data to decode
"""
self.address, self.count = struct.unpack('>HH', data)
def __str__(self):
""" Returns a string representation of the instance
:returns: A string representation of the instance
"""
return 'WriteMultipleCoilResponse({0}, {1})'.format(
self.address, self.count
)
# Exported symbols
__all__ = [
'WriteSingleCoilRequest',
'WriteSingleCoilResponse',
'WriteMultipleCoilsRequest',
'WriteMultipleCoilsResponse',
]
|
uzumaxy/pymodbus3
|
pymodbus3/bit_write_message.py
|
Python
|
bsd-3-clause
| 8,708
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def softmax():
# Model variables
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Setup cost function and Gradient Descent Optimizer
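    # Note: -sum(y_ * log(y)) on softmax outputs can be numerically unstable;
    # tf.nn.softmax_cross_entropy_with_logits is the more robust alternative,
    # but the tutorial-style formulation is kept here.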
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Setup input variables and session
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
for i in xrange(1000):
if (i + 1) % 100 == 0:
print "training step {}".format(i + 1)
batch_xs, batch_ys = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch_xs, y_: batch_ys})
# Evaluate model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print "Test accuracy: {}".format(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
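# Note added for clarity (not in the original script): conv2d uses 'SAME' padding, so
# only the max_pool_2x2 calls shrink the feature maps -- the 28x28 input becomes 14x14
# after the first pool and 7x7 after the second, which is why the fully connected layer
# in convolutional_network() below expects 7 * 7 * 64 inputs.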
def convolutional_network():
# Model variables
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# Layer 1
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Layer 2
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Layer 3
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Softmax readout layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Evaluate model
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Setup input variables and session
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
for i in xrange(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("Test accuracy: %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
def main():
print "\nRunning softmax model..."
softmax()
print "\nRunning convolutional neural network..."
convolutional_network()
if __name__ == '__main__':
main()
|
jonkrohn/study-group
|
weekly-work/week1/softmax_vs_convolutional_nn.py
|
Python
|
mit
| 3,870
|
import collections
from mock import MagicMock
from mock import call
from mock import patch
from honcho.test.helpers import TestCase
from honcho.export.upstart import Export
FakeProcess = collections.namedtuple('FakeProcess', 'name')
FIX_1PROC = [FakeProcess('web.1')]
FIX_NPROC = [FakeProcess('web.1'),
FakeProcess('worker.1'),
FakeProcess('worker.2')]
class TestExportUpstart(TestCase):
def setUp(self): # noqa
self.export = Export()
self.master = MagicMock()
self.process_master = MagicMock()
self.process = MagicMock()
def _get_template(name):
if name.endswith('process_master.conf'):
return self.process_master
elif name.endswith('process.conf'):
return self.process
elif name.endswith('master.conf'):
return self.master
else:
raise RuntimeError("tests don't know about that template")
self.get_template_patcher = patch.object(Export, 'get_template')
self.get_template = self.get_template_patcher.start()
self.get_template.side_effect = _get_template
def tearDown(self): # noqa
self.get_template_patcher.stop()
def test_render_master(self):
out = self.export.render(FIX_1PROC, {'app': 'elephant'})
self.assertIn(('elephant.conf', self.master.render.return_value),
out)
self.master.render.assert_called_once_with({'app': 'elephant'})
def test_render_process_master(self):
out = self.export.render(FIX_1PROC, {'app': 'elephant'})
self.assertIn(('elephant-web.conf',
self.process_master.render.return_value),
out)
expected = {'app': 'elephant',
'group_name': 'elephant-web'}
self.process_master.render.assert_called_once_with(expected)
def test_render_process(self):
out = self.export.render(FIX_1PROC, {'app': 'elephant'})
self.assertIn(('elephant-web-1.conf',
self.process.render.return_value),
out)
expected = {'app': 'elephant',
'group_name': 'elephant-web',
'process': FIX_1PROC[0]}
self.process.render.assert_called_once_with(expected)
def test_render_multiple_process_groups(self):
out = self.export.render(FIX_NPROC, {'app': 'elephant'})
self.assertIn(('elephant-web.conf',
self.process_master.render.return_value),
out)
self.assertIn(('elephant-worker.conf',
self.process_master.render.return_value),
out)
expected = [call({'app': 'elephant',
'group_name': 'elephant-web'}),
call({'app': 'elephant',
'group_name': 'elephant-worker'})]
self.assertEqual(expected, self.process_master.render.call_args_list)
def test_render_multiple_processes(self):
out = self.export.render(FIX_NPROC, {'app': 'elephant'})
self.assertIn(('elephant-web-1.conf',
self.process.render.return_value),
out)
self.assertIn(('elephant-worker-1.conf',
self.process.render.return_value),
out)
self.assertIn(('elephant-worker-2.conf',
self.process.render.return_value),
out)
expected = [call({'app': 'elephant',
'group_name': 'elephant-web',
'process': FIX_NPROC[0]}),
call({'app': 'elephant',
'group_name': 'elephant-worker',
'process': FIX_NPROC[1]}),
call({'app': 'elephant',
'group_name': 'elephant-worker',
'process': FIX_NPROC[2]})]
self.assertEqual(expected, self.process.render.call_args_list)
|
janusnic/honcho
|
honcho/test/unit/export/test_upstart.py
|
Python
|
mit
| 4,088
|
from __future__ import unicode_literals
import os
import paramiko
from django.utils import six
from reviewboard.ssh.client import SSHClient
from reviewboard.ssh.errors import (BadHostKeyError, SSHAuthenticationError,
SSHError)
from reviewboard.ssh.policy import RaiseUnknownHostKeyPolicy
SSH_PORT = 22
try:
import urlparse
uses_netloc = urlparse.uses_netloc
urllib_parse = urlparse.urlparse
except ImportError:
import urllib.parse
uses_netloc = urllib.parse.uses_netloc
urllib_parse = urllib.parse.urlparse
# A list of known SSH URL schemes.
ssh_uri_schemes = ["ssh", "sftp"]
uses_netloc.extend(ssh_uri_schemes)
def humanize_key(key):
"""Returns a human-readable key as a series of hex characters."""
return ':'.join(["%02x" % ord(c) for c in key.get_fingerprint()])
def is_ssh_uri(url):
"""Returns whether or not a URL represents an SSH connection."""
return urllib_parse(url)[0] in ssh_uri_schemes
def check_host(netloc, username=None, password=None, namespace=None):
"""
Checks if we can connect to a host with a known key.
This will raise an exception if we cannot connect to the host. The
    exception will be one of BadHostKeyError, UnknownHostKeyError,
    SSHAuthenticationError, or SSHError.
"""
from django.conf import settings
client = SSHClient(namespace=namespace)
client.set_missing_host_key_policy(RaiseUnknownHostKeyPolicy())
kwargs = {}
if ':' in netloc:
hostname, port = netloc.split(':')
port = int(port)
else:
hostname = netloc
port = SSH_PORT
# We normally want to notify on unknown host keys, but not when running
# unit tests.
if getattr(settings, 'RUNNING_TEST', False):
client.set_missing_host_key_policy(paramiko.WarningPolicy())
kwargs['allow_agent'] = False
try:
client.connect(hostname, port, username=username, password=password,
pkey=client.get_user_key(), **kwargs)
except paramiko.BadHostKeyException as e:
raise BadHostKeyError(e.hostname, e.key, e.expected_key)
except paramiko.AuthenticationException as e:
# Some AuthenticationException instances have allowed_types set,
# and some don't.
allowed_types = getattr(e, 'allowed_types', [])
if 'publickey' in allowed_types:
key = client.get_user_key()
else:
key = None
raise SSHAuthenticationError(allowed_types=allowed_types, user_key=key)
except paramiko.SSHException as e:
msg = six.text_type(e)
if msg == 'No authentication methods available':
raise SSHAuthenticationError
else:
raise SSHError(msg)
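# Illustrative usage sketch (added for clarity; host and credentials are placeholders):
#
#   check_host('example.com:2222', username='reviewboard', password='secret')
#
# A successful connection simply returns; failures surface as BadHostKeyError,
# SSHAuthenticationError or SSHError as raised above.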
def register_rbssh(envvar):
"""Registers rbssh in an environment variable.
This is a convenience method for making sure that rbssh is set properly
in the environment for different tools. In some cases, we need to
specifically place it in the system environment using ``os.putenv``,
while in others (Mercurial, Bazaar), we need to place it in ``os.environ``.
"""
envvar = envvar.encode('utf-8')
os.putenv(envvar, b'rbssh')
os.environ[envvar] = b'rbssh'
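# Illustrative call (added for clarity; the variable name is only an example of the
# kind of tool-specific SSH hook this helper is meant for):
#
#   register_rbssh('GIT_SSH')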
|
KnowNo/reviewboard
|
reviewboard/ssh/utils.py
|
Python
|
mit
| 3,229
|
import socket
try:
import requests
httplib2 = None
except ImportError:
requests = None
try:
import httplib2
except ImportError:
raise ImportError('No module named requests or httplib2')
ConnectionError = requests.exceptions.ConnectionError if requests else socket.error
def wrap_http_connection(http_connection=None):
if not http_connection:
http_connection = requests.Session() if requests else httplib2.Http()
if not is_requests_instance(http_connection):
http_connection = RequestWrapper(http_connection)
return http_connection
def is_requests_instance(obj):
return hasattr(obj, 'get') and hasattr(obj, 'post')
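# Illustrative usage sketch (added for clarity; the URL is a placeholder): whether
# `requests` or `httplib2` is installed, the object returned below exposes
# request(method, url, ...) and yields a response with .status_code and .content:
#
#   conn = wrap_http_connection()
#   response = conn.request('GET', 'http://localhost:8983/solr/select?q=*:*')
#   print(response.status_code, len(response.content))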
class RequestWrapper(object):
"""
Wraps an `httplib2` instance to make it behave enough like a
`requests` instance for our purposes
"""
def __init__(self, conn):
self.conn = conn
def request(self, method, url, data=None, headers=None):
response, content = self.conn.request(url, method=method, body=data, headers=headers)
return ResponseWrapper(response, content)
class ResponseWrapper(object):
"""
Wraps an `httplib2` response pair to make it behave enough like a
`requests` response object for our purposes
"""
def __init__(self, response, content):
self.status_code = response.status
self.content = content
|
tow/sunburnt
|
sunburnt/http.py
|
Python
|
mit
| 1,389
|
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from collections import defaultdict
import os
import ansiblelint.utils
class AnsibleLintRule(object):
def __repr__(self):
return self.id + ": " + self.shortdesc
def verbose(self):
return self.id + ": " + self.shortdesc + "\n " + self.description
def match(self, file="", line=""):
return []
def matchlines(self, file, text):
matches = []
        # enumerate() yields 0-based indexes, but reported line numbers are
        # 1-based, so add 1 to prev_line_no when recording a match below
for (prev_line_no, line) in enumerate(text.split("\n")):
result = self.match(file, line)
if result:
message = None
if isinstance(result, str):
message = result
matches.append(Match(prev_line_no+1, line,
file['path'], self, message))
return matches
def matchtask(self, file="", task=None):
return []
def matchtasks(self, file, text):
matches = []
yaml = ansiblelint.utils.parse_yaml_linenumbers(text)
if yaml:
for task in ansiblelint.utils.get_action_tasks(yaml, file):
if 'skip_ansible_lint' in task.get('tags', []):
continue
if 'action' in task:
result = self.matchtask(file, task)
if result:
message = None
if isinstance(result, str):
message = result
taskstr = "Task/Handler: " + ansiblelint.utils.task_to_str(task)
matches.append(Match(task[ansiblelint.utils.LINE_NUMBER_KEY], taskstr,
file['path'], self, message))
return matches
def matchyaml(self, file, text):
matches = []
yaml = ansiblelint.utils.parse_yaml_linenumbers(text)
if yaml and hasattr(self, 'matchplay'):
for play in yaml:
result = self.matchplay(file, play)
if result:
(section, message) = result
matches.append(Match(play[ansiblelint.utils.LINE_NUMBER_KEY], section,
file['path'], self, message))
return matches
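# Illustrative rule sketch (added for clarity; not a rule shipped with ansible-lint):
#
#   class TrailingWhitespaceRule(AnsibleLintRule):
#       id = 'EXAMPLE001'
#       shortdesc = 'Trailing whitespace'
#       description = 'Lines should not end in trailing whitespace'
#       tags = ['formatting']
#
#       def match(self, file='', line=''):
#           return line != line.rstrip()
#
# matchlines() above turns every truthy result into a Match carrying the 1-based
# line number; returning a string instead of True overrides the match message.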
class RulesCollection(object):
def __init__(self):
self.rules = []
def register(self, obj):
self.rules.append(obj)
def __iter__(self):
return iter(self.rules)
def __len__(self):
return len(self.rules)
def extend(self, more):
self.rules.extend(more)
def run(self, playbookfile, tags=set(), skip_tags=set()):
text = ""
matches = list()
with open(playbookfile['path'], 'Ur') as f:
text = f.read()
for rule in self.rules:
if not tags or not set(rule.tags).isdisjoint(tags):
if set(rule.tags).isdisjoint(skip_tags):
matches.extend(rule.matchlines(playbookfile, text))
matches.extend(rule.matchtasks(playbookfile, text))
matches.extend(rule.matchyaml(playbookfile, text))
return matches
def __repr__(self):
return "\n".join([rule.verbose()
for rule in sorted(self.rules, key=lambda x: x.id)])
def listtags(self):
tags = defaultdict(list)
for rule in self.rules:
for tag in rule.tags:
tags[tag].append("[{0}]".format(rule.id))
results = []
for tag in sorted(tags):
results.append("{0} {1}".format(tag, tags[tag]))
return "\n".join(results)
@classmethod
def create_from_directory(cls, rulesdir):
result = cls()
result.rules = ansiblelint.utils.load_plugins(os.path.expanduser(rulesdir))
return result
class Match:
def __init__(self, linenumber, line, filename, rule, message=None):
self.linenumber = linenumber
self.line = line
self.filename = filename
self.rule = rule
self.message = message or rule.shortdesc
def __repr__(self):
formatstr = "[{0}] ({1}) matched {2}:{3} {4}"
return formatstr.format(self.rule.id, self.message,
self.filename, self.linenumber, self.line)
class Runner:
def __init__(self, rules, playbooks, tags, skip_tags):
self.rules = rules
self.playbooks = set()
for pb in playbooks:
self.playbooks.add((pb, 'playbook'))
self.tags = tags
self.skip_tags = skip_tags
def run(self):
files = list()
for playbook in self.playbooks:
files.append({'path': playbook[0], 'type': playbook[1]})
visited = set()
while (visited != self.playbooks):
for arg in self.playbooks - visited:
for file in ansiblelint.utils.find_children(arg):
self.playbooks.add((file['path'], file['type']))
files.append(file)
visited.add(arg)
matches = list()
for file in files:
matches.extend(self.rules.run(file, tags=set(self.tags),
skip_tags=set(self.skip_tags)))
return matches
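# Illustrative wiring sketch (added for clarity; the rules directory and playbook
# path are placeholders):
#
#   rules = RulesCollection.create_from_directory('~/my-ansible-lint-rules')
#   runner = Runner(rules, ['site.yml'], tags=[], skip_tags=[])
#   for match in runner.run():
#       print(match)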
|
schlueter/ansible-lint
|
lib/ansiblelint/__init__.py
|
Python
|
mit
| 6,392
|
import re
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from apps.rss_feeds.models import Feed
from apps.reader.models import UserSubscription
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("-u", "--user", dest="user", nargs=1, help="Specify user id or username"),
)
def handle(self, *args, **options):
if re.match(r"([0-9]+)", options['user']):
user = User.objects.get(pk=int(options['user']))
else:
user = User.objects.get(username=options['user'])
subscriptions = UserSubscription.objects.filter(user=user)
print " ---> Indexing %s feeds..." % subscriptions.count()
for sub in subscriptions:
try:
sub.feed.index_stories_for_search()
except Feed.DoesNotExist:
print " ***> Couldn't find %s" % sub.feed_id
|
slava-sh/NewsBlur
|
apps/search/management/commands/index_stories.py
|
Python
|
mit
| 993
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('django_mailbox_message', 'from_address', 'address')
db.rename_column('django_mailbox_message', 'received', 'processed')
def backwards(self, orm):
db.rename_column('django_mailbox_message', 'address', 'from_address')
db.rename_column('django_mailbox_message', 'processed', 'received')
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['django_mailbox']
|
leifurhauks/django-mailbox
|
django_mailbox/south_migrations/0005_rename_fields.py
|
Python
|
mit
| 1,958
|
"""
Script used to publish GitHub release notes extracted from CHANGELOG.rst.
This script is meant to be executed after a successful deployment in GitHub actions.
Uses the following environment variables:
* GITHUB_REF: the ``refs/tags/...`` ref of the tag being published (the tag name
  may also be passed as the first command-line argument instead).
* GITHUB_REPOSITORY: the "owner/repo" slug of the repository.
* GH_RELEASE_NOTES_TOKEN: a personal access token with 'repo' permissions.
Create one at:
https://github.com/settings/tokens
This token should be set in a secret in the repository, which is exposed as an
environment variable in the main.yml workflow file.
The script also requires ``pandoc`` to be previously installed in the system.
Requires Python3.6+.
"""
import os
import re
import sys
from pathlib import Path
import github3
import pypandoc
def publish_github_release(slug, token, tag_name, body):
github = github3.login(token=token)
owner, repo = slug.split("/")
repo = github.repository(owner, repo)
return repo.create_release(tag_name=tag_name, body=body)
def parse_changelog(tag_name):
p = Path(__file__).parent.parent / "doc/en/changelog.rst"
changelog_lines = p.read_text(encoding="UTF-8").splitlines()
title_regex = re.compile(r"pytest (\d\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
consuming_version = False
version_lines = []
for line in changelog_lines:
m = title_regex.match(line)
if m:
# found the version we want: start to consume lines until we find the next version title
if m.group(1) == tag_name:
consuming_version = True
# found a new version title while parsing the version we want: break out
elif consuming_version:
break
if consuming_version:
version_lines.append(line)
return "\n".join(version_lines)
def convert_rst_to_md(text):
return pypandoc.convert_text(
text, "md", format="rst", extra_args=["--wrap=preserve"]
)
def main(argv):
if len(argv) > 1:
tag_name = argv[1]
else:
tag_name = os.environ.get("GITHUB_REF")
if not tag_name:
print("tag_name not given and $GITHUB_REF not set", file=sys.stderr)
return 1
if tag_name.startswith("refs/tags/"):
tag_name = tag_name[len("refs/tags/") :]
token = os.environ.get("GH_RELEASE_NOTES_TOKEN")
if not token:
print("GH_RELEASE_NOTES_TOKEN not set", file=sys.stderr)
return 1
slug = os.environ.get("GITHUB_REPOSITORY")
if not slug:
print("GITHUB_REPOSITORY not set", file=sys.stderr)
return 1
rst_body = parse_changelog(tag_name)
md_body = convert_rst_to_md(rst_body)
if not publish_github_release(slug, token, tag_name, md_body):
print("Could not publish release notes:", file=sys.stderr)
print(md_body, file=sys.stderr)
return 5
print()
print(f"Release notes for {tag_name} published successfully:")
print(f"https://github.com/{slug}/releases/tag/{tag_name}")
print()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
pytest-dev/pytest
|
scripts/publish-gh-release-notes.py
|
Python
|
mit
| 3,049
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Sebastien Helleu <flashcode@flashtux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# WeeChat theme manager.
# (this script requires WeeChat 0.3.5 or newer)
#
# History:
#
# 2011-09-28, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.1: dev snapshot
# 2011-02-22, Sebastien Helleu <flashcode@flashtux.org>:
# start dev
#
SCRIPT_NAME = 'theme'
SCRIPT_AUTHOR = 'Sebastien Helleu <flashcode@flashtux.org>'
SCRIPT_VERSION = '0.1-dev'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'WeeChat theme manager'
SCRIPT_COMMAND = 'theme'
import_weechat_ok = True
import_other_ok = True
try:
import weechat
except ImportError:
import_weechat_ok = False
try:
import sys
import os
import re
import datetime
import time
except ImportError as e:
print('Missing package(s) for %s: %s' % (SCRIPT_NAME, e))
import_other_ok = False
THEME_CONFIG_FILE_NAME = "theme"
COLOR_ATTRIBUTES = ('*', '_', '!')
# config file and options
theme_config_file = ""
theme_config_option = {}
theme_bars = 'input|nicklist|status|title'
theme_plugins = 'weechat|alias|aspell|charset|fifo|irc|logger|relay|rmodifier|xfer'
theme_options_include_re = (
r'^weechat\.bar\.(%s)\.color.*' % theme_bars,
r'^weechat\.look\.buffer_time_format$',
r'^(%s)\.color\..*' % theme_plugins,
r'^(%s)\.look\..*color.*' % theme_plugins,
)
theme_options_exclude_re = (
r'^weechat.look.color_pairs_auto_reset$',
r'^weechat.look.color_real_white$',
r'^weechat.look.color_basic_force_bold$',
r'^irc\.look\.',
)
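# Examples added for clarity: with the patterns above, options such as
# "weechat.bar.status.color_fg" or "irc.color.nick_prefixes" are considered part of a
# theme, while "irc.look.*" and "weechat.look.color_real_white" are explicitly skipped.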
# =================================[ config ]=================================
def theme_config_init():
"""Initialization of configuration file. Sections: ???."""
global theme_config_file, theme_config_option
theme_config_file = weechat.config_new(THEME_CONFIG_FILE_NAME,
'theme_config_reload_cb', '')
if not theme_config_file:
return
# section "color"
section_color = weechat.config_new_section(
theme_config_file, 'color', 0, 0, '', '', '', '', '', '', '', '', '', '')
if not section_color:
weechat.config_free(theme_config_file)
return
theme_config_option['color_script'] = weechat.config_new_option(
theme_config_file, section_color,
'script', 'color', 'Color for script names', '', 0, 0,
'cyan', 'cyan', 0, '', '', '', '', '', '')
theme_config_option['color_installed'] = weechat.config_new_option(
theme_config_file, section_color,
'installed', 'color', 'Color for "installed" indicator', '', 0, 0,
'yellow', 'yellow', 0, '', '', '', '', '', '')
theme_config_option['color_running'] = weechat.config_new_option(
theme_config_file, section_color,
'running', 'color', 'Color for "running" indicator', '', 0, 0,
'lightgreen', 'lightgreen', 0, '', '', '', '', '', '')
theme_config_option['color_obsolete'] = weechat.config_new_option(
theme_config_file, section_color,
'obsolete', 'color', 'Color for "obsolete" indicator', '', 0, 0,
'lightmagenta', 'lightmagenta', 0, '', '', '', '', '', '')
theme_config_option['color_unknown'] = weechat.config_new_option(
theme_config_file, section_color,
'unknown', 'color', 'Color for "unknown status" indicator', '', 0, 0,
'lightred', 'lightred', 0, '', '', '', '', '', '')
theme_config_option['color_language'] = weechat.config_new_option(
theme_config_file, section_color,
'language', 'color', 'Color for language names', '', 0, 0,
'lightblue', 'lightblue', 0, '', '', '', '', '', '')
# section "themes"
section_themes = weechat.config_new_section(
theme_config_file, 'themes', 0, 0, '', '', '', '', '', '', '', '', '', '')
if not section_themes:
weechat.config_free(theme_config_file)
return
theme_config_option['themes_url'] = weechat.config_new_option(
theme_config_file, section_themes,
'url', 'string', 'URL for file with list of themes', '', 0, 0,
'http://www.weechat.org/files/themes.json.gz',
'http://www.weechat.org/files/themes.json.gz', 0, '', '', '', '', '', '')
theme_config_option['themes_cache_expire'] = weechat.config_new_option(
theme_config_file, section_themes,
'cache_expire', 'integer', 'Local cache expiration time, in minutes '
'(-1 = never expires, 0 = always expires)', '',
-1, 60*24*365, '60', '60', 0, '', '', '', '', '', '')
theme_config_option['themes_dir'] = weechat.config_new_option(
theme_config_file, section_themes,
'dir', 'string', 'Local directory for themes', '', 0, 0,
'%h/themes', '%h/themes', 0, '', '', '', '', '', '')
def theme_config_reload_cb(data, config_file):
"""Reload configuration file."""
return weechat.config_read(config_file)
def theme_config_read():
"""Read configuration file."""
global theme_config_file
return weechat.config_read(theme_config_file)
def theme_config_write():
"""Write configuration file."""
global theme_config_file
return weechat.config_write(theme_config_file)
def theme_config_color(color):
"""Get a color from configuration."""
global theme_config_option
option = theme_config_option.get('color_%s' % color, '')
if not option:
return ''
return weechat.color(weechat.config_string(option))
def theme_config_get_dir():
"""Return themes directory, with expanded WeeChat home dir."""
global theme_config_option
return weechat.config_string(
theme_config_option['themes_dir']).replace('%h',
weechat.info_get('weechat_dir', ''))
def theme_config_get_backup():
"""Return name of backup theme (by default "~/.weechat/themes/_backup.theme")."""
return '%s/_backup.theme' % theme_config_get_dir()
def theme_config_get_undo():
"""Return name of undo file (by default "~/.weechat/themes/_undo.theme")."""
return '%s/_undo.theme' % theme_config_get_dir()
def theme_config_create_dir():
"""Create "themes" directory."""
directory = theme_config_get_dir()
if not os.path.isdir(directory):
os.makedirs(directory, mode=0700)
def theme_config_get_cache_filename():
"""Get local cache filename, based on URL."""
global theme_config_option
return '%s/%s' % (theme_config_get_dir(),
                      os.path.basename(weechat.config_string(theme_config_option['themes_url'])))
# =================================[ themes ]=================================
class Theme:
def __init__(self, filename=None):
self.filename = filename
self.props = {}
self.listprops = []
self.options = {}
self.theme_ok = True
if self.filename:
self.theme_ok = self.load(self.filename)
else:
self.init_weechat()
self.nick_prefixes = self._get_nick_prefixes()
def isok(self):
return self.theme_ok
def _option_is_used(self, option):
global theme_options_include_re, theme_options_exclude_re
for regex in theme_options_exclude_re:
if re.search(regex, option):
return False
for regex in theme_options_include_re:
if re.search(regex, option):
return True
return False
def _get_nick_prefixes(self):
"""Get dict with nick prefixes."""
prefixes = {}
for prefix in self.options.get('irc.color.nick_prefixes', '').split(';'):
values = prefix.split(':', 1)
if len(values) == 2:
prefixes[values[0]] = values[1]
return prefixes
def _get_attr_color(self, color):
"""Return tuple with attributes and color."""
m = re.match('([*_!]*)(.*)', color)
if m:
return m.group(1), m.group(2)
return '', color
def _get_color_without_alias(self, color):
"""Return color without alias (color can be "fg", "fg,bg" or "fg:bg")."""
pos = color.find(',')
if pos < 0:
pos = color.find(':')
if pos > 0:
fg = color[0:pos]
bg = color[pos + 1:]
else:
fg = color
bg = ''
attr, col = self._get_attr_color(fg)
fg = '%s%s' % (attr, self.palette.get(col, col))
attr, col = self._get_attr_color(bg)
bg = '%s%s' % (attr, self.palette.get(col, col))
if bg:
return '%s%s%s' % (fg, color[pos:pos + 1], bg)
return fg
def _replace_color_alias(self, match):
value = match.group()[2:-1]
if value in self.palette:
value = self.palette[value]
return '${%s}' % value
def init_weechat(self):
"""Initialize theme using current WeeChat options (aliases are replaced with their values from palette)."""
# get palette options
self.palette = {}
infolist = weechat.infolist_get('option', '', 'weechat.palette.*')
while weechat.infolist_next(infolist):
option_name = weechat.infolist_string(infolist, 'option_name')
value = weechat.infolist_string(infolist, 'value')
self.palette[value] = option_name
weechat.infolist_free(infolist)
# get color options (replace aliases by values from palette)
self.options = {}
infolist = weechat.infolist_get('option', '', '')
while weechat.infolist_next(infolist):
full_name = weechat.infolist_string(infolist, 'full_name')
if self._option_is_used(full_name):
value = weechat.infolist_string(infolist, 'value')
self.options[full_name] = self._get_color_without_alias(value)
weechat.infolist_free(infolist)
# replace aliases in chat_nick_colors
option = 'weechat.color.chat_nick_colors'
colors = []
for color in self.options.get(option, '').split(','):
colors.append(self._get_color_without_alias(color))
if colors:
self.options[option] = ','.join(colors)
# replace aliases in buffer_time_format
option = 'weechat.look.buffer_time_format'
if option in self.options:
value = re.compile(r'\$\{[^\}]+\}').sub(self._replace_color_alias, self.options[option])
if value:
self.options[option] = value
        # build dict with nick prefixes (and replace aliases)
prefixes = []
option = 'irc.color.nick_prefixes'
for prefix in self.options.get(option, '').split(';'):
values = prefix.split(':', 1)
if len(values) == 2:
prefixes.append('%s:%s' % (values[0], self._get_color_without_alias(values[1])))
if prefixes:
self.options[option] = ';'.join(prefixes)
# delete palette
del self.palette
def prnt(self, message):
try:
weechat.prnt('', message)
except:
print(message)
def prnt_error(self, message):
try:
weechat.prnt('', '%s%s' % (weechat.prefix('error'), message))
except:
print(message)
def load(self, filename):
self.options = {}
try:
lines = open(filename, 'rb').readlines()
for line in lines:
line = str(line.strip().decode('utf-8'))
if line.startswith('#'):
m = re.match('^# \\$([A-Za-z]+): (.*)', line)
if m:
self.props[m.group(1)] = m.group(2)
self.listprops.append(m.group(1))
else:
items = line.split('=', 1)
if len(items) == 2:
value = items[1].strip()
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
self.options[items[0].strip()] = value
return True
except:
self.prnt('Error loading theme "%s"' % filename)
return False
def save(self, filename):
names = self.options.keys()
names.sort()
try:
f = open(filename, 'w')
version = weechat.info_get('version', '')
pos = version.find('-')
if pos > 0:
version = version[0:pos]
header = ('#',
'# -- WeeChat theme --',
'# $name: %s' % os.path.basename(filename),
'# $date: %s' % datetime.date.today(),
'# $weechat: %s' % version,
'# $script: %s.py %s' % (SCRIPT_NAME, SCRIPT_VERSION),
'#\n')
f.write('\n'.join(header))
for option in names:
f.write('%s = "%s"\n' % (option, self.options[option]))
f.close()
self.prnt('Theme saved to "%s"' % filename)
except:
self.prnt_error('Error writing theme to "%s"' % filename)
raise
def show(self, header):
"""Display content of theme."""
names = self.options.keys()
names.sort()
self.prnt('')
self.prnt(header)
for name in names:
self.prnt(' %s %s= %s%s' % (name, weechat.color('chat_delimiters'),
weechat.color('chat_value'), self.options[name]))
def info(self, header):
"""Display info about theme."""
self.prnt('')
self.prnt(header)
for prop in self.listprops:
self.prnt(' %s: %s%s' % (prop, weechat.color('chat_value'), self.props[prop]))
numerrors = 0
for name in self.options:
if not weechat.config_get(name):
numerrors += 1
if numerrors == 0:
text = 'all OK'
else:
text = 'WARNING: %d option(s) not found in your WeeChat' % numerrors
self.prnt(' options: %s%d%s (%s)' % (weechat.color('chat_value'), len(self.options), weechat.color('reset'), text))
def install(self):
try:
numset = 0
numerrors = 0
for name in self.options:
option = weechat.config_get(name)
if option:
if weechat.config_option_set(option, self.options[name], 1) == weechat.WEECHAT_CONFIG_OPTION_SET_ERROR:
self.prnt_error('Error setting option "%s" to value "%s" (running an old WeeChat?)' % (name, self.options[name]))
numerrors += 1
else:
numset += 1
else:
self.prnt('Warning: option not found: "%s" (running an old WeeChat?)' % name)
numerrors += 1
errors = ''
if numerrors > 0:
errors = ', %d error(s)' % numerrors
if self.filename:
self.prnt('Theme "%s" installed (%d options set%s)' % (self.filename, numset, errors))
else:
self.prnt('Theme installed (%d options set%s)' % (numset, errors))
except:
if self.filename:
self.prnt_error('Failed to install theme "%s"' % self.filename)
else:
self.prnt_error('Failed to install theme')
def nick_prefix_color(self, prefix):
"""Get color for a nick prefix."""
modes = 'qaohv'
prefixes = '~&@%+'
pos = prefixes.find(prefix)
if pos < 0:
return ''
while pos < len(modes):
if modes[pos] in self.nick_prefixes:
return self.nick_prefixes[modes[pos]]
pos += 1
return self.nick_prefixes.get('*', '')
# =============================[ themes / html ]==============================
class HtmlTheme(Theme):
def __init__(self, filename=None, chat_width=85, chat_height=25, prefix_width=10, nicklist_width=10):
Theme.__init__(self, filename)
self.chat_width = chat_width
self.chat_height = chat_height
self.prefix_width = prefix_width
self.nicklist_width = nicklist_width
def html_color(self, index):
"""Return HTML color with index in table of 256 colors."""
terminal_colors = \
'000000cd000000cd00cdcd000000cdcd00cd00cdcde5e5e54d4d4dff000000ff00ffff000000ffff00ff00ffffffffff' \
'00000000002a0000550000800000aa0000d4002a00002a2a002a55002a80002aaa002ad400550000552a005555005580' \
'0055aa0055d400800000802a0080550080800080aa0080d400aa0000aa2a00aa5500aa8000aaaa00aad400d40000d42a' \
'00d45500d48000d4aa00d4d42a00002a002a2a00552a00802a00aa2a00d42a2a002a2a2a2a2a552a2a802a2aaa2a2ad4' \
'2a55002a552a2a55552a55802a55aa2a55d42a80002a802a2a80552a80802a80aa2a80d42aaa002aaa2a2aaa552aaa80' \
'2aaaaa2aaad42ad4002ad42a2ad4552ad4802ad4aa2ad4d455000055002a5500555500805500aa5500d4552a00552a2a' \
'552a55552a80552aaa552ad455550055552a5555555555805555aa5555d455800055802a5580555580805580aa5580d4' \
'55aa0055aa2a55aa5555aa8055aaaa55aad455d40055d42a55d45555d48055d4aa55d4d480000080002a800055800080' \
'8000aa8000d4802a00802a2a802a55802a80802aaa802ad480550080552a8055558055808055aa8055d480800080802a' \
'8080558080808080aa8080d480aa0080aa2a80aa5580aa8080aaaa80aad480d40080d42a80d45580d48080d4aa80d4d4' \
'aa0000aa002aaa0055aa0080aa00aaaa00d4aa2a00aa2a2aaa2a55aa2a80aa2aaaaa2ad4aa5500aa552aaa5555aa5580' \
'aa55aaaa55d4aa8000aa802aaa8055aa8080aa80aaaa80d4aaaa00aaaa2aaaaa55aaaa80aaaaaaaaaad4aad400aad42a' \
'aad455aad480aad4aaaad4d4d40000d4002ad40055d40080d400aad400d4d42a00d42a2ad42a55d42a80d42aaad42ad4' \
'd45500d4552ad45555d45580d455aad455d4d48000d4802ad48055d48080d480aad480d4d4aa00d4aa2ad4aa55d4aa80' \
'd4aaaad4aad4d4d400d4d42ad4d455d4d480d4d4aad4d4d40808081212121c1c1c2626263030303a3a3a4444444e4e4e' \
'5858586262626c6c6c7676768080808a8a8a9494949e9e9ea8a8a8b2b2b2bcbcbcc6c6c6d0d0d0dadadae4e4e4eeeeee'
color = terminal_colors[index*6:(index*6)+6]
#if color in ('000000', 'e5e5e5'): # keep black or 'default' (gray)
# return color
r = int(color[0:2], 16)
g = int(color[2:4], 16)
b = int(color[4:6], 16)
        r = min(r * (1.5 - (r / 510.0)), 255)
        g = min(g * (1.5 - (g / 510.0)), 255)
        b = min(b * (1.5 - (b / 510.0)), 255)
        return '%02x%02x%02x' % (int(r), int(g), int(b))
def html_style(self, fg, bg):
"""Return HTML style with WeeChat fg and bg colors."""
weechat_basic_colors = {
'default': 7, 'black': 0, 'darkgray': 8, 'red': 1, 'lightred': 9,
'green': 2, 'lightgreen': 10, 'brown': 3, 'yellow': 11, 'blue': 4,
'lightblue': 12, 'magenta': 5, 'lightmagenta': 13, 'cyan': 6,
'lightcyan': 14, 'gray': 7, 'white': 15 }
delim = max(fg.find(','), fg.find(':'))
if delim > 0:
bg = fg[delim + 1:]
fg = fg[0:delim]
bold = ''
underline = ''
reverse = False
while fg[0] in COLOR_ATTRIBUTES:
if fg[0] == '*':
bold = '; font-weight: bold'
elif fg[0] == '_':
underline = '; text-decoration: underline'
elif fg[0] == '!':
reverse = True
fg = fg[1:]
while bg[0] in COLOR_ATTRIBUTES:
bg = bg[1:]
if fg == 'default':
fg = self.options['fg']
if bg == 'default':
bg = self.options['bg']
if bold and fg in ('black', '0'):
fg = 'darkgray'
reverse = ''
if reverse:
fg2 = bg
bg = fg
fg = fg2
if fg == 'white' and self.whitebg:
fg = 'black'
num_fg = 0
num_bg = 0
if fg in weechat_basic_colors:
num_fg = weechat_basic_colors[fg]
else:
try:
num_fg = int(fg)
except:
self.prnt('Warning: unknown fg color "%s", using "default" instead' % fg)
num_fg = weechat_basic_colors['default']
if bg in weechat_basic_colors:
num_bg = weechat_basic_colors[bg]
else:
try:
num_bg = int(bg)
except:
self.prnt('Warning: unknown bg color "%s", using "default" instead' % bg)
num_bg = weechat_basic_colors['default']
style = 'color: #%s; background-color: #%s%s%s' % (self.html_color(num_fg),
self.html_color(num_bg),
bold, underline)
return style
def html_string(self, string, maxlen, optfg='fg', optbg='bg'):
"""Write html string using fg/bg colors."""
fg = optfg
bg = optbg
if fg in self.options:
fg = self.options[optfg]
if bg in self.options:
bg = self.options[optbg]
if maxlen >= 0:
string = string.ljust(maxlen)
else:
string = string.rjust(maxlen * -1)
return '<span style="%s">%s</span>' % (self.html_style (fg, bg), string)
def html_nick(self, nicks, index, prefix, usecolor, highlight, maxlen, optfg='fg', optbg='bg'):
"""Print a nick."""
nick = nicks[index]
nickfg = optfg
if usecolor and optfg != 'weechat.color.nicklist_away':
nick_colors = self.options['weechat.color.chat_nick_colors'].split(',')
nickfg = nick_colors[index % len(nick_colors)]
if usecolor and nick == self.html_nick_self:
nickfg = 'weechat.color.chat_nick_self'
if nick[0] in ('@', '%', '+'):
color = self.nick_prefix_color(nick[0]) or optfg
str_prefix = self.html_string(nick[0], 1, color, optbg)
nick = nick[1:]
else:
str_prefix = self.html_string(' ', 1, optfg, optbg)
length = 1 + len(nick)
if not prefix:
str_prefix = ''
maxlen += 1
length -= 1
padding = ''
if length < abs(maxlen):
padding = self.html_string('', abs(maxlen) - length, optfg, optbg)
if highlight:
nickfg = 'weechat.color.chat_highlight'
optbg = 'weechat.color.chat_highlight_bg'
string = str_prefix + self.html_string(nick, 0, nickfg, optbg)
if maxlen < 0:
return padding + string
return string + padding
def html_concat(self, messages, width, optfg, optbg):
"""Concatenate some messages with colors."""
string = ''
remaining = width
for msg in messages:
if msg[0] != '':
string += self.html_string(msg[1], 0, msg[0], optbg)
remaining -= len(msg[1])
else:
string += self.html_nick((msg[1],), 0, False, True, False, 0, optfg, optbg)
remaining -= len(msg[1])
if msg[1][0] in ('@', '%', '+'):
remaining += 1
string += self.html_string('', remaining, optfg, optbg)
return string
def _html_apply_colors(self, match):
string = match.group()
end = string.find('}')
if end < 0:
return string
color = string[2:end]
text = string[end + 1:]
return self.html_string(text, 0, color)
def _html_apply_color_chat_time_delimiters(self, match):
return self.html_string(match.group(), 0, 'weechat.color.chat_time_delimiters')
def html_chat_time(self, msgtime):
"""Return formatted time with colors."""
option = 'weechat.look.buffer_time_format'
if self.options[option].find('${') >= 0:
str_without_colors = re.sub(r'\$\{[^\}]+\}', '', self.options[option])
length = len(time.strftime(str_without_colors, msgtime))
value = re.compile(r'\$\{[^\}]+\}[^\$]*').sub(self._html_apply_colors, self.options[option])
else:
value = time.strftime(self.options[option], msgtime)
length = len(value)
value = re.compile(r'[^0-9]+').sub(self._html_apply_color_chat_time_delimiters, value)
value = self.html_string(value, 0, 'weechat.color.chat_time')
return (time.strftime(value, msgtime), length)
def html_chat(self, hhmmss, prefix, messages):
"""Print a message in chat area."""
delimiter = self.html_string(':', 0, 'weechat.color.chat_time_delimiters', 'weechat.color.chat_bg')
str_datetime = '2010-12-25 %02d:%02d:%02d' % (hhmmss[0], hhmmss[1], hhmmss[2])
t = time.strptime(str_datetime, '%Y-%m-%d %H:%M:%S')
(str_time, length_time) = self.html_chat_time(t)
return str_time + prefix + \
self.html_string(' │ ', 0, 'weechat.color.chat_prefix_suffix', 'weechat.color.chat_bg') + \
self.html_concat(messages, self.chat_width - length_time - self.prefix_width - 3,
'weechat.color.chat', 'weechat.color.chat_bg')
def to_html(self):
"""Print HTML version of theme."""
self.html_nick_self = 'mario'
channel = '#weechat'
oldtopic = 'Welcome'
newtopic = 'Welcome to %s - help channel for WeeChat' % channel
nicks = ('@carl', '@jessika', '@louise', '%Melody', '%Diego', '+Max',
'sheryl', 'Harold^', 'richard', 'celia', 'Eva', 'freddy', 'lee',
'madeleine', self.html_nick_self, 'mila', 'peter', 'tina', 'Vince', 'warren', 'warren2')
nicks_hosts = ('test@foo.com', 'something@host.com')
chat_msgs = ('Hello!',
'hi mario, I just tested your patch',
'I would like to ask something',
'just ask!',
'WeeChat is great?',
'yes',
'indeed',
'sure',
'of course!',
'affirmative',
'all right',
'obviously...',
'certainly!')
html = []
#html.append('<pre style="line-height: 1.2em">')
html.append('<pre>')
width = self.chat_width + 1 + self.nicklist_width
# title bar
html.append(self.html_string(newtopic, width,
'weechat.bar.title.color_fg', 'weechat.bar.title.color_bg'))
# chat
chat = []
str_prefix_join = self.html_string('-->', self.prefix_width * -1, 'weechat.color.chat_prefix_join', 'weechat.color.chat_bg')
str_prefix_quit = self.html_string('<--', self.prefix_width * -1, 'weechat.color.chat_prefix_quit', 'weechat.color.chat_bg')
str_prefix_network = self.html_string('--', self.prefix_width * -1, 'weechat.color.chat_prefix_network', 'weechat.color.chat_bg')
str_prefix_empty = self.html_string('', self.prefix_width * -1, 'weechat.color.chat', 'weechat.color.chat_bg')
chat.append(self.html_chat((9, 10, 00),
str_prefix_join,
(('', self.html_nick_self),
('weechat.color.chat_delimiters', ' ('),
('weechat.color.chat_host', nicks_hosts[0]),
('weechat.color.chat_delimiters', ')'),
('irc.color.message_join', ' has joined '),
('weechat.color.chat_channel', channel))))
chat.append(self.html_chat((9, 10, 25),
self.html_nick(nicks, 8, True, True, False, self.prefix_width * -1),
(('weechat.color.chat', chat_msgs[0]),)))
chat.append(self.html_chat((9, 11, 2),
str_prefix_network,
(('', nicks[0]),
('weechat.color.chat', ' has changed topic for '),
('weechat.color.chat_channel', channel),
('weechat.color.chat', ' from "'),
('irc.color.topic_old', oldtopic),
('weechat.color.chat', '"'))))
chat.append(self.html_chat((9, 11, 2),
str_prefix_empty,
(('weechat.color.chat', 'to "'),
('irc.color.topic_new', newtopic),
('weechat.color.chat', '"'))))
chat.append(self.html_chat((9, 11, 36),
self.html_nick(nicks, 16, True, True, True, self.prefix_width * -1),
(('weechat.color.chat', chat_msgs[1]),)))
chat.append(self.html_chat((9, 12, 4),
str_prefix_quit,
(('', 'joe'),
('weechat.color.chat_delimiters', ' ('),
('weechat.color.chat_host', nicks_hosts[1]),
('weechat.color.chat_delimiters', ')'),
('irc.color.message_quit', ' has left '),
('weechat.color.chat_channel', channel),
('weechat.color.chat_delimiters', ' ('),
('irc.color.reason_quit', 'bye!'),
('weechat.color.chat_delimiters', ')'))))
chat.append(self.html_chat((9, 15, 58),
self.html_nick(nicks, 12, True, True, False, self.prefix_width * -1),
(('weechat.color.chat', chat_msgs[2]),)))
chat.append(self.html_chat((9, 16, 12),
self.html_nick(nicks, 0, True, True, False, self.prefix_width * -1),
(('weechat.color.chat', chat_msgs[3]),)))
chat.append(self.html_chat((9, 16, 27),
self.html_nick(nicks, 12, True, True, False, self.prefix_width * -1),
(('weechat.color.chat', chat_msgs[4]),)))
for i in range(5, len(chat_msgs)):
chat.append(self.html_chat((9, 17, (i - 5) * 4),
self.html_nick(nicks, i - 2, True, True, False, self.prefix_width * -1),
(('weechat.color.chat', chat_msgs[i]),)))
chat_empty = self.html_string(' ', self.chat_width, 'weechat.color.chat', 'weechat.color.chat_bg')
# separator (between chat and nicklist)
str_separator = self.html_string('│', 0, 'weechat.color.separator', 'weechat.color.chat_bg')
# nicklist
nicklist = []
for index in range(0, len(nicks)):
fg = 'weechat.bar.nicklist.color_fg'
if nicks[index].endswith('a'):
fg = 'weechat.color.nicklist_away'
nicklist.append(self.html_nick(nicks, index, True, True, False, self.nicklist_width, fg, 'weechat.bar.nicklist.color_bg'))
nicklist_empty = self.html_string('', self.nicklist_width, 'weechat.bar.nicklist.color_fg', 'weechat.bar.nicklist.color_bg')
# print chat + nicklist
for i in range (0, self.chat_height):
if i < len(chat):
str1 = chat[i]
else:
str1 = chat_empty
if i < len(nicklist):
str2 = nicklist[i]
else:
str2 = nicklist_empty
html.append('%s%s%s' % (str1, str_separator, str2))
# status
html.append(self.html_concat((('weechat.bar.status.color_delim', '['),
('weechat.color.status_time', '12:34'),
('weechat.bar.status.color_delim', '] ['),
('weechat.bar.status.color_fg', '18'),
('weechat.bar.status.color_delim', '] ['),
('weechat.bar.status.color_fg', 'irc'),
('weechat.bar.status.color_delim', '/'),
('weechat.bar.status.color_fg', 'freenode'),
('weechat.bar.status.color_delim', '] '),
('weechat.color.status_number', '2'),
('weechat.bar.status.color_delim', ':'),
('weechat.color.status_name', '#weechat'),
('weechat.bar.status.color_delim', '('),
('irc.color.item_channel_modes', '+nt'),
('weechat.bar.status.color_delim', '){'),
('weechat.bar.status.color_fg', '%d' % len(nicks)),
('weechat.bar.status.color_delim', '} ['),
('weechat.bar.status.color_fg', 'Act: '),
('weechat.color.status_data_highlight', '3'),
('weechat.bar.status.color_delim', ':'),
('weechat.bar.status.color_fg', '#linux'),
('weechat.bar.status.color_delim', ','),
('weechat.color.status_data_private', '18'),
('weechat.bar.status.color_delim', ','),
('weechat.color.status_data_msg', '4'),
('weechat.bar.status.color_delim', ','),
('weechat.color.status_data_other', '5'),
('weechat.bar.status.color_delim', ','),
('weechat.color.status_data_other', '6'),
('weechat.bar.status.color_delim', ']')),
width, 'weechat.bar.status.color_fg', 'weechat.bar.status.color_bg'))
# input
html.append(self.html_concat((('weechat.bar.input.color_delim', '['),
(self.nick_prefix_color('+'), '+'),
('irc.color.input_nick', self.html_nick_self),
('weechat.bar.input.color_delim', '('),
('weechat.bar.input.color_fg', 'i'),
('weechat.bar.input.color_delim', ')] '),
('weechat.bar.input.color_fg', 'this is misspelled '),
('aspell.look.color', 'woord'),
('weechat.bar.input.color_fg', ' '),
('cursor', ' ')),
width, 'weechat.bar.input.color_fg', 'weechat.bar.input.color_bg'))
# end
html.append('</pre>')
del self.html_nick_self
return '\n'.join(html)
def get_html(self, whitebg=False):
if whitebg:
self.options['fg'] = 'black'
self.options['bg'] = 'white'
self.options['cursor'] = '!black'
else:
self.options['fg'] = '250'
self.options['bg'] = 'black'
self.options['cursor'] = '!yellow'
self.whitebg = whitebg
html = self.to_html()
del self.whitebg
del self.options['fg']
del self.options['bg']
del self.options['cursor']
return html
def save_html(self, filename, whitebg=False):
html = self.get_html(whitebg)
try:
f = open(filename, 'w')
f.write(html)
f.close()
self.prnt('Theme exported as HTML to "%s"' % filename)
except:
self.prnt_error('Error writing HTML to "%s"' % filename)
raise
# ================================[ command ]=================================
def theme_cmd(data, buffer, args):
"""Callback for /theme command."""
if args == '':
weechat.command('', '/help %s' % SCRIPT_COMMAND)
return weechat.WEECHAT_RC_OK
argv = args.strip().split(' ', 1)
if len(argv) == 0:
return weechat.WEECHAT_RC_OK
if argv[0] in ('list', 'install'):
weechat.prnt('', '%s: action "%s" not developed' % (SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# check arguments
if len(argv) < 2:
if argv[0] in ('install', 'installfile', 'save', 'export'):
weechat.prnt('', '%s: too few arguments for action "%s"'
% (SCRIPT_NAME, argv[0]))
return weechat.WEECHAT_RC_OK
# execute asked action
if argv[0] == 'info':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.info('Info about theme "%s":' % filename)
else:
theme.info('Info about current theme:')
elif argv[0] == 'show':
filename = None
if len(argv) >= 2:
filename = argv[1]
theme = Theme(filename)
if filename:
theme.show('Content of theme "%s":' % filename)
else:
theme.show('Content of current theme:')
elif argv[0] == 'installfile':
theme = Theme()
theme.save(theme_config_get_undo())
theme = Theme(argv[1])
if theme.isok():
theme.install()
elif argv[0] == 'undo':
theme = Theme(theme_config_get_undo())
if theme.isok():
theme.install()
elif argv[0] == 'save':
theme = Theme()
theme.save(argv[1])
elif argv[0] == 'backup':
theme = Theme()
theme.save(theme_config_get_backup())
elif argv[0] == 'restore':
theme = Theme(theme_config_get_backup())
if theme.isok():
theme.install()
elif argv[0] == 'export':
htheme = HtmlTheme()
whitebg = False
htmlfile = argv[1]
argv2 = args.strip().split(' ', 2)
if len(argv2) >= 3 and argv2[1] == 'white':
whitebg = True
htmlfile = argv2[2]
htheme.save_html(htmlfile, whitebg)
return weechat.WEECHAT_RC_OK
# ==================================[ main ]==================================
def theme_init():
"""Called when script is loaded."""
theme_config_create_dir()
filename = theme_config_get_backup()
if not os.path.isfile(filename):
theme = Theme()
theme.save(filename)
def theme_cmdline_usage():
"""Display usage."""
padding = ' ' * len(sys.argv[0])
print('')
print('Usage: %s --export <themefile> <htmlfile> [white]' % sys.argv[0])
print(' %s --info <filename>' % padding)
print(' %s --help' % padding)
print('')
print(' -e, --export export a theme file to HTML')
print(' -i, --info display info about a theme')
print(' -h, --help display this help')
print('')
sys.exit(0)
def theme_cmdline():
if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
theme_cmdline_usage()
elif len(sys.argv) > 1:
if sys.argv[1] in ('-e', '--export'):
if len(sys.argv) < 4:
theme_cmdline_usage()
whitebg = 'white' in sys.argv[4:]
htheme = HtmlTheme(sys.argv[2])
htheme.save_html(sys.argv[3], whitebg)
elif sys.argv[1] in ('-i', '--info'):
if len(sys.argv) < 3:
theme_cmdline_usage()
theme = Theme(sys.argv[2])
theme.info('Info about theme "%s":' % sys.argv[2])
else:
theme_cmdline_usage()
if __name__ == '__main__' and import_other_ok:
if import_weechat_ok:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, '', ''):
theme_config_init()
theme_config_read()
theme_init()
weechat.hook_command(SCRIPT_COMMAND,
'WeeChat theme manager',
'list [<text>] || info|show [<theme>] || install <theme>'
' || installfile <file> || undo || save <file> || backup || restore'
' || export [-white] <file>',
' list: list themes (search text if given)\n'
' info: show info about theme (without argument: for current theme)\n'
' show: show all options in theme (without argument: for current theme)\n'
' install: install a theme from repository\n'
'installfile: load theme from a file\n'
' undo: undo last theme install\n'
' save: save current theme in a file\n'
                                 ' backup: backup current theme (by default in ~/.weechat/themes/_backup.theme); this is done the first time the script is loaded\n'
                                 ' restore: restore the theme backed up by the script\n'
' export: save current theme as HTML in a file (with "-white": use white background in HTML)\n\n'
'Examples:\n'
' /' + SCRIPT_COMMAND + ' save /tmp/flashcode.theme => save current theme',
'info %(filename)'
' || show %(filename)'
' || install %(themes)'
' || installfile %(filename)'
' || undo'
' || save %(filename)'
' || backup'
' || restore'
' || export -white|%(filename) %(filename)',
'theme_cmd', '')
else:
theme_cmdline()
|
posquit0/dotfiles
|
weechat/.weechat/python/theme.py
|
Python
|
mit
| 43,544
|
from django.conf import settings
from django.core.validators import MinValueValidator
from openslides.core.config import ConfigVariable
from openslides.motions.models import MotionPoll
from .models import Workflow
def get_workflow_choices():
"""
Returns a list of all workflows to be used as choices for the config variable
'motions_workflow'. Each list item contains the pk and the display name.
"""
return [
{"value": str(workflow.pk), "display_name": workflow.name}
for workflow in Workflow.objects.all()
]
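# Illustrative shape of the returned list (added for clarity; the workflow names are
# placeholders and depend on the database contents):
#
#   [{"value": "1", "display_name": "Simple Workflow"},
#    {"value": "2", "display_name": "Complex Workflow"}]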
def get_config_variables():
"""
Generator which yields all config variables of this app.
They are grouped in 'General', 'Amendments', 'Supporters', 'Voting and ballot
papers' and 'PDF'. The generator has to be evaluated during app loading
(see apps.py).
"""
# General
yield ConfigVariable(
name="motions_workflow",
default_value="1",
input_type="choice",
label="Workflow of new motions",
choices=get_workflow_choices,
weight=310,
group="Motions",
)
yield ConfigVariable(
name="motions_statute_amendments_workflow",
default_value="1",
input_type="choice",
label="Workflow of new statute amendments",
choices=get_workflow_choices,
weight=312,
group="Motions",
)
yield ConfigVariable(
name="motions_amendments_workflow",
default_value="1",
input_type="choice",
label="Workflow of new amendments",
choices=get_workflow_choices,
weight=314,
group="Motions",
)
yield ConfigVariable(
name="motions_preamble",
default_value="The assembly may decide:",
label="Motion preamble",
weight=320,
group="Motions",
)
yield ConfigVariable(
name="motions_default_line_numbering",
default_value="outside",
input_type="choice",
label="Default line numbering",
choices=(
{"value": "outside", "display_name": "outside"},
{"value": "inline", "display_name": "inline"},
{"value": "none", "display_name": "Disabled"},
),
weight=322,
group="Motions",
)
yield ConfigVariable(
name="motions_line_length",
default_value=85,
input_type="integer",
label="Line length",
help_text="The maximum number of characters per line. Relevant when line numbering is enabled. Min: 40",
weight=323,
group="Motions",
validators=(MinValueValidator(40),),
)
yield ConfigVariable(
name="motions_reason_required",
default_value=False,
input_type="boolean",
label="Reason required for creating new motion",
weight=324,
group="Motions",
)
yield ConfigVariable(
name="motions_disable_text_on_projector",
default_value=False,
input_type="boolean",
label="Hide motion text on projector",
weight=325,
group="Motions",
)
yield ConfigVariable(
name="motions_disable_reason_on_projector",
default_value=False,
input_type="boolean",
label="Hide reason on projector",
weight=326,
group="Motions",
)
yield ConfigVariable(
name="motions_disable_recommendation_on_projector",
default_value=False,
input_type="boolean",
label="Hide recommendation on projector",
weight=327,
group="Motions",
)
yield ConfigVariable(
name="motions_hide_referring_motions",
default_value=False,
input_type="boolean",
label="Hide referring motions",
weight=328,
group="Motions",
)
yield ConfigVariable(
name="motions_disable_sidebox_on_projector",
default_value=True,
input_type="boolean",
label="Show meta information box below the title on projector",
weight=329,
group="Motions",
)
yield ConfigVariable(
name="motions_show_sequential_numbers",
default_value=True,
input_type="boolean",
label="Show the sequential number for a motion",
help_text="In motion list, motion detail and PDF.",
weight=330,
group="Motions",
)
yield ConfigVariable(
name="motions_recommendations_by",
default_value="",
label="Name of recommender",
help_text="Will be displayed as label before selected recommendation. Use an empty value to disable the recommendation system.",
weight=332,
group="Motions",
)
yield ConfigVariable(
name="motions_statute_recommendations_by",
default_value="",
label="Name of recommender for statute amendments",
help_text="Will be displayed as label before selected recommendation in statute amendments.",
weight=333,
group="Motions",
)
yield ConfigVariable(
name="motions_recommendation_text_mode",
default_value="diff",
input_type="choice",
label="Default text version for change recommendations",
choices=(
{"value": "original", "display_name": "Original version"},
{"value": "changed", "display_name": "Changed version"},
{"value": "diff", "display_name": "Diff version"},
{"value": "agreed", "display_name": "Final version"},
),
weight=334,
group="Motions",
)
yield ConfigVariable(
name="motions_motions_sorting",
default_value="identifier",
input_type="choice",
label="Sort motions by",
choices=(
{"value": "weight", "display_name": "Call list"},
{"value": "identifier", "display_name": "Identifier"},
),
weight=335,
group="Motions",
)
# Numbering
yield ConfigVariable(
name="motions_identifier",
default_value="per_category",
input_type="choice",
label="Identifier",
choices=(
{"value": "per_category", "display_name": "Numbered per category"},
{"value": "serially_numbered", "display_name": "Serially numbered"},
{"value": "manually", "display_name": "Set it manually"},
),
weight=340,
group="Motions",
subgroup="Numbering",
)
yield ConfigVariable(
name="motions_identifier_min_digits",
default_value=1,
input_type="integer",
label="Number of minimal digits for identifier",
help_text="Uses leading zeros to sort motions correctly by identifier.",
weight=342,
group="Motions",
subgroup="Numbering",
validators=(MinValueValidator(1),),
)
yield ConfigVariable(
name="motions_identifier_with_blank",
default_value=False,
input_type="boolean",
label="Allow blank in identifier",
help_text="Blank between prefix and number, e.g. 'A 001'.",
weight=344,
group="Motions",
subgroup="Numbering",
)
# Amendments
yield ConfigVariable(
name="motions_statutes_enabled",
default_value=False,
input_type="boolean",
label="Activate statute amendments",
weight=350,
group="Motions",
subgroup="Amendments",
)
yield ConfigVariable(
name="motions_amendments_enabled",
default_value=False,
input_type="boolean",
label="Activate amendments",
weight=351,
group="Motions",
subgroup="Amendments",
)
yield ConfigVariable(
name="motions_amendments_main_table",
default_value=True,
input_type="boolean",
label="Show amendments together with motions",
weight=352,
group="Motions",
subgroup="Amendments",
)
yield ConfigVariable(
name="motions_amendments_prefix",
default_value="-",
label="Prefix for the identifier for amendments",
weight=353,
group="Motions",
subgroup="Amendments",
)
yield ConfigVariable(
name="motions_amendments_text_mode",
default_value="paragraph",
input_type="choice",
label="How to create new amendments",
choices=(
{"value": "freestyle", "display_name": "Empty text field"},
{"value": "fulltext", "display_name": "Edit the whole motion text"},
{"value": "paragraph", "display_name": "Paragraph-based, Diff-enabled"},
),
weight=354,
group="Motions",
subgroup="Amendments",
)
yield ConfigVariable(
name="motions_amendments_multiple_paragraphs",
default_value=True,
input_type="boolean",
label="Amendments can change multiple paragraphs",
weight=355,
group="Motions",
subgroup="Amendments",
)
yield ConfigVariable(
name="motions_amendments_of_amendments",
default_value=False,
input_type="boolean",
label="Allow amendments of amendments",
weight=356,
group="Motions",
subgroup="Amendments",
)
# Supporters
yield ConfigVariable(
name="motions_min_supporters",
default_value=0,
input_type="integer",
label="Number of (minimum) required supporters for a motion",
help_text="Choose 0 to disable the supporting system.",
weight=360,
group="Motions",
subgroup="Supporters",
validators=(MinValueValidator(0),),
)
yield ConfigVariable(
name="motions_remove_supporters",
default_value=False,
input_type="boolean",
label="Remove all supporters of a motion if a submitter edits his motion in early state",
weight=361,
group="Motions",
subgroup="Supporters",
)
# Voting and ballot papers
if getattr(settings, "ENABLE_ELECTRONIC_VOTING", False):
motion_poll_type_choices = tuple(
{"value": type[0], "display_name": type[1]} for type in MotionPoll.TYPES
)
else:
motion_poll_type_choices = (
{"value": MotionPoll.TYPE_ANALOG, "display_name": MotionPoll.TYPE_ANALOG},
)
yield ConfigVariable(
name="motion_poll_default_type",
default_value=MotionPoll.TYPE_ANALOG,
input_type="choice",
label="Default voting type",
choices=motion_poll_type_choices,
weight=367,
group="Motions",
subgroup="Voting and ballot papers",
)
yield ConfigVariable(
name="motion_poll_default_100_percent_base",
default_value=MotionPoll.PERCENT_BASE_YNA,
input_type="choice",
label="Default 100 % base of a voting result",
choices=tuple(
{"value": base[0], "display_name": base[1]}
for base in MotionPoll.PERCENT_BASES
),
weight=370,
group="Motions",
subgroup="Voting and ballot papers",
)
yield ConfigVariable(
name="motion_poll_default_majority_method",
default_value=MotionPoll.MAJORITY_SIMPLE,
input_type="choice",
choices=tuple(
{"value": method[0], "display_name": method[1]}
for method in MotionPoll.MAJORITY_METHODS
),
label="Required majority",
help_text="Default method to check whether a motion has reached the required majority.",
weight=371,
hidden=True,
group="Motions",
subgroup="Voting and ballot papers",
)
yield ConfigVariable(
name="motion_poll_default_groups",
default_value=[],
input_type="groups",
label="Default groups with voting rights",
weight=372,
group="Motions",
subgroup="Voting and ballot papers",
)
yield ConfigVariable(
name="motions_pdf_ballot_papers_selection",
default_value="CUSTOM_NUMBER",
input_type="choice",
label="Number of ballot papers",
choices=(
{"value": "NUMBER_OF_DELEGATES", "display_name": "Number of all delegates"},
{
"value": "NUMBER_OF_ALL_PARTICIPANTS",
"display_name": "Number of all participants",
},
{
"value": "CUSTOM_NUMBER",
"display_name": "Use the following custom number",
},
),
weight=373,
group="Motions",
subgroup="Voting and ballot papers",
)
yield ConfigVariable(
name="motions_pdf_ballot_papers_number",
default_value=8,
input_type="integer",
label="Custom number of ballot papers",
weight=374,
group="Motions",
subgroup="Voting and ballot papers",
validators=(MinValueValidator(1),),
)
# PDF export
yield ConfigVariable(
name="motions_export_title",
default_value="Motions",
label="Title for PDF documents of motions",
weight=380,
group="Motions",
subgroup="PDF export",
)
yield ConfigVariable(
name="motions_export_preamble",
default_value="",
label="Preamble text for PDF documents of motions",
weight=382,
group="Motions",
subgroup="PDF export",
)
yield ConfigVariable(
name="motions_export_submitter_recommendation",
default_value=False,
label="Show submitters and recommendation/state in table of contents",
input_type="boolean",
weight=384,
group="Motions",
subgroup="PDF export",
)
yield ConfigVariable(
name="motions_export_follow_recommendation",
default_value=False,
label="Show checkbox to record decision",
input_type="boolean",
weight=386,
group="Motions",
subgroup="PDF export",
)
|
FinnStutzenstein/OpenSlides
|
server/openslides/motions/config_variables.py
|
Python
|
mit
| 13,984
|
# SPDX-License-Identifier: MIT
"""
Commonly useful filters for `attr.asdict`.
"""
from __future__ import absolute_import, division, print_function
from ._compat import isclass
from ._make import Attribute
def _split_what(what):
"""
Returns a tuple of `frozenset`s of classes and attributes.
"""
return (
frozenset(cls for cls in what if isclass(cls)),
frozenset(cls for cls in what if isinstance(cls, Attribute)),
)
def include(*what):
"""
Include *what*.
:param what: What to include.
:type what: `list` of `type` or `attrs.Attribute`\\ s
:rtype: `callable`
"""
cls, attrs = _split_what(what)
def include_(attribute, value):
return value.__class__ in cls or attribute in attrs
return include_
def exclude(*what):
"""
Exclude *what*.
:param what: What to exclude.
:type what: `list` of classes or `attrs.Attribute`\\ s.
:rtype: `callable`
"""
cls, attrs = _split_what(what)
def exclude_(attribute, value):
return value.__class__ not in cls and attribute not in attrs
return exclude_
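# Usage sketch: how include()/exclude() plug into the ``filter`` argument of
# attr.asdict. Only the public attr API (attr.s, attr.ib, attr.fields,
# attr.asdict) is assumed; the field names below are illustrative.
#
#     import attr
#
#     @attr.s
#     class User(object):
#         name = attr.ib()
#         password = attr.ib()
#
#     user = User("alice", "hunter2")
#     attr.asdict(user, filter=exclude(attr.fields(User).password))
#     # -> {"name": "alice"}
#     attr.asdict(user, filter=include(str))
#     # -> {"name": "alice", "password": "hunter2"}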
|
python-attrs/attrs
|
src/attr/filters.py
|
Python
|
mit
| 1,124
|
import beaglebone_pru_adc as adc
import time
numsamples = 10000 # how many samples to capture
capture = adc.Capture()
capture.oscilloscope_init(adc.OFF_VALUES, numsamples) # captures AIN0 - the first elt in AIN array
#capture.oscilloscope_init(adc.OFF_VALUES+8, numsamples) # captures AIN2 - the third elt in AIN array
capture.start()
for _ in range(10):
if capture.oscilloscope_is_complete():
break
print '.'
time.sleep(0.1)
capture.stop()
capture.wait()
print 'Saving oscilloscope values to "data.csv"'
with open('data.csv', 'w') as f:
for x in capture.oscilloscope_data(numsamples):
f.write(str(x) + '\n')
print 'done'
capture.close()
|
thegmarlow/TagTrack-
|
examples/oscilloscope.py
|
Python
|
mit
| 678
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
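                # Per BIP125, any input with nSequence below 0xfffffffe
                # signals that the transaction is opt-in replaceable.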
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
|
daliwangi/bitcoin
|
test/functional/listtransactions.py
|
Python
|
mit
| 10,471
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
        stack = []
        # After removing k digits, the answer keeps len(num) - k characters.
        length = len(num) - k
        for c in num:
            # Greedily drop any previous digit that is larger than the current
            # one while removals remain; this keeps the retained prefix as
            # small as possible (monotonically non-decreasing stack).
            while k and stack and stack[-1] > c:
                stack.pop()
                k -= 1
            stack.append(c)
        # Truncate to the target length (covers leftover k on non-decreasing
        # input), strip leading zeros, and fall back to '0' if nothing is left.
        return ''.join(stack[:length]).lstrip('0') or '0'
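# Illustrative expected behaviour (standard examples for this problem):
#   Solution().removeKdigits("1432219", 3) -> "1219"
#   Solution().removeKdigits("10200", 1)   -> "200"
#   Solution().removeKdigits("10", 2)      -> "0"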
|
Mlieou/leetcode_python
|
leetcode/python/ex_402.py
|
Python
|
mit
| 405
|
#Program to find the LCM of two numbers
#Function to find GCD
def gcd(num1, num2):
if num1 == num2:
return num1
if num1 > num2:
return gcd(num1-num2, num2)
return gcd(num1, num2-num1)
#Function to find LCM
def lcm(num1, num2):
return (num1*num2) // gcd(num1, num2)
#Driver function for testing above
def test():
num1, num2 = 12, 4
print('LCM of {} and {} is {}'.format(num1, num2, lcm(num1, num2)))
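# Worked trace for these inputs: gcd(12, 4) -> gcd(8, 4) -> gcd(4, 4) -> 4,
# so lcm(12, 4) = 12*4 // 4 = 12.
if __name__ == '__main__':
    # Optional driver guard so test() runs when this file is executed directly.
    test()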
|
Deepak345/al-go-rithms
|
math/LCM/Python/LCM.py
|
Python
|
mit
| 469
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# ++ single anchor added to individual output file
# ++ two anchors added to individual output file
# ++ anchors added to individual output files
# ++ entry added to index
# ++ index entry pointing to correct file and anchor
# ++ multiple entries added to index
# ++ multiple index entries pointing to correct files and anchors
# __ all of above for files in deep directory structure
#
# ++ group index entries by indexed term
# ++ sort index entries by indexed term
# __ hierarchical index entries (e.g. language!programming)
#
# ++ add parameter for what the index filename should be
# ++ add (default) ability to NOT index (if index not specified)
#
# ++ put actual index filename into INDEX link (if any) in the template
# __ make index links RELATIVE!
# __ make index pay attention to the outputdir!
#
# __ make index look nice
#
# ++ add section numbers to headers in lore output
# ++ make text of index entry links be chapter numbers
# ++ make text of index entry links be section numbers
#
# __ put all of our test files someplace neat and tidy
#
import os, shutil, errno, time
from StringIO import StringIO
from xml.dom import minidom as dom
from twisted.trial import unittest
from twisted.python.filepath import FilePath
from twisted.lore import tree, process, indexer, numberer, htmlbook, default
from twisted.lore.default import factory
from twisted.lore.latex import LatexSpitter
from twisted.python.util import sibpath
from twisted.lore.scripts import lore
from twisted.web import domhelpers
from twisted.test.testutils import XMLAssertionMixin
def sp(originalFileName):
return sibpath(__file__, originalFileName)
options = {"template" : sp("template.tpl"), 'baseurl': '%s', 'ext': '.xhtml'}
d = options
class RemoveBlanksTests(unittest.TestCase):
"""
Tests for L{tree._removeLeadingBlankLines} and
L{tree._removeLeadingTrailingBlankLines}.
"""
def setUp(self):
self.inputString = '\n\n\n\nfoo\nbar\n\n\n'
def test_removeLeadingBlankLines(self):
"""
L{tree._removeLeadingBlankLines} removes leading blank lines from a string and returns a list containing the remaining characters.
"""
result = tree._removeLeadingBlankLines(self.inputString)
self.assertEqual(result,
['f', 'o', 'o', '\n', 'b', 'a', 'r', '\n', '\n', '\n'])
def test_removeLeadingTrailingBlankLines(self):
"""
L{tree._removeLeadingTrailingBlankLines} removes leading and trailing
blank lines from a string and returns a string with all lines joined.
"""
result = tree._removeLeadingTrailingBlankLines(self.inputString)
self.assertEqual(result, 'foo\nbar\n')
class TestFactory(unittest.TestCase, XMLAssertionMixin):
file = sp('simple.html')
linkrel = ""
def assertEqualFiles1(self, exp, act):
if (exp == act): return True
fact = open(act)
self.assertEqualsFile(exp, fact.read())
def assertEqualFiles(self, exp, act):
if (exp == act): return True
fact = open(sp(act))
self.assertEqualsFile(exp, fact.read())
def assertEqualsFile(self, exp, act):
expected = open(sp(exp)).read()
self.assertEqual(expected, act)
def makeTemp(self, *filenames):
tmp = self.mktemp()
os.mkdir(tmp)
for filename in filenames:
tmpFile = os.path.join(tmp, filename)
shutil.copyfile(sp(filename), tmpFile)
return tmp
########################################
def setUp(self):
indexer.reset()
numberer.reset()
def testProcessingFunctionFactory(self):
base = FilePath(self.mktemp())
base.makedirs()
simple = base.child('simple.html')
FilePath(__file__).sibling('simple.html').copyTo(simple)
htmlGenerator = factory.generate_html(options)
htmlGenerator(simple.path, self.linkrel)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
simple.sibling('simple.xhtml').getContent())
def testProcessingFunctionFactoryWithFilenameGenerator(self):
base = FilePath(self.mktemp())
base.makedirs()
def filenameGenerator(originalFileName, outputExtension):
name = os.path.splitext(FilePath(originalFileName).basename())[0]
return base.child(name + outputExtension).path
htmlGenerator = factory.generate_html(options, filenameGenerator)
htmlGenerator(self.file, self.linkrel)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple.xhtml").getContent())
def test_doFile(self):
base = FilePath(self.mktemp())
base.makedirs()
simple = base.child('simple.html')
FilePath(__file__).sibling('simple.html').copyTo(simple)
templ = dom.parse(open(d['template']))
tree.doFile(simple.path, self.linkrel, d['ext'], d['baseurl'], templ, d)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple.xhtml").getContent())
def test_doFile_withFilenameGenerator(self):
base = FilePath(self.mktemp())
base.makedirs()
def filenameGenerator(originalFileName, outputExtension):
name = os.path.splitext(FilePath(originalFileName).basename())[0]
return base.child(name + outputExtension).path
templ = dom.parse(open(d['template']))
tree.doFile(self.file, self.linkrel, d['ext'], d['baseurl'], templ, d, filenameGenerator)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple.xhtml").getContent())
def test_munge(self):
indexer.setIndexFilename("lore_index_file.html")
doc = dom.parse(open(self.file))
node = dom.parse(open(d['template']))
tree.munge(doc, node, self.linkrel,
os.path.dirname(self.file),
self.file,
d['ext'], d['baseurl'], d)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="lore_index_file.html">Index</a>
</body>
</html>""",
node.toxml())
def test_mungeAuthors(self):
"""
If there is a node with a I{class} attribute set to C{"authors"},
L{tree.munge} adds anchors as children to it, taking the necessary
information from any I{link} nodes in the I{head} with their I{rel}
attribute set to C{"author"}.
"""
document = dom.parseString(
"""\
<html>
<head>
<title>munge authors</title>
<link rel="author" title="foo" href="bar"/>
<link rel="author" title="baz" href="quux"/>
<link rel="author" title="foobar" href="barbaz"/>
</head>
<body>
<h1>munge authors</h1>
</body>
</html>""")
template = dom.parseString(
"""\
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<head>
<title />
</head>
<body>
<div class="body" />
<div class="authors" />
</body>
</html>
""")
tree.munge(
document, template, self.linkrel, os.path.dirname(self.file),
self.file, d['ext'], d['baseurl'], d)
self.assertXMLEqual(
template.toxml(),
"""\
<?xml version="1.0" ?><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>munge authors</title>
<link href="bar" rel="author" title="foo"/><link href="quux" rel="author" title="baz"/><link href="barbaz" rel="author" title="foobar"/></head>
<body>
<div class="content">
<span/>
</div>
<div class="authors"><span><a href="bar">foo</a>, <a href="quux">baz</a>, and <a href="barbaz">foobar</a></span></div>
</body>
</html>""")
def test_getProcessor(self):
base = FilePath(self.mktemp())
base.makedirs()
input = base.child("simple3.html")
FilePath(__file__).sibling("simple3.html").copyTo(input)
options = { 'template': sp('template.tpl'), 'ext': '.xhtml', 'baseurl': 'burl',
'filenameMapping': None }
p = process.getProcessor(default, "html", options)
p(input.path, self.linkrel)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: My Test Lore Input</title></head>
<body bgcolor="white">
<h1 class="title">My Test Lore Input</h1>
<div class="content">
<span/>
<p>A Body.</p>
</div>
<a href="index.xhtml">Index</a>
</body>
</html>""",
base.child("simple3.xhtml").getContent())
def test_outputdirGenerator(self):
normp = os.path.normpath; join = os.path.join
inputdir = normp(join("/", 'home', 'joe'))
outputdir = normp(join("/", 'away', 'joseph'))
actual = process.outputdirGenerator(join("/", 'home', 'joe', "myfile.html"),
'.xhtml', inputdir, outputdir)
expected = normp(join("/", 'away', 'joseph', 'myfile.xhtml'))
self.assertEqual(expected, actual)
def test_outputdirGeneratorBadInput(self):
options = {'outputdir': '/away/joseph/', 'inputdir': '/home/joe/' }
self.assertRaises(ValueError, process.outputdirGenerator, '.html', '.xhtml', **options)
def test_makeSureDirectoryExists(self):
dirname = os.path.join("tmp", 'nonexistentdir')
if os.path.exists(dirname):
os.rmdir(dirname)
self.failIf(os.path.exists(dirname), "Hey: someone already created the dir")
filename = os.path.join(dirname, 'newfile')
tree.makeSureDirectoryExists(filename)
self.failUnless(os.path.exists(dirname), 'should have created dir')
os.rmdir(dirname)
def test_indexAnchorsAdded(self):
indexer.setIndexFilename('theIndexFile.html')
# generate the output file
templ = dom.parse(open(d['template']))
tmp = self.makeTemp('lore_index_test.xhtml')
tree.doFile(os.path.join(tmp, 'lore_index_test.xhtml'),
self.linkrel, '.html', d['baseurl'], templ, d)
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: The way of the program</title></head>
<body bgcolor="white">
<h1 class="title">The way of the program</h1>
<div class="content">
<span/>
<p>The first paragraph.</p>
<h2>The Python programming language<a name="auto0"/></h2>
<a name="index01"/>
<a name="index02"/>
<p>The second paragraph.</p>
</div>
<a href="theIndexFile.html">Index</a>
</body>
</html>""",
FilePath(tmp).child("lore_index_test.html").getContent())
def test_indexEntriesAdded(self):
indexer.addEntry('lore_index_test.html', 'index02', 'language of programming', '1.3')
indexer.addEntry('lore_index_test.html', 'index01', 'programming language', '1.2')
indexer.setIndexFilename("lore_index_file.html")
indexer.generateIndex()
self.assertEqualFiles1("lore_index_file_out.html", "lore_index_file.html")
def test_book(self):
tmp = self.makeTemp()
inputFilename = sp('lore_index_test.xhtml')
bookFilename = os.path.join(tmp, 'lore_test_book.book')
bf = open(bookFilename, 'w')
bf.write('Chapter(r"%s", None)\r\n' % inputFilename)
bf.close()
book = htmlbook.Book(bookFilename)
expected = {'indexFilename': None,
'chapters': [(inputFilename, None)],
}
dct = book.__dict__
for k in dct:
self.assertEqual(dct[k], expected[k])
def test_runningLore(self):
options = lore.Options()
tmp = self.makeTemp('lore_index_test.xhtml')
templateFilename = sp('template.tpl')
inputFilename = os.path.join(tmp, 'lore_index_test.xhtml')
indexFilename = 'theIndexFile'
bookFilename = os.path.join(tmp, 'lore_test_book.book')
bf = open(bookFilename, 'w')
bf.write('Chapter(r"%s", None)\n' % inputFilename)
bf.close()
options.parseOptions(['--null', '--book=%s' % bookFilename,
'--config', 'template=%s' % templateFilename,
'--index=%s' % indexFilename
])
result = lore.runGivenOptions(options)
self.assertEqual(None, result)
self.assertEqualFiles1("lore_index_file_unnumbered_out.html", indexFilename + ".html")
def test_runningLoreMultipleFiles(self):
tmp = self.makeTemp('lore_index_test.xhtml', 'lore_index_test2.xhtml')
templateFilename = sp('template.tpl')
inputFilename = os.path.join(tmp, 'lore_index_test.xhtml')
inputFilename2 = os.path.join(tmp, 'lore_index_test2.xhtml')
indexFilename = 'theIndexFile'
bookFilename = os.path.join(tmp, 'lore_test_book.book')
bf = open(bookFilename, 'w')
bf.write('Chapter(r"%s", None)\n' % inputFilename)
bf.write('Chapter(r"%s", None)\n' % inputFilename2)
bf.close()
options = lore.Options()
options.parseOptions(['--null', '--book=%s' % bookFilename,
'--config', 'template=%s' % templateFilename,
'--index=%s' % indexFilename
])
result = lore.runGivenOptions(options)
self.assertEqual(None, result)
self.assertEqual(
# XXX This doesn't seem like a very good index file.
"""\
aahz: <a href="lore_index_test2.html#index03">link</a><br />
aahz2: <a href="lore_index_test2.html#index02">link</a><br />
language of programming: <a href="lore_index_test.html#index02">link</a>, <a href="lore_index_test2.html#index01">link</a><br />
programming language: <a href="lore_index_test.html#index01">link</a><br />
""",
file(FilePath(indexFilename + ".html").path).read())
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: The way of the program</title></head>
<body bgcolor="white">
<h1 class="title">The way of the program</h1>
<div class="content">
<span/>
<p>The first paragraph.</p>
<h2>The Python programming language<a name="auto0"/></h2>
<a name="index01"/>
<a name="index02"/>
<p>The second paragraph.</p>
</div>
<a href="theIndexFile.html">Index</a>
</body>
</html>""",
FilePath(tmp).child("lore_index_test.html").getContent())
self.assertXMLEqual(
"""\
<?xml version="1.0" ?><!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'><html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head><title>Twisted Documentation: The second page to index</title></head>
<body bgcolor="white">
<h1 class="title">The second page to index</h1>
<div class="content">
<span/>
<p>The first paragraph of the second page.</p>
<h2>The Jython programming language<a name="auto0"/></h2>
<a name="index01"/>
<a name="index02"/>
<a name="index03"/>
<p>The second paragraph of the second page.</p>
</div>
<a href="theIndexFile.html">Index</a>
</body>
</html>""",
FilePath(tmp).child("lore_index_test2.html").getContent())
def XXXtest_NumberedSections(self):
# run two files through lore, with numbering turned on
# every h2 should be numbered:
# first file's h2s should be 1.1, 1.2
# second file's h2s should be 2.1, 2.2
templateFilename = sp('template.tpl')
inputFilename = sp('lore_numbering_test.xhtml')
inputFilename2 = sp('lore_numbering_test2.xhtml')
indexFilename = 'theIndexFile'
# you can number without a book:
options = lore.Options()
options.parseOptions(['--null',
'--index=%s' % indexFilename,
'--config', 'template=%s' % templateFilename,
'--config', 'ext=%s' % ".tns",
'--number',
inputFilename, inputFilename2])
result = lore.runGivenOptions(options)
self.assertEqual(None, result)
#self.assertEqualFiles1("lore_index_file_out_multiple.html", indexFilename + ".tns")
# VVV change to new, numbered files
self.assertEqualFiles("lore_numbering_test_out.html", "lore_numbering_test.tns")
self.assertEqualFiles("lore_numbering_test_out2.html", "lore_numbering_test2.tns")
def test_setTitle(self):
"""
L{tree.setTitle} inserts the given title into the first I{title}
element and the first element with the I{title} class in the given
template.
"""
parent = dom.Element('div')
firstTitle = dom.Element('title')
parent.appendChild(firstTitle)
secondTitle = dom.Element('span')
secondTitle.setAttribute('class', 'title')
parent.appendChild(secondTitle)
titleNodes = [dom.Text()]
# minidom has issues with cloning documentless-nodes. See Python issue
# 4851.
titleNodes[0].ownerDocument = dom.Document()
titleNodes[0].data = 'foo bar'
tree.setTitle(parent, titleNodes, None)
self.assertEqual(firstTitle.toxml(), '<title>foo bar</title>')
self.assertEqual(
secondTitle.toxml(), '<span class="title">foo bar</span>')
def test_setTitleWithChapter(self):
"""
L{tree.setTitle} includes a chapter number if it is passed one.
"""
document = dom.Document()
parent = dom.Element('div')
parent.ownerDocument = document
title = dom.Element('title')
parent.appendChild(title)
titleNodes = [dom.Text()]
titleNodes[0].ownerDocument = document
titleNodes[0].data = 'foo bar'
# Oh yea. The numberer has to agree to put the chapter number in, too.
numberer.setNumberSections(True)
tree.setTitle(parent, titleNodes, '13')
self.assertEqual(title.toxml(), '<title>13. foo bar</title>')
def test_setIndexLink(self):
"""
Tests to make sure that index links are processed when an index page
exists and removed when there is not.
"""
templ = dom.parse(open(d['template']))
indexFilename = 'theIndexFile'
numLinks = len(domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
# if our testing template has no index-link nodes, complain about it
self.assertNotEquals(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
tree.setIndexLink(templ, indexFilename)
self.assertEqual(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
indexLinks = domhelpers.findElementsWithAttribute(templ,
"href",
indexFilename)
self.assertTrue(len(indexLinks) >= numLinks)
templ = dom.parse(open(d['template']))
self.assertNotEquals(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
indexFilename = None
tree.setIndexLink(templ, indexFilename)
self.assertEqual(
[],
domhelpers.findElementsWithAttribute(templ,
"class",
"index-link"))
def test_addMtime(self):
"""
L{tree.addMtime} inserts a text node giving the last modification time
of the specified file wherever it encounters an element with the
I{mtime} class.
"""
path = FilePath(self.mktemp())
path.setContent('')
when = time.ctime(path.getModificationTime())
parent = dom.Element('div')
mtime = dom.Element('span')
mtime.setAttribute('class', 'mtime')
parent.appendChild(mtime)
tree.addMtime(parent, path.path)
self.assertEqual(
mtime.toxml(), '<span class="mtime">' + when + '</span>')
def test_makeLineNumbers(self):
"""
L{tree._makeLineNumbers} takes an integer and returns a I{p} tag with
that number of line numbers in it.
"""
numbers = tree._makeLineNumbers(1)
self.assertEqual(numbers.tagName, 'p')
self.assertEqual(numbers.getAttribute('class'), 'py-linenumber')
self.assertIsInstance(numbers.firstChild, dom.Text)
self.assertEqual(numbers.firstChild.nodeValue, '1\n')
numbers = tree._makeLineNumbers(10)
self.assertEqual(numbers.tagName, 'p')
self.assertEqual(numbers.getAttribute('class'), 'py-linenumber')
self.assertIsInstance(numbers.firstChild, dom.Text)
self.assertEqual(
numbers.firstChild.nodeValue,
' 1\n 2\n 3\n 4\n 5\n'
' 6\n 7\n 8\n 9\n10\n')
def test_fontifyPythonNode(self):
"""
L{tree.fontifyPythonNode} accepts a text node and replaces it in its
parent with a syntax colored and line numbered version of the Python
source it contains.
"""
parent = dom.Element('div')
source = dom.Text()
source.data = 'def foo():\n pass\n'
parent.appendChild(source)
tree.fontifyPythonNode(source)
expected = """\
<div><pre class="python"><p class="py-linenumber">1
2
</p><span class="py-src-keyword">def</span> <span class="py-src-identifier">foo</span>():
<span class="py-src-keyword">pass</span>
</pre></div>"""
self.assertEqual(parent.toxml(), expected)
def test_addPyListings(self):
"""
L{tree.addPyListings} accepts a document with nodes with their I{class}
attribute set to I{py-listing} and replaces those nodes with Python
source listings from the file given by the node's I{href} attribute.
"""
listingPath = FilePath(self.mktemp())
listingPath.setContent('def foo():\n pass\n')
parent = dom.Element('div')
listing = dom.Element('a')
listing.setAttribute('href', listingPath.basename())
listing.setAttribute('class', 'py-listing')
parent.appendChild(listing)
tree.addPyListings(parent, listingPath.dirname())
expected = """\
<div><div class="py-listing"><pre><p class="py-linenumber">1
2
</p><span class="py-src-keyword">def</span> <span class="py-src-identifier">foo</span>():
<span class="py-src-keyword">pass</span>
</pre><div class="caption"> - <a href="temp"><span class="filename">temp</span></a></div></div></div>"""
self.assertEqual(parent.toxml(), expected)
def test_addPyListingsSkipLines(self):
"""
If a node with the I{py-listing} class also has a I{skipLines}
attribute, that number of lines from the beginning of the source
listing are omitted.
"""
listingPath = FilePath(self.mktemp())
listingPath.setContent('def foo():\n pass\n')
parent = dom.Element('div')
listing = dom.Element('a')
listing.setAttribute('href', listingPath.basename())
listing.setAttribute('class', 'py-listing')
listing.setAttribute('skipLines', 1)
parent.appendChild(listing)
tree.addPyListings(parent, listingPath.dirname())
expected = """\
<div><div class="py-listing"><pre><p class="py-linenumber">1
</p> <span class="py-src-keyword">pass</span>
</pre><div class="caption"> - <a href="temp"><span class="filename">temp</span></a></div></div></div>"""
self.assertEqual(parent.toxml(), expected)
def test_fixAPI(self):
"""
The element passed to L{tree.fixAPI} has all of its children with the
I{API} class rewritten to contain links to the API which is referred to
by the text they contain.
"""
parent = dom.Element('div')
link = dom.Element('span')
link.setAttribute('class', 'API')
text = dom.Text()
text.data = 'foo'
link.appendChild(text)
parent.appendChild(link)
tree.fixAPI(parent, 'http://example.com/%s')
self.assertEqual(
parent.toxml(),
'<div><span class="API">'
'<a href="http://example.com/foo" title="foo">foo</a>'
'</span></div>')
def test_fixAPIBase(self):
"""
If a node with the I{API} class and a value for the I{base} attribute
is included in the DOM passed to L{tree.fixAPI}, the link added to that
node refers to the API formed by joining the value of the I{base}
attribute to the text contents of the node.
"""
parent = dom.Element('div')
link = dom.Element('span')
link.setAttribute('class', 'API')
link.setAttribute('base', 'bar')
text = dom.Text()
text.data = 'baz'
link.appendChild(text)
parent.appendChild(link)
tree.fixAPI(parent, 'http://example.com/%s')
self.assertEqual(
parent.toxml(),
'<div><span class="API">'
'<a href="http://example.com/bar.baz" title="bar.baz">baz</a>'
'</span></div>')
def test_fixLinks(self):
"""
Links in the nodes of the DOM passed to L{tree.fixLinks} have their
extensions rewritten to the given extension.
"""
parent = dom.Element('div')
link = dom.Element('a')
link.setAttribute('href', 'foo.html')
parent.appendChild(link)
tree.fixLinks(parent, '.xhtml')
self.assertEqual(parent.toxml(), '<div><a href="foo.xhtml"/></div>')
def test_setVersion(self):
"""
Nodes of the DOM passed to L{tree.setVersion} which have the I{version}
class have the given version added to them a child.
"""
parent = dom.Element('div')
version = dom.Element('span')
version.setAttribute('class', 'version')
parent.appendChild(version)
tree.setVersion(parent, '1.2.3')
self.assertEqual(
parent.toxml(), '<div><span class="version">1.2.3</span></div>')
def test_footnotes(self):
"""
L{tree.footnotes} finds all of the nodes with the I{footnote} class in
the DOM passed to it and adds a footnotes section to the end of the
I{body} element which includes them. It also inserts links to those
footnotes from the original definition location.
"""
parent = dom.Element('div')
body = dom.Element('body')
footnote = dom.Element('span')
footnote.setAttribute('class', 'footnote')
text = dom.Text()
text.data = 'this is the footnote'
footnote.appendChild(text)
body.appendChild(footnote)
body.appendChild(dom.Element('p'))
parent.appendChild(body)
tree.footnotes(parent)
self.assertEqual(
parent.toxml(),
'<div><body>'
'<a href="#footnote-1" title="this is the footnote">'
'<super>1</super>'
'</a>'
'<p/>'
'<h2>Footnotes</h2>'
'<ol><li><a name="footnote-1">'
'<span class="footnote">this is the footnote</span>'
'</a></li></ol>'
'</body></div>')
def test_generateTableOfContents(self):
"""
L{tree.generateToC} returns an element which contains a table of
contents generated from the headers in the document passed to it.
"""
parent = dom.Element('body')
header = dom.Element('h2')
text = dom.Text()
text.data = u'header & special character'
header.appendChild(text)
parent.appendChild(header)
subheader = dom.Element('h3')
text = dom.Text()
text.data = 'subheader'
subheader.appendChild(text)
parent.appendChild(subheader)
tableOfContents = tree.generateToC(parent)
self.assertEqual(
tableOfContents.toxml(),
'<ol><li><a href="#auto0">header & special character</a></li><ul><li><a href="#auto1">subheader</a></li></ul></ol>')
self.assertEqual(
header.toxml(),
'<h2>header & special character<a name="auto0"/></h2>')
self.assertEqual(
subheader.toxml(),
'<h3>subheader<a name="auto1"/></h3>')
def test_putInToC(self):
"""
L{tree.putInToC} replaces all of the children of the first node with
the I{toc} class with the given node representing a table of contents.
"""
parent = dom.Element('div')
toc = dom.Element('span')
toc.setAttribute('class', 'toc')
toc.appendChild(dom.Element('foo'))
parent.appendChild(toc)
tree.putInToC(parent, dom.Element('toc'))
self.assertEqual(toc.toxml(), '<span class="toc"><toc/></span>')
def test_invalidTableOfContents(self):
"""
If passed a document with I{h3} elements before any I{h2} element,
L{tree.generateToC} raises L{ValueError} explaining that this is not a
valid document.
"""
parent = dom.Element('body')
parent.appendChild(dom.Element('h3'))
err = self.assertRaises(ValueError, tree.generateToC, parent)
self.assertEqual(
str(err), "No H3 element is allowed until after an H2 element")
def test_notes(self):
"""
L{tree.notes} inserts some additional markup before the first child of
any node with the I{note} class.
"""
parent = dom.Element('div')
noteworthy = dom.Element('span')
noteworthy.setAttribute('class', 'note')
noteworthy.appendChild(dom.Element('foo'))
parent.appendChild(noteworthy)
tree.notes(parent)
self.assertEqual(
noteworthy.toxml(),
'<span class="note"><strong>Note: </strong><foo/></span>')
def test_findNodeJustBefore(self):
"""
L{tree.findNodeJustBefore} returns the previous sibling of the node it
is passed. The list of nodes passed in is ignored.
"""
parent = dom.Element('div')
result = dom.Element('foo')
target = dom.Element('bar')
parent.appendChild(result)
parent.appendChild(target)
self.assertIdentical(
tree.findNodeJustBefore(target, [parent, result]),
result)
        # Also, support other configurations. This is really not a nice API.
newTarget = dom.Element('baz')
target.appendChild(newTarget)
self.assertIdentical(
tree.findNodeJustBefore(newTarget, [parent, result]),
result)
def test_getSectionNumber(self):
"""
L{tree.getSectionNumber} accepts an I{H2} element and returns its text
content.
"""
header = dom.Element('foo')
text = dom.Text()
text.data = 'foobar'
header.appendChild(text)
self.assertEqual(tree.getSectionNumber(header), 'foobar')
def test_numberDocument(self):
"""
L{tree.numberDocument} inserts section numbers into the text of each
header.
"""
parent = dom.Element('foo')
section = dom.Element('h2')
text = dom.Text()
text.data = 'foo'
section.appendChild(text)
parent.appendChild(section)
tree.numberDocument(parent, '7')
self.assertEqual(section.toxml(), '<h2>7.1 foo</h2>')
def test_parseFileAndReport(self):
"""
L{tree.parseFileAndReport} parses the contents of the filename passed
to it and returns the corresponding DOM.
"""
path = FilePath(self.mktemp())
path.setContent('<foo bar="baz">hello</foo>\n')
document = tree.parseFileAndReport(path.path)
self.assertXMLEqual(
document.toxml(),
'<?xml version="1.0" ?><foo bar="baz">hello</foo>')
def test_parseFileAndReportMismatchedTags(self):
"""
If the contents of the file passed to L{tree.parseFileAndReport}
contain a mismatched tag, L{process.ProcessingFailure} is raised
indicating the location of the open and close tags which were
mismatched.
"""
path = FilePath(self.mktemp())
path.setContent(' <foo>\n\n </bar>')
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, path.path)
self.assertEqual(
str(err),
"mismatched close tag at line 3, column 4; expected </foo> "
"(from line 1, column 2)")
        # Test a case which involves proper close tag handling.
path.setContent('<foo><bar></bar>\n </baz>')
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, path.path)
self.assertEqual(
str(err),
"mismatched close tag at line 2, column 4; expected </foo> "
"(from line 1, column 0)")
def test_parseFileAndReportParseError(self):
"""
If the contents of the file passed to L{tree.parseFileAndReport} cannot
be parsed for a reason other than mismatched tags,
L{process.ProcessingFailure} is raised with a string describing the
parse error.
"""
path = FilePath(self.mktemp())
path.setContent('\n foo')
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, path.path)
self.assertEqual(str(err), 'syntax error at line 2, column 3')
def test_parseFileAndReportIOError(self):
"""
If an L{IOError} is raised while reading from the file specified to
L{tree.parseFileAndReport}, a L{process.ProcessingFailure} is raised
indicating what the error was. The file should be closed by the
time the exception is raised to the caller.
"""
class FakeFile:
_open = True
def read(self, bytes=None):
raise IOError(errno.ENOTCONN, 'socket not connected')
def close(self):
self._open = False
theFile = FakeFile()
def fakeOpen(filename):
return theFile
err = self.assertRaises(
process.ProcessingFailure, tree.parseFileAndReport, "foo", fakeOpen)
self.assertEqual(str(err), "socket not connected, filename was 'foo'")
self.assertFalse(theFile._open)
class XMLParsingTests(unittest.TestCase):
"""
Tests for various aspects of parsing a Lore XML input document using
L{tree.parseFileAndReport}.
"""
def _parseTest(self, xml):
path = FilePath(self.mktemp())
path.setContent(xml)
return tree.parseFileAndReport(path.path)
def test_withoutDocType(self):
"""
A Lore XML input document may omit a I{DOCTYPE} declaration. If it
does so, the XHTML1 Strict DTD is used.
"""
# Parsing should succeed.
document = self._parseTest("<foo>uses an xhtml entity: ©</foo>")
# But even more than that, the © entity should be turned into the
# appropriate unicode codepoint.
self.assertEqual(
domhelpers.gatherTextNodes(document.documentElement),
u"uses an xhtml entity: \N{COPYRIGHT SIGN}")
def test_withTransitionalDocType(self):
"""
A Lore XML input document may include a I{DOCTYPE} declaration
referring to the XHTML1 Transitional DTD.
"""
# Parsing should succeed.
document = self._parseTest("""\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<foo>uses an xhtml entity: ©</foo>
""")
# But even more than that, the © entity should be turned into the
# appropriate unicode codepoint.
self.assertEqual(
domhelpers.gatherTextNodes(document.documentElement),
u"uses an xhtml entity: \N{COPYRIGHT SIGN}")
def test_withStrictDocType(self):
"""
A Lore XML input document may include a I{DOCTYPE} declaration
referring to the XHTML1 Strict DTD.
"""
# Parsing should succeed.
document = self._parseTest("""\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<foo>uses an xhtml entity: ©</foo>
""")
# But even more than that, the © entity should be turned into the
# appropriate unicode codepoint.
self.assertEqual(
domhelpers.gatherTextNodes(document.documentElement),
u"uses an xhtml entity: \N{COPYRIGHT SIGN}")
def test_withDisallowedDocType(self):
"""
A Lore XML input document may not include a I{DOCTYPE} declaration
referring to any DTD other than XHTML1 Transitional or XHTML1 Strict.
"""
self.assertRaises(
process.ProcessingFailure,
self._parseTest,
"""\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
<foo>uses an xhtml entity: ©</foo>
""")
class XMLSerializationTests(unittest.TestCase, XMLAssertionMixin):
"""
Tests for L{tree._writeDocument}.
"""
def test_nonASCIIData(self):
"""
A document which contains non-ascii characters is serialized to a
file using UTF-8.
"""
document = dom.Document()
parent = dom.Element('foo')
text = dom.Text()
text.data = u'\N{SNOWMAN}'
parent.appendChild(text)
document.appendChild(parent)
outFile = self.mktemp()
tree._writeDocument(outFile, document)
self.assertXMLEqual(
FilePath(outFile).getContent(),
u'<foo>\N{SNOWMAN}</foo>'.encode('utf-8'))
class LatexSpitterTestCase(unittest.TestCase):
"""
Tests for the Latex output plugin.
"""
def test_indexedSpan(self):
"""
        Test that processing a span tag with an index class results in a latex
        \\index directive with the correct value.
"""
doc = dom.parseString('<span class="index" value="name" />').documentElement
out = StringIO()
spitter = LatexSpitter(out.write)
spitter.visitNode(doc)
self.assertEqual(out.getvalue(), u'\\index{name}\n')
class ScriptTests(unittest.TestCase):
"""
Tests for L{twisted.lore.scripts.lore}, the I{lore} command's
implementation,
"""
def test_getProcessor(self):
"""
L{lore.getProcessor} loads the specified output plugin from the
specified input plugin.
"""
processor = lore.getProcessor("lore", "html", options)
self.assertNotIdentical(processor, None)
|
engdan77/edoAutoHomeMobile
|
twisted/lore/test/test_lore.py
|
Python
|
mit
| 42,212
|
#!/usr/bin/python
import os
import json
def main():
print("Sample Post Script")
files = json.loads(os.environ.get('MH_FILES'))
for filename in files:
print(filename)
if __name__ == "__main__":
main()
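# Illustrative invocation (the exact MH_FILES payload depends on the caller;
# a JSON list of file paths is assumed here):
#   MH_FILES='["/downloads/movie.mp4", "/downloads/movie.srt"]' ./sample.py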
|
Collisionc/sickbeard_mp4_automator
|
post_process/sample.py
|
Python
|
mit
| 214
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""experiments model definition."""
from ..schema import SchemaOverdo
experiments = SchemaOverdo(schema="experiments.json")
|
Panos512/inspire-next
|
inspirehep/dojson/experiments/model.py
|
Python
|
gpl-2.0
| 1,040
|
from flask_admin import expose
from listenbrainz.webserver.admin import AdminIndexView
class HomeView(AdminIndexView):
@expose('/')
def index(self):
return self.render('admin/home.html')
|
Freso/listenbrainz-server
|
listenbrainz/webserver/admin/views.py
|
Python
|
gpl-2.0
| 206
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Avencall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
common = {}
execfile_('common.py', common)
MODELS = [
u'7906G',
u'7911G',
u'7931G',
u'7941G',
u'7942G',
u'7961G',
u'7962G',
]
class CiscoSccpPlugin(common['BaseCiscoSccpPlugin']):
IS_PLUGIN = True
pg_associator = common['BaseCiscoPgAssociator'](MODELS)
|
eallovon/xivo-provd-plugins
|
plugins/xivo-cisco-sccp/9.0.3/entry.py
|
Python
|
gpl-3.0
| 989
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
def execute(filters=None):
if not filters: filters = {}
columns = get_columns()
data = get_employees(filters)
return columns, data
def get_columns():
return [
_("Employee") + ":Link/Employee:120", _("Name") + ":Data:200", _("Date of Birth")+ ":Date:100",
_("Branch") + ":Link/Branch:120", _("Department") + ":Link/Department:120",
_("Designation") + ":Link/Designation:120", _("Gender") + "::60", _("Company") + ":Link/Company:120"
]
def get_employees(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select name, employee_name, date_of_birth,
branch, department, designation,
gender, company from tabEmployee where status = 'Active' %s""" % conditions, as_list=1)
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
conditions += " and month(date_of_birth) = '%s'" % month
if filters.get("company"): conditions += " and company = '%s'" % \
filters["company"].replace("'", "\\'")
return conditions
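# Illustrative result of get_conditions (filter values are made up):
#   get_conditions({"month": "Feb", "company": "ACME"})
#   -> " and month(date_of_birth) = '2' and company = 'ACME'"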
|
mhbu50/erpnext
|
erpnext/hr/report/employee_birthday/employee_birthday.py
|
Python
|
gpl-3.0
| 1,261
|
# -*- coding: utf-8 -*-
from outwiker.gui.baseaction import BaseAction
from outwiker.core.commands import createNewWiki
class NewAction (BaseAction):
"""
    Create a new notes tree
"""
stringId = u"NewTree"
def __init__(self, application):
self._application = application
@property
def title(self):
return _(u"New…")
@property
def description(self):
return _(u"Create a new tree notes")
def run(self, params):
createNewWiki(self._application.mainWindow)
|
unreal666/outwiker
|
src/outwiker/actions/new.py
|
Python
|
gpl-3.0
| 566
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''XYZ file format'''
import numpy as np
from horton.units import angstrom
from horton.periodic import periodic
__all__ = ['load_xyz', 'dump_xyz']
def load_xyz(filename):
'''Load a molecular geometry from a .xyz file.
**Argument:**
filename
The file to load the geometry from
**Returns:** dictionary with ``title``, ``coordinates`` and ``numbers``.
'''
f = file(filename)
size = int(f.next())
title = f.next().strip()
coordinates = np.empty((size, 3), float)
numbers = np.empty(size, int)
for i in xrange(size):
words = f.next().split()
numbers[i] = periodic[words[0]].number
coordinates[i,0] = float(words[1])*angstrom
coordinates[i,1] = float(words[2])*angstrom
coordinates[i,2] = float(words[3])*angstrom
f.close()
return {
'title': title,
'coordinates': coordinates,
'numbers': numbers
}
def dump_xyz(filename, data):
'''Write an ``.xyz`` file.
**Arguments:**
filename
The name of the file to be written. This usually has the extension
".xyz".
data
An IOData instance. Must contain ``coordinates`` and ``numbers``.
May contain ``title``.
'''
with open(filename, 'w') as f:
print >> f, data.natom
print >> f, getattr(data, 'title', 'Created with HORTON')
for i in xrange(data.natom):
n = periodic[data.numbers[i]].symbol
x, y, z = data.coordinates[i]/angstrom
print >> f, '%2s %15.10f %15.10f %15.10f' % (n, x, y, z)
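# A minimal usage sketch (not part of HORTON itself); 'water.xyz' is a
# hypothetical input file. load_xyz returns plain numpy arrays, so the result
# can be inspected directly, e.g.:
#
#     mol = load_xyz('water.xyz')
#     print mol['title'], len(mol['numbers'])        # title line and atom count
#     print mol['coordinates'][0] / angstrom         # first atom, back in Angstrom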
|
QuantumElephant/horton
|
horton/io/xyz.py
|
Python
|
gpl-3.0
| 2,419
|
from frappe import _
def get_data():
return {
'fieldname': 'delivery_note',
'non_standard_fieldnames': {
'Stock Entry': 'delivery_note_no',
'Quality Inspection': 'reference_name',
'Auto Repeat': 'reference_document',
},
'internal_links': {
'Sales Order': ['items', 'against_sales_order'],
},
'transactions': [
{
'label': _('Related'),
'items': ['Sales Invoice', 'Packing Slip', 'Delivery Trip']
},
{
'label': _('Reference'),
'items': ['Sales Order', 'Quality Inspection']
},
{
'label': _('Returns'),
'items': ['Stock Entry']
},
{
'label': _('Subscription'),
'items': ['Auto Repeat']
},
]
}
|
ESS-LLP/erpnext-medical
|
erpnext/stock/doctype/delivery_note/delivery_note_dashboard.py
|
Python
|
gpl-3.0
| 674
|
"""
Copyright (C) <2010> Autin L. TSRI
This file git_upy/blender/v271/__init__.py is part of upy.
upy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
upy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with upy. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
|
corredD/upy
|
blender/v280/__init__.py
|
Python
|
gpl-3.0
| 755
|
from jx_elasticsearch.es52.painless._utils import Painless, LIST_TO_PIPE
from jx_elasticsearch.es52.painless.add_op import AddOp
from jx_elasticsearch.es52.painless.and_op import AndOp
from jx_elasticsearch.es52.painless.basic_add_op import BasicAddOp
from jx_elasticsearch.es52.painless.basic_eq_op import BasicEqOp
from jx_elasticsearch.es52.painless.basic_index_of_op import BasicIndexOfOp
from jx_elasticsearch.es52.painless.basic_mul_op import BasicMulOp
from jx_elasticsearch.es52.painless.basic_starts_with_op import BasicStartsWithOp
from jx_elasticsearch.es52.painless.basic_substring_op import BasicSubstringOp
from jx_elasticsearch.es52.painless.boolean_op import BooleanOp
from jx_elasticsearch.es52.painless.case_op import CaseOp
from jx_elasticsearch.es52.painless.coalesce_op import CoalesceOp
from jx_elasticsearch.es52.painless.concat_op import ConcatOp
from jx_elasticsearch.es52.painless.count_op import CountOp
from jx_elasticsearch.es52.painless.date_op import DateOp
from jx_elasticsearch.es52.painless.div_op import DivOp
from jx_elasticsearch.es52.painless.eq_op import EqOp
from jx_elasticsearch.es52.painless.es_script import EsScript
from jx_elasticsearch.es52.painless.exists_op import ExistsOp
from jx_elasticsearch.es52.painless.exp_op import ExpOp
from jx_elasticsearch.es52.painless.find_op import FindOp
from jx_elasticsearch.es52.painless.first_op import FirstOp
from jx_elasticsearch.es52.painless.floor_op import FloorOp
from jx_elasticsearch.es52.painless.gt_op import GtOp
from jx_elasticsearch.es52.painless.gte_op import GteOp
from jx_elasticsearch.es52.painless.in_op import InOp
from jx_elasticsearch.es52.painless.integer_op import IntegerOp
from jx_elasticsearch.es52.painless.is_number_op import IsNumberOp
from jx_elasticsearch.es52.painless.leaves_op import LeavesOp
from jx_elasticsearch.es52.painless.length_op import LengthOp
from jx_elasticsearch.es52.painless.literal import Literal
from jx_elasticsearch.es52.painless.lt_op import LtOp
from jx_elasticsearch.es52.painless.lte_op import LteOp
from jx_elasticsearch.es52.painless.max_op import MaxOp
from jx_elasticsearch.es52.painless.min_op import MinOp
from jx_elasticsearch.es52.painless.missing_op import MissingOp
from jx_elasticsearch.es52.painless.mod_op import ModOp
from jx_elasticsearch.es52.painless.mul_op import MulOp
from jx_elasticsearch.es52.painless.ne_op import NeOp
from jx_elasticsearch.es52.painless.not_left_op import NotLeftOp
from jx_elasticsearch.es52.painless.not_op import NotOp
from jx_elasticsearch.es52.painless.number_op import NumberOp
from jx_elasticsearch.es52.painless.or_op import OrOp
from jx_elasticsearch.es52.painless.prefix_op import PrefixOp
from jx_elasticsearch.es52.painless.string_op import StringOp
from jx_elasticsearch.es52.painless.sub_op import SubOp
from jx_elasticsearch.es52.painless.suffix_op import SuffixOp
from jx_elasticsearch.es52.painless.tuple_op import TupleOp
from jx_elasticsearch.es52.painless.union_op import UnionOp
from jx_elasticsearch.es52.painless.variable import Variable
from jx_elasticsearch.es52.painless.when_op import WhenOp
from jx_elasticsearch.es52.painless.false_op import FalseOp, false_script
from jx_elasticsearch.es52.painless.true_op import TrueOp, true_script
from jx_elasticsearch.es52.painless.null_op import NullOp, null_script
Painless.register_ops(vars())
|
klahnakoski/SpotManager
|
vendor/jx_elasticsearch/es52/painless/__init__.py
|
Python
|
mpl-2.0
| 3,355
|
# This file is part of Booktype.
# Copyright (c) 2012 Douglas Bagnall
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import os, sys
from booki.utils.json_wrapper import json
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
MEDIATYPES = {
'html': "text/html",
'xhtml': "application/xhtml+xml",
'css': 'text/css',
'json': "application/json",
'png': 'image/png',
'gif': 'image/gif',
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'svg': 'image/svg+xml',
'tiff': 'image/tiff',
'ncx': 'application/x-dtbncx+xml',
'dtb': 'application/x-dtbook+xml',
'xml': 'application/xml',
'pdf': "application/pdf",
'txt': 'text/plain',
'epub': "application/epub+zip",
'booki': "application/x-booki+zip",
None: 'application/octet-stream',
}
#metadata construction routines
DC = "http://purl.org/dc/elements/1.1/"
FM = "http://booki.cc/"
def get_metadata(metadata, key, ns=DC,
scheme='', default=[]):
"""Get a list of metadata values matching a key, namespace and
scheme. If the ns or scheme are not set, they default to Dublin
Core and an empty string, respectively.
If no values are set, an empty list is returned, unless the
default argument is given, in which case you get that.
"""
values = metadata.get(ns, {}).get(key, {})
if scheme == '*':
return sum(values.values(), [])
return values.get(scheme, default)
def get_metadata_schemes(metadata, key, ns=DC):
"""Say what schemes are available for a given key and namespace."""
values = metadata.get(ns, {}).get(key, {})
return values.keys()
def add_metadata(metadata, key, value, ns=DC, scheme=''):
"""Add a metadata (ns, key, scheme, value) tuple. Namespace
defaults to Dublin Core, and scheme to an empty string. In most
cases that is what you want."""
namespace = metadata.setdefault(ns, {})
items = namespace.setdefault(key, {})
values = items.setdefault(scheme, [])
values.append(value)
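# Illustrative sketch (not from the original module): a metadata round-trip with
# the helpers above, using a made-up title value.
#
#     meta = {}
#     add_metadata(meta, 'title', u'My Book')     # defaults: ns=DC, scheme=''
#     get_metadata(meta, 'title')                 # => [u'My Book']
#     get_metadata_schemes(meta, 'title')         # => ['']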
def clear_metadata(metadata, key, ns=DC, scheme='*'):
"""Clear metadata for a key in a namespace (ns). If namespace is
omitted, Dublin Core is assumed. If a scheme is specified (and is
not '*'), only metadata in that scheme is removed. By default all
schemes are removed.
If ns is '*', that key is removed from all namespaces.
"""
if ns in metadata:
if key in metadata[ns]:
if scheme == '*':
metadata[ns][key] = {}
elif scheme in metadata[ns][key]:
del metadata[ns][key][scheme]
elif ns == '*':
for ns in metadata:
clear_metadata(metadata, key, ns, scheme)
class BookiZip(object):
"""Helper for writing booki-zips"""
def __init__(self, filename, info={}):
"""Start a new zip and put an uncompressed 'mimetype' file at the
start. This idea is copied from the epub specification, and
allows the file type to be discovered by reading the first few
bytes."""
self.zipfile = ZipFile(filename, 'w', ZIP_DEFLATED, allowZip64=True)
self.write_blob('mimetype', MEDIATYPES['booki'], ZIP_STORED)
self.filename = filename
self.manifest = {}
self.info = info
def write_blob(self, filename, blob, compression=ZIP_DEFLATED, mode=0644):
"""Add something to the zip without adding to manifest"""
zinfo = ZipInfo(filename)
zinfo.external_attr = mode << 16L # set permissions
zinfo.compress_type = compression
self.zipfile.writestr(zinfo, blob)
def add_to_package(self, ID, fn, blob, mediatype=None,
contributors=[], rightsholders=[], license=[]):
"""Add an item to the zip, and save it in the manifest. If
mediatype is not provided, it will be guessed according to the
extension."""
self.write_blob(fn, blob)
if mediatype is None:
ext = fn[fn.rfind('.') + 1:]
mediatype = MEDIATYPES.get(ext, MEDIATYPES[None])
self.manifest[ID] = {
"url": fn,
"mimetype": mediatype,
"contributors": contributors,
"rightsholders": rightsholders,
"license": license,
}
def _close(self):
self.zipfile.close()
def finish(self):
"""Finalise the metadata and write to disk"""
self.info['manifest'] = self.manifest
infojson = json.dumps(self.info, indent=2)
self.add_to_package('info.json', 'info.json', infojson, 'application/json')
self._close()
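# Hypothetical usage (not part of Booktype): assemble a tiny booki-zip. The
# filename and chapter content below are placeholders.
#
#     bz = BookiZip('example.zip', info={'metadata': {}})
#     bz.add_to_package('ch1', 'ch1.html', '<html><body>Hi</body></html>')
#     bz.finish()     # writes info.json (with the manifest) and closes the zip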
|
btat/Booktype
|
lib/booki/bookizip.py
|
Python
|
agpl-3.0
| 5,243
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Tests flow of API keys',
'category': 'Tools',
'depends': ['web_tour'],
'data': ['views/assets.xml'],
}
|
rven/odoo
|
odoo/addons/test_apikeys/__manifest__.py
|
Python
|
agpl-3.0
| 229
|
import bountyfunding
from bountyfunding.core.const import *
from bountyfunding.core.data import clean_database
from test import to_object
from nose.tools import *
USER = "bountyfunding"
class Email_Test:
def setup(self):
self.app = bountyfunding.app.test_client()
clean_database()
def test_email(self):
eq_(len(self.get_emails()), 0)
r = self.app.post('/issues', data=dict(ref=1, status='READY',
title='Title', link='/issue/1'))
eq_(r.status_code, 200)
r = self.app.post('/issue/1/sponsorships',
data=dict(user=USER, amount=10))
eq_(r.status_code, 200)
r = self.app.get("/issue/1")
eq_(r.status_code, 200)
r = self.app.put('/issue/1', data=dict(
status=IssueStatus.to_string(IssueStatus.STARTED)))
eq_(r.status_code, 200)
emails = self.get_emails()
eq_(len(emails), 1)
email = emails[0]
eq_(email.recipient, USER)
ok_(email.issue_id)
ok_(email.body)
r = self.app.delete("/email/%s" % email.id)
eq_(r.status_code, 200)
def get_emails(self):
r = self.app.get("/emails")
eq_(r.status_code, 200)
return to_object(r).data
|
bountyfunding/bountyfunding
|
test/integration_test/email_test.py
|
Python
|
agpl-3.0
| 1,266
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from HTMLParser import HTMLParser
from weboob.tools.browser import BaseBrowser, BrowserHTTPNotFound
from weboob.capabilities.base import NotAvailable, NotLoaded
from weboob.capabilities.cinema import Movie, Person
from weboob.tools.json import json
from .pages import PersonPage, MovieCrewPage, BiographyPage, FilmographyPage, ReleasePage
from datetime import datetime
__all__ = ['ImdbBrowser']
class ImdbBrowser(BaseBrowser):
DOMAIN = 'www.imdb.com'
PROTOCOL = 'http'
ENCODING = 'utf-8'
USER_AGENT = BaseBrowser.USER_AGENTS['wget']
PAGES = {
'http://www.imdb.com/title/tt[0-9]*/fullcredits.*': MovieCrewPage,
'http://www.imdb.com/title/tt[0-9]*/releaseinfo.*': ReleasePage,
'http://www.imdb.com/name/nm[0-9]*/*': PersonPage,
'http://www.imdb.com/name/nm[0-9]*/bio.*': BiographyPage,
'http://www.imdb.com/name/nm[0-9]*/filmo.*': FilmographyPage,
}
def iter_movies(self, pattern):
res = self.readurl('http://www.imdb.com/xml/find?json=1&nr=1&tt=on&q=%s' % pattern.encode('utf-8'))
jres = json.loads(res)
htmlparser = HTMLParser()
for cat in ['title_popular', 'title_exact', 'title_approx']:
if cat in jres:
for m in jres[cat]:
tdesc = unicode(m['title_description'])
if '<a' in tdesc and '>' in tdesc:
short_description = u'%s %s' % (tdesc.split('<')[
0].strip(', '), tdesc.split('>')[1].split('<')[0])
else:
short_description = tdesc.strip(', ')
movie = Movie(m['id'], htmlparser.unescape(m['title']))
movie.other_titles = NotLoaded
movie.release_date = NotLoaded
movie.duration = NotLoaded
movie.short_description = htmlparser.unescape(short_description)
movie.pitch = NotLoaded
movie.country = NotLoaded
movie.note = NotLoaded
movie.roles = NotLoaded
movie.all_release_dates = NotLoaded
movie.thumbnail_url = NotLoaded
yield movie
def iter_persons(self, pattern):
res = self.readurl('http://www.imdb.com/xml/find?json=1&nr=1&nm=on&q=%s' % pattern.encode('utf-8'))
jres = json.loads(res)
htmlparser = HTMLParser()
for cat in ['name_popular', 'name_exact', 'name_approx']:
if cat in jres:
for p in jres[cat]:
person = Person(p['id'], htmlparser.unescape(unicode(p['name'])))
person.real_name = NotLoaded
person.birth_place = NotLoaded
person.birth_date = NotLoaded
person.death_date = NotLoaded
person.gender = NotLoaded
person.nationality = NotLoaded
person.short_biography = NotLoaded
person.short_description = htmlparser.unescape(p['description'])
person.roles = NotLoaded
person.thumbnail_url = NotLoaded
yield person
def get_movie(self, id):
res = self.readurl(
'http://imdbapi.org/?id=%s&type=json&plot=simple&episode=1&lang=en-US&aka=full&release=simple&business=0&tech=0' % id)
if res is not None:
jres = json.loads(res)
else:
return None
htmlparser = HTMLParser()
title = NotAvailable
duration = NotAvailable
release_date = NotAvailable
pitch = NotAvailable
country = NotAvailable
note = NotAvailable
short_description = NotAvailable
thumbnail_url = NotAvailable
other_titles = []
genres = []
roles = {}
if 'title' not in jres:
return
title = htmlparser.unescape(unicode(jres['title'].strip()))
if 'poster' in jres:
thumbnail_url = unicode(jres['poster'])
if 'directors' in jres:
short_description = unicode(', '.join(jres['directors']))
if 'genres' in jres:
for g in jres['genres']:
genres.append(g)
if 'runtime' in jres:
dur_str = jres['runtime'][0].split(':')
if len(dur_str) == 1:
duration = int(dur_str[0].split()[0])
else:
duration = int(dur_str[1].split()[0])
if 'also_known_as' in jres:
for other_t in jres['also_known_as']:
if 'country' in other_t and 'title' in other_t:
other_titles.append('%s : %s' % (other_t['country'], htmlparser.unescape(other_t['title'])))
if 'release_date' in jres:
dstr = str(jres['release_date'])
year = int(dstr[:4])
if year == 0:
year = 1
month = int(dstr[4:6])
if month == 0:
month = 1
day = int(dstr[-2:])
if day == 0:
day = 1
release_date = datetime(year, month, day)
if 'country' in jres:
country = u''
for c in jres['country']:
country += '%s, ' % c
country = country[:-2]
if 'plot_simple' in jres:
pitch = unicode(jres['plot_simple'])
if 'rating' in jres and 'rating_count' in jres:
note = u'%s/10 (%s votes)' % (jres['rating'], jres['rating_count'])
for r in ['actor', 'director', 'writer']:
if '%ss' % r in jres:
roles['%s' % r] = list(jres['%ss' % r])
movie = Movie(id, title)
movie.other_titles = other_titles
movie.release_date = release_date
movie.duration = duration
movie.genres = genres
movie.pitch = pitch
movie.country = country
movie.note = note
movie.roles = roles
movie.short_description = short_description
movie.all_release_dates = NotLoaded
movie.thumbnail_url = thumbnail_url
return movie
def get_person(self, id):
try:
self.location('http://www.imdb.com/name/%s' % id)
except BrowserHTTPNotFound:
return
assert self.is_on_page(PersonPage)
return self.page.get_person(id)
def get_person_biography(self, id):
self.location('http://www.imdb.com/name/%s/bio' % id)
assert self.is_on_page(BiographyPage)
return self.page.get_biography()
def iter_movie_persons(self, movie_id, role):
self.location('http://www.imdb.com/title/%s/fullcredits' % movie_id)
assert self.is_on_page(MovieCrewPage)
for p in self.page.iter_persons(role):
yield p
def iter_person_movies(self, person_id, role):
self.location('http://www.imdb.com/name/%s/filmotype' % person_id)
assert self.is_on_page(FilmographyPage)
return self.page.iter_movies(role)
def iter_person_movies_ids(self, person_id):
self.location('http://www.imdb.com/name/%s/filmotype' % person_id)
assert self.is_on_page(FilmographyPage)
for movie in self.page.iter_movies_ids():
yield movie
def iter_movie_persons_ids(self, movie_id):
self.location('http://www.imdb.com/title/%s/fullcredits' % movie_id)
assert self.is_on_page(MovieCrewPage)
for person in self.page.iter_persons_ids():
yield person
def get_movie_releases(self, id, country):
self.location('http://www.imdb.com/title/%s/releaseinfo' % id)
assert self.is_on_page(ReleasePage)
return self.page.get_movie_releases(country)
|
yannrouillard/weboob
|
modules/imdb/browser.py
|
Python
|
agpl-3.0
| 8,535
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from . import b2s_image
|
eicher31/compassion-modules
|
sbc_compassion/controllers/__init__.py
|
Python
|
agpl-3.0
| 426
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('instructor_task.tests.test_base', 'lms.djangoapps.instructor_task.tests.test_base')
from lms.djangoapps.instructor_task.tests.test_base import *
|
eduNEXT/edunext-platform
|
import_shims/lms/instructor_task/tests/test_base.py
|
Python
|
agpl-3.0
| 416
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 jmesteve All Rights Reserved
# https://github.com/jmesteve
# <jmesteve@me.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class res_partner(orm.Model):
_inherit = 'res.partner'
_columns = {
'ref': fields.char('Reference', size=64, select=True, required=True),
}
_sql_constraints = [
('uniq_reference', 'unique(ref)', "The reference must be unique"),
]
_defaults = {
'ref': '/',
}
# new register
def create(self, cr, uid, vals, context=None):
if 'ref' not in vals or vals['ref'] == '/':
vals['ref'] = self.pool.get('ir.sequence').get(cr, uid, 'res.partner')
return super(res_partner, self).create(cr, uid, vals, context)
# edit register
def write(self, cr, uid, ids, vals, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
partners_without_code = self.search(cr, uid, [
('ref', 'in', [False, '/']), ('id', 'in', ids)], context=context)
direct_write_ids = set(ids) - set(partners_without_code)
super(res_partner, self).write(
cr, uid, list(direct_write_ids), vals, context)
for partner_id in partners_without_code:
vals['ref'] = self.pool.get('ir.sequence').get(cr, uid, 'res.partner')
super(res_partner, self).write(cr, uid, partner_id, vals, context)
return True
def copy(self, cr, uid, id, default={}, context=None):
product = self.read(cr, uid, id, ['ref'], context=context)
if product['ref']:
default.update({
'ref': product['ref'] + _('-copy'),
})
return super(res_partner, self).copy(cr, uid, id, default, context)
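# Rough illustration (not in the original module): a partner created without a
# reference pulls the next value from the 'res.partner' ir.sequence; the names
# below (partner_obj, cr, uid, pid) and the code 'P0042' are placeholders.
#
#     partner_obj.create(cr, uid, {'name': 'ACME'})          # ref becomes e.g. 'P0042'
#     partner_obj.write(cr, uid, [pid], {'name': 'ACME 2'})  # partners with ref '/' get one too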
|
jmesteve/openerpseda
|
openerp/addons_extra/partner_sequence_unify/partner_sequence_unify.py
|
Python
|
agpl-3.0
| 2,771
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pngwriter(CMakePackage):
"""PNGwriter is a very easy to use open source graphics library that uses
PNG as its output format. The interface has been designed to be as simple
and intuitive as possible. It supports plotting and reading pixels in the
RGB (red, green, blue), HSV (hue, saturation, value/brightness) and CMYK
(cyan, magenta, yellow, black) colour spaces, basic shapes, scaling,
bilinear interpolation, full TrueType antialiased and rotated text support,
bezier curves, opening existing PNG images and more.
"""
homepage = "http://pngwriter.sourceforge.net/"
url = "https://github.com/pngwriter/pngwriter/archive/0.5.6.tar.gz"
git = "https://github.com/pngwriter/pngwriter.git"
maintainers = ['ax3l']
version('develop', branch='dev')
version('master', branch='master')
version('0.7.0', sha256='82d46eef109f434f95eba9cf5908710ae4e75f575fd3858178ad06e800152825')
version('0.6.0', sha256='5107c6be0bfadf76ba4d01a553f7e060b5a7763ca7d9374ef3e7e59746b3911e')
version('0.5.6', sha256='0c5f3c1fd6f2470e88951f4b8add64cf5f5a7e7038115dba69604139359b08f1')
depends_on('libpng')
depends_on('zlib')
depends_on('freetype')
def cmake_args(self):
spec = self.spec
args = []
if spec.satisfies('@0.7.0:'):
args += ['-DPNGwriter_USE_FREETYPE:BOOL=ON']
return args
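# Hypothetical command-line usage (not part of this package file):
#
#     spack install pngwriter@0.7.0
#
# which would pick up the FreeType-enabled CMake flag added above for 0.7.0+.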
|
iulian787/spack
|
var/spack/repos/builtin/packages/pngwriter/package.py
|
Python
|
lgpl-2.1
| 1,625
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "add package to environment using dotkit"
section = "environment"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to use with dotkit')
def use(parser, args):
print_module_placeholder_help()
|
EmreAtes/spack
|
lib/spack/spack/cmd/use.py
|
Python
|
lgpl-2.1
| 1,713
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RModelmetrics(RPackage):
"""Collection of metrics for evaluating models written in C++ using
'Rcpp'."""
homepage = "https://cran.r-project.org/package=ModelMetrics"
url = "https://cran.r-project.org/src/contrib/ModelMetrics_1.1.0.tar.gz"
version('1.1.0', 'd43175001f0531b8810d2802d76b7b44')
depends_on('r@3.2.2:')
depends_on('r-rcpp', type=('build', 'run'))
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/r-modelmetrics/package.py
|
Python
|
lgpl-2.1
| 1,657
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDoparallel(RPackage):
"""Provides a parallel backend for the %dopar% function using the parallel
package."""
homepage = "https://cloud.r-project.org/package=doParallel"
url = "https://cloud.r-project.org/src/contrib/doParallel_1.0.10.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/doParallel"
version('1.0.15', sha256='71ad7ea69616468996aefdd8d02a4a234759a21ddde9ed1657e3c537145cd86e')
version('1.0.11', sha256='4ccbd2eb46d3e4f5251b0c3de4d93d9168b02bb0be493656d6aea236667ff76a')
version('1.0.10', sha256='70024b6950025cc027022ee409f382e5ad3680c0a25bcd404bfc16418be8add5')
depends_on('r@2.14.0:', type=('build', 'run'))
depends_on('r-foreach@1.2.0:', type=('build', 'run'))
depends_on('r-iterators@1.0.0:', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-doparallel/package.py
|
Python
|
lgpl-2.1
| 1,029
|
# -*- coding: utf8 -*-
from unittest import TestCase
from . import formattest
from .. import nbpy
from .nbexamples import nb0, nb0_py
class TestPy(formattest.NBFormatTest, TestCase):
nb0_ref = nb0_py
ext = 'py'
mod = nbpy
ignored_keys = ['collapsed', 'outputs', 'prompt_number', 'metadata']
def assertSubset(self, da, db):
"""assert that da is a subset of db, ignoring self.ignored_keys.
Called recursively on containers, ultimately comparing individual
elements.
"""
if isinstance(da, dict):
for k,v in da.iteritems():
if k in self.ignored_keys:
continue
self.assertTrue(k in db)
self.assertSubset(v, db[k])
elif isinstance(da, list):
for a,b in zip(da, db):
self.assertSubset(a,b)
else:
if isinstance(da, basestring) and isinstance(db, basestring):
# pyfile is not sensitive to preserving leading/trailing
# newlines in blocks through roundtrip
da = da.strip('\n')
db = db.strip('\n')
self.assertEquals(da, db)
return True
def assertNBEquals(self, nba, nbb):
# since roundtrip is lossy, only compare keys that are preserved
# assumes nba is read from my file format
return self.assertSubset(nba, nbb)
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/nbformat/v3/tests/test_nbpy.py
|
Python
|
lgpl-3.0
| 1,441
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows the complex DAG structure.
"""
from datetime import datetime
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
with models.DAG(
dag_id="example_complex",
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example', 'example2', 'example3'],
) as dag:
# Create
create_entry_group = BashOperator(task_id="create_entry_group", bash_command="echo create_entry_group")
create_entry_group_result = BashOperator(
task_id="create_entry_group_result", bash_command="echo create_entry_group_result"
)
create_entry_group_result2 = BashOperator(
task_id="create_entry_group_result2", bash_command="echo create_entry_group_result2"
)
create_entry_gcs = BashOperator(task_id="create_entry_gcs", bash_command="echo create_entry_gcs")
create_entry_gcs_result = BashOperator(
task_id="create_entry_gcs_result", bash_command="echo create_entry_gcs_result"
)
create_entry_gcs_result2 = BashOperator(
task_id="create_entry_gcs_result2", bash_command="echo create_entry_gcs_result2"
)
create_tag = BashOperator(task_id="create_tag", bash_command="echo create_tag")
create_tag_result = BashOperator(task_id="create_tag_result", bash_command="echo create_tag_result")
create_tag_result2 = BashOperator(task_id="create_tag_result2", bash_command="echo create_tag_result2")
create_tag_template = BashOperator(task_id="create_tag_template", bash_command="echo create_tag_template")
create_tag_template_result = BashOperator(
task_id="create_tag_template_result", bash_command="echo create_tag_template_result"
)
create_tag_template_result2 = BashOperator(
task_id="create_tag_template_result2", bash_command="echo create_tag_template_result2"
)
create_tag_template_field = BashOperator(
task_id="create_tag_template_field", bash_command="echo create_tag_template_field"
)
create_tag_template_field_result = BashOperator(
task_id="create_tag_template_field_result", bash_command="echo create_tag_template_field_result"
)
create_tag_template_field_result2 = BashOperator(
task_id="create_tag_template_field_result2", bash_command="echo create_tag_template_field_result"
)
# Delete
delete_entry = BashOperator(task_id="delete_entry", bash_command="echo delete_entry")
create_entry_gcs >> delete_entry
delete_entry_group = BashOperator(task_id="delete_entry_group", bash_command="echo delete_entry_group")
create_entry_group >> delete_entry_group
delete_tag = BashOperator(task_id="delete_tag", bash_command="echo delete_tag")
create_tag >> delete_tag
delete_tag_template_field = BashOperator(
task_id="delete_tag_template_field", bash_command="echo delete_tag_template_field"
)
delete_tag_template = BashOperator(task_id="delete_tag_template", bash_command="echo delete_tag_template")
# Get
get_entry_group = BashOperator(task_id="get_entry_group", bash_command="echo get_entry_group")
get_entry_group_result = BashOperator(
task_id="get_entry_group_result", bash_command="echo get_entry_group_result"
)
get_entry = BashOperator(task_id="get_entry", bash_command="echo get_entry")
get_entry_result = BashOperator(task_id="get_entry_result", bash_command="echo get_entry_result")
get_tag_template = BashOperator(task_id="get_tag_template", bash_command="echo get_tag_template")
get_tag_template_result = BashOperator(
task_id="get_tag_template_result", bash_command="echo get_tag_template_result"
)
# List
list_tags = BashOperator(task_id="list_tags", bash_command="echo list_tags")
list_tags_result = BashOperator(task_id="list_tags_result", bash_command="echo list_tags_result")
# Lookup
lookup_entry = BashOperator(task_id="lookup_entry", bash_command="echo lookup_entry")
lookup_entry_result = BashOperator(task_id="lookup_entry_result", bash_command="echo lookup_entry_result")
# Rename
rename_tag_template_field = BashOperator(
task_id="rename_tag_template_field", bash_command="echo rename_tag_template_field"
)
# Search
search_catalog = BashOperator(task_id="search_catalog", bash_command="echo search_catalog")
search_catalog_result = BashOperator(
task_id="search_catalog_result", bash_command="echo search_catalog_result"
)
# Update
update_entry = BashOperator(task_id="update_entry", bash_command="echo update_entry")
update_tag = BashOperator(task_id="update_tag", bash_command="echo update_tag")
update_tag_template = BashOperator(task_id="update_tag_template", bash_command="echo update_tag_template")
update_tag_template_field = BashOperator(
task_id="update_tag_template_field", bash_command="echo update_tag_template_field"
)
# Create
create_tasks = [
create_entry_group,
create_entry_gcs,
create_tag_template,
create_tag_template_field,
create_tag,
]
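# chain(*create_tasks) below wires these five tasks into a linear sequence,
# equivalent to create_entry_group >> create_entry_gcs >> create_tag_template
# >> create_tag_template_field >> create_tag.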
chain(*create_tasks)
create_entry_group >> delete_entry_group
create_entry_group >> create_entry_group_result
create_entry_group >> create_entry_group_result2
create_entry_gcs >> delete_entry
create_entry_gcs >> create_entry_gcs_result
create_entry_gcs >> create_entry_gcs_result2
create_tag_template >> delete_tag_template_field
create_tag_template >> create_tag_template_result
create_tag_template >> create_tag_template_result2
create_tag_template_field >> delete_tag_template_field
create_tag_template_field >> create_tag_template_field_result
create_tag_template_field >> create_tag_template_field_result2
create_tag >> delete_tag
create_tag >> create_tag_result
create_tag >> create_tag_result2
# Delete
delete_tasks = [
delete_tag,
delete_tag_template_field,
delete_tag_template,
delete_entry_group,
delete_entry,
]
chain(*delete_tasks)
# Get
create_tag_template >> get_tag_template >> delete_tag_template
get_tag_template >> get_tag_template_result
create_entry_gcs >> get_entry >> delete_entry
get_entry >> get_entry_result
create_entry_group >> get_entry_group >> delete_entry_group
get_entry_group >> get_entry_group_result
# List
create_tag >> list_tags >> delete_tag
list_tags >> list_tags_result
# Lookup
create_entry_gcs >> lookup_entry >> delete_entry
lookup_entry >> lookup_entry_result
# Rename
create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field
# Search
chain(create_tasks, search_catalog, delete_tasks)
search_catalog >> search_catalog_result
# Update
create_entry_gcs >> update_entry >> delete_entry
create_tag >> update_tag >> delete_tag
create_tag_template >> update_tag_template >> delete_tag_template
create_tag_template_field >> update_tag_template_field >> rename_tag_template_field
|
apache/incubator-airflow
|
airflow/example_dags/example_complex.py
|
Python
|
apache-2.0
| 7,913
|
#!/usr/bin/python
import sys
import urlparse
import os
import requests
def chunkedFetchUrl(url, local_filename=None, **kwargs):
"""Adapted from http://stackoverflow.com/q/16694907"""
if not local_filename:
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True, **kwargs)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return local_filename
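# Example (hypothetical URL): stream a response to disk in 1 KiB chunks without
# holding the whole body in memory.
#
#     chunkedFetchUrl('http://example.org/data/file.csv')
#     # writes ./file.csv and returns the local filename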
url=sys.argv[1]
parsed=urlparse.urlparse(url)
(h,t) = os.path.split(parsed.path)
t = t or 'index.html'
bits = parsed.netloc.split('.')
if len(bits)==3:
d=bits[1]
elif len(bits)==2:
d=bits[0]
else:
d=parsed.netloc
full=os.path.join(d,h[1:])
try:
os.makedirs(full)
except Exception as e:
print >> sys.stderr, e
chunkedFetchUrl(url, local_filename=os.path.join(full, t))
|
usc-isi-i2/dig-alignment
|
versions/1.0/datasets/atf/bin/minicurl.py
|
Python
|
apache-2.0
| 973
|
import distutils.dir_util
import glob
import os
import shutil
import subprocess
import time
from cassandra.concurrent import execute_concurrent_with_args
from dtest import (Tester, cleanup_cluster, create_ccm_cluster, create_ks,
debug, get_test_path)
from tools.assertions import assert_one
from tools.files import replace_in_file, safe_mkdtemp
from tools.hacks import advance_to_next_cl_segment
from tools.misc import ImmutableMapping
from tools.decorators import since
class SnapshotTester(Tester):
def create_schema(self, session):
create_ks(session, 'ks', 1)
session.execute('CREATE TABLE ks.cf ( key int PRIMARY KEY, val text);')
def insert_rows(self, session, start, end):
insert_statement = session.prepare("INSERT INTO ks.cf (key, val) VALUES (?, 'asdf')")
args = [(r,) for r in range(start, end)]
execute_concurrent_with_args(session, insert_statement, args, concurrency=20)
def make_snapshot(self, node, ks, cf, name):
debug("Making snapshot....")
node.flush()
snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
node.nodetool(snapshot_cmd)
tmpdir = safe_mkdtemp()
os.mkdir(os.path.join(tmpdir, ks))
os.mkdir(os.path.join(tmpdir, ks, cf))
# Find the snapshot dir, it's different in various C*
x = 0
for data_dir in node.data_directories():
snapshot_dir = "{data_dir}/{ks}/{cf}/snapshots/{name}".format(data_dir=data_dir, ks=ks, cf=cf, name=name)
if not os.path.isdir(snapshot_dir):
snapshot_dirs = glob.glob("{data_dir}/{ks}/{cf}-*/snapshots/{name}".format(data_dir=data_dir, ks=ks, cf=cf, name=name))
if len(snapshot_dirs) > 0:
snapshot_dir = snapshot_dirs[0]
else:
continue
debug("snapshot_dir is : " + snapshot_dir)
debug("snapshot copy is : " + tmpdir)
# Copy files from the snapshot dir to existing temp dir
distutils.dir_util.copy_tree(str(snapshot_dir), os.path.join(tmpdir, str(x), ks, cf))
x += 1
return tmpdir
def restore_snapshot(self, snapshot_dir, node, ks, cf):
debug("Restoring snapshot....")
for x in xrange(0, self.cluster.data_dir_count):
snap_dir = os.path.join(snapshot_dir, str(x), ks, cf)
if os.path.exists(snap_dir):
ip = node.address()
args = [node.get_tool('sstableloader'), '-d', ip, snap_dir]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
exit_status = p.wait()
if exit_status != 0:
raise Exception("sstableloader command '%s' failed; exit status: %d'; stdout: %s; stderr: %s" %
(" ".join(args), exit_status, stdout, stderr))
def restore_snapshot_schema(self, snapshot_dir, node, ks, cf):
debug("Restoring snapshot schema....")
for x in xrange(0, self.cluster.data_dir_count):
schema_path = os.path.join(snapshot_dir, str(x), ks, cf, 'schema.cql')
if os.path.exists(schema_path):
node.run_cqlsh(cmds="SOURCE '%s'" % schema_path)
class TestSnapshot(SnapshotTester):
def test_basic_snapshot_and_restore(self):
cluster = self.cluster
cluster.populate(1).start()
(node1,) = cluster.nodelist()
session = self.patient_cql_connection(node1)
self.create_schema(session)
self.insert_rows(session, 0, 100)
snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
# Write more data after the snapshot, this will get thrown
# away when we restore:
self.insert_rows(session, 100, 200)
rows = session.execute('SELECT count(*) from ks.cf')
self.assertEqual(rows[0][0], 200)
# Drop the keyspace, make sure we have no data:
session.execute('DROP KEYSPACE ks')
self.create_schema(session)
rows = session.execute('SELECT count(*) from ks.cf')
self.assertEqual(rows[0][0], 0)
# Restore data from snapshot:
self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
node1.nodetool('refresh ks cf')
rows = session.execute('SELECT count(*) from ks.cf')
# clean up
debug("removing snapshot_dir: " + snapshot_dir)
shutil.rmtree(snapshot_dir)
self.assertEqual(rows[0][0], 100)
@since('3.0')
def test_snapshot_and_restore_drop_table_remove_dropped_column(self):
"""
@jira_ticket CASSANDRA-13730
Dropping table should clear entries in dropped_column table
"""
cluster = self.cluster
cluster.populate(1).start()
node1, = cluster.nodelist()
session = self.patient_cql_connection(node1)
# Create schema and insert some data
create_ks(session, 'ks', 1)
session.execute("CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)")
session.execute("INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')")
assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
# Take a snapshot and drop the column and then drop table
snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
session.execute("ALTER TABLE ks.cf DROP b")
assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
session.execute("DROP TABLE ks.cf")
# Restore schema and data from snapshot, data should be the same as input
self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')
self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
node1.nodetool('refresh ks cf')
assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
# Clean up
debug("removing snapshot_dir: " + snapshot_dir)
shutil.rmtree(snapshot_dir)
@since('3.11')
def test_snapshot_and_restore_dropping_a_column(self):
"""
@jira_ticket CASSANDRA-13276
Can't load snapshots of tables with dropped columns.
"""
cluster = self.cluster
cluster.populate(1).start()
node1, = cluster.nodelist()
session = self.patient_cql_connection(node1)
# Create schema and insert some data
create_ks(session, 'ks', 1)
session.execute("CREATE TABLE ks.cf (k int PRIMARY KEY, a text, b text)")
session.execute("INSERT INTO ks.cf (k, a, b) VALUES (1, 'a', 'b')")
assert_one(session, "SELECT * FROM ks.cf", [1, "a", "b"])
# Drop a column
session.execute("ALTER TABLE ks.cf DROP b")
assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
# Take a snapshot and drop the table
snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
session.execute("DROP TABLE ks.cf")
# Restore schema and data from snapshot
self.restore_snapshot_schema(snapshot_dir, node1, 'ks', 'cf')
self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')
node1.nodetool('refresh ks cf')
assert_one(session, "SELECT * FROM ks.cf", [1, "a"])
# Clean up
debug("removing snapshot_dir: " + snapshot_dir)
shutil.rmtree(snapshot_dir)
class TestArchiveCommitlog(SnapshotTester):
cluster_options = ImmutableMapping({'commitlog_segment_size_in_mb': 1})
def make_snapshot(self, node, ks, cf, name):
debug("Making snapshot....")
node.flush()
snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
node.nodetool(snapshot_cmd)
tmpdirs = []
base_tmpdir = safe_mkdtemp()
for x in xrange(0, self.cluster.data_dir_count):
tmpdir = os.path.join(base_tmpdir, str(x))
os.mkdir(tmpdir)
# Copy files from the snapshot dir to existing temp dir
distutils.dir_util.copy_tree(os.path.join(node.get_path(), 'data{0}'.format(x), ks), tmpdir)
tmpdirs.append(tmpdir)
return tmpdirs
def restore_snapshot(self, snapshot_dir, node, ks, cf, name):
debug("Restoring snapshot for cf ....")
data_dir = os.path.join(node.get_path(), 'data{0}'.format(os.path.basename(snapshot_dir)))
cfs = [s for s in os.listdir(snapshot_dir) if s.startswith(cf + "-")]
if len(cfs) > 0:
cf_id = cfs[0]
glob_path = "{snapshot_dir}/{cf_id}/snapshots/{name}".format(snapshot_dir=snapshot_dir, cf_id=cf_id, name=name)
globbed = glob.glob(glob_path)
if len(globbed) > 0:
snapshot_dir = globbed[0]
if not os.path.exists(os.path.join(data_dir, ks)):
os.mkdir(os.path.join(data_dir, ks))
os.mkdir(os.path.join(data_dir, ks, cf_id))
debug("snapshot_dir is : " + snapshot_dir)
distutils.dir_util.copy_tree(snapshot_dir, os.path.join(data_dir, ks, cf_id))
def test_archive_commitlog(self):
self.run_archive_commitlog(restore_point_in_time=False)
def test_archive_commitlog_with_active_commitlog(self):
"""
Copy the active commitlogs to the archive directory before restoration
"""
self.run_archive_commitlog(restore_point_in_time=False, archive_active_commitlogs=True)
def dont_test_archive_commitlog(self):
"""
Run the archive commitlog test, but forget to add the restore commands
"""
self.run_archive_commitlog(restore_point_in_time=False, restore_archived_commitlog=False)
def test_archive_commitlog_point_in_time(self):
"""
Test archive commit log with restore_point_in_time setting
"""
self.run_archive_commitlog(restore_point_in_time=True)
def test_archive_commitlog_point_in_time_with_active_commitlog(self):
"""
Test archive commit log with restore_point_in_time setting
"""
self.run_archive_commitlog(restore_point_in_time=True, archive_active_commitlogs=True)
def test_archive_commitlog_point_in_time_with_active_commitlog_ln(self):
"""
Test archive commit log with restore_point_in_time setting
"""
self.run_archive_commitlog(restore_point_in_time=True, archive_active_commitlogs=True, archive_command='ln')
def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False, archive_command='cp'):
"""
Run archive commit log restoration test
"""
cluster = self.cluster
cluster.populate(1)
(node1,) = cluster.nodelist()
# Create a temp directory for storing commitlog archives:
tmp_commitlog = safe_mkdtemp()
debug("tmp_commitlog: " + tmp_commitlog)
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command={archive_command} %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog, archive_command=archive_command))])
cluster.start()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
# Write until we get a new CL segment. This avoids replaying
# initialization mutations from startup into system tables when
# restoring snapshots. See CASSANDRA-11811.
advance_to_next_cl_segment(
session=session,
commitlog_dir=os.path.join(node1.get_path(), 'commitlogs')
)
session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing first 30,000 rows...")
self.insert_rows(session, 0, 30000)
# Record when this first set of inserts finished:
insert_cutoff_times = [time.gmtime()]
# Delete all commitlog backups so far:
for f in glob.glob(tmp_commitlog + "/*"):
debug('Removing {}'.format(f))
os.remove(f)
snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')
if self.cluster.version() >= '3.0':
system_ks_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'keyspaces', 'keyspaces')
else:
system_ks_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')
if self.cluster.version() >= '3.0':
system_col_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'columns', 'columns')
else:
system_col_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')
if self.cluster.version() >= '3.0':
system_ut_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'types', 'usertypes')
else:
system_ut_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')
if self.cluster.version() >= '3.0':
system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'tables', 'cfs')
else:
system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')
try:
# Write more data:
debug("Writing second 30,000 rows...")
self.insert_rows(session, 30000, 60000)
node1.flush()
time.sleep(10)
# Record when this second set of inserts finished:
insert_cutoff_times.append(time.gmtime())
debug("Writing final 5,000 rows...")
self.insert_rows(session, 60000, 65000)
# Record when the third set of inserts finished:
insert_cutoff_times.append(time.gmtime())
# Flush so we get an accurate view of commitlogs
node1.flush()
rows = session.execute('SELECT count(*) from ks.cf')
# Make sure we have the same amount of rows as when we snapshotted:
self.assertEqual(rows[0][0], 65000)
# Check that there are at least one commit log backed up that
# is not one of the active commit logs:
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
debug("node1 commitlog dir: " + commitlog_dir)
debug("node1 commitlog dir contents: " + str(os.listdir(commitlog_dir)))
debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))
self.assertNotEqual(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
set())
cluster.flush()
cluster.compact()
node1.drain()
# Destroy the cluster
cluster.stop()
debug("node1 commitlog dir contents after stopping: " + str(os.listdir(commitlog_dir)))
debug("tmp_commitlog contents after stopping: " + str(os.listdir(tmp_commitlog)))
self.copy_logs(self.cluster, name=self.id().split(".")[0] + "_pre-restore")
cleanup_cluster(self.cluster, self.test_path)
self.test_path = get_test_path()
cluster = self.cluster = create_ccm_cluster(self.test_path, name='test')
cluster.populate(1)
node1, = cluster.nodelist()
# Restore schema from snapshots:
for system_ks_snapshot_dir in system_ks_snapshot_dirs:
if self.cluster.version() >= '3.0':
self.restore_snapshot(system_ks_snapshot_dir, node1, 'system_schema', 'keyspaces', 'keyspaces')
else:
self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
for system_col_snapshot_dir in system_col_snapshot_dirs:
if self.cluster.version() >= '3.0':
self.restore_snapshot(system_col_snapshot_dir, node1, 'system_schema', 'columns', 'columns')
else:
self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
for system_ut_snapshot_dir in system_ut_snapshot_dirs:
if self.cluster.version() >= '3.0':
self.restore_snapshot(system_ut_snapshot_dir, node1, 'system_schema', 'types', 'usertypes')
else:
self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')
for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
if self.cluster.version() >= '3.0':
self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system_schema', 'tables', 'cfs')
else:
self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
for snapshot_dir in snapshot_dirs:
self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
node1.nodetool('refresh ks cf')
rows = session.execute('SELECT count(*) from ks.cf')
# Make sure we have the same amount of rows as when we snapshotted:
self.assertEqual(rows[0][0], 30000)
# Edit commitlog_archiving.properties. Remove the archive
# command and set a restore command and restore_directories:
if restore_archived_commitlog:
replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command='),
(r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
(r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
tmp_commitlog=tmp_commitlog))])
if restore_point_in_time:
restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
[(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))])
debug("Restarting node1..")
node1.stop()
node1.start(wait_for_binary_proto=True)
node1.nodetool('flush')
node1.nodetool('compact')
session = self.patient_cql_connection(node1)
rows = session.execute('SELECT count(*) from ks.cf')
# Now we should have 30000 rows from the snapshot + 30000 rows
# from the commitlog backups:
if not restore_archived_commitlog:
self.assertEqual(rows[0][0], 30000)
elif restore_point_in_time:
self.assertEqual(rows[0][0], 60000)
else:
self.assertEqual(rows[0][0], 65000)
finally:
# clean up
debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
for snapshot_dir in snapshot_dirs:
shutil.rmtree(snapshot_dir)
debug("removing snapshot_dir: " + ",".join(system_ks_snapshot_dirs))
for system_ks_snapshot_dir in system_ks_snapshot_dirs:
shutil.rmtree(system_ks_snapshot_dir)
debug("removing snapshot_dir: " + ",".join(system_cfs_snapshot_dirs))
for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
shutil.rmtree(system_cfs_snapshot_dir)
debug("removing snapshot_dir: " + ",".join(system_ut_snapshot_dirs))
for system_ut_snapshot_dir in system_ut_snapshot_dirs:
shutil.rmtree(system_ut_snapshot_dir)
debug("removing snapshot_dir: " + ",".join(system_col_snapshot_dirs))
for system_col_snapshot_dir in system_col_snapshot_dirs:
shutil.rmtree(system_col_snapshot_dir)
debug("removing tmp_commitlog: " + tmp_commitlog)
shutil.rmtree(tmp_commitlog)
def test_archive_and_restore_commitlog_repeatedly(self):
"""
@jira_ticket CASSANDRA-10593
Run archive commit log restoration test repeatedly to make sure it is idempotent
and doesn't fail if done repeatedly
"""
cluster = self.cluster
cluster.populate(1)
node1 = cluster.nodelist()[0]
# Create a temp directory for storing commitlog archives:
tmp_commitlog = safe_mkdtemp()
debug("tmp_commitlog: {}".format(tmp_commitlog))
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command=ln %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog)),
(r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
(r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
tmp_commitlog=tmp_commitlog))])
cluster.start(wait_for_binary_proto=True)
debug("Creating initial connection")
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing 30,000 rows...")
self.insert_rows(session, 0, 60000)
try:
# Check that there are at least one commit log backed up that
# is not one of the active commit logs:
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
debug("node1 commitlog dir: " + commitlog_dir)
cluster.flush()
self.assertNotEqual(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
set())
debug("Flushing and doing first restart")
cluster.compact()
node1.drain()
# restart the node which causes the active commitlogs to be archived
node1.stop()
node1.start(wait_for_binary_proto=True)
debug("Stopping and second restart")
node1.stop()
node1.start(wait_for_binary_proto=True)
            # There should not be any additional data since the same commitlogs are replayed repeatedly
session = self.patient_cql_connection(node1)
rows = session.execute('SELECT count(*) from ks.cf')
self.assertEqual(rows[0][0], 60000)
finally:
debug("removing tmp_commitlog: " + tmp_commitlog)
shutil.rmtree(tmp_commitlog)
|
snazy/cassandra-dtest
|
snapshot_test.py
|
Python
|
apache-2.0
| 23,150
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import sys
from threading import Lock
from tempfile import NamedTemporaryFile
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import dump_pickle, write_with_length, batched
from pyspark.rdd import RDD
from py4j.java_collections import ListConverter
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD}s and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_writeIteratorToPickleFile = None
_takePartition = None
_next_accum_id = 0
_active_spark_context = None
_lock = Lock()
def __init__(self, master, jobName, sparkHome=None, pyFiles=None,
environment=None, batchSize=1024):
"""
Create a new SparkContext.
@param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
@param jobName: A name for your job, to display on the cluster web UI
@param sparkHome: Location where Spark is installed on cluster nodes.
@param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
@param environment: A dictionary of environment variables to set on
worker nodes.
@param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching or -1 to use an
unlimited batch size.
"""
with SparkContext._lock:
if SparkContext._active_spark_context:
raise ValueError("Cannot run multiple SparkContexts at once")
else:
SparkContext._active_spark_context = self
if not SparkContext._gateway:
SparkContext._gateway = launch_gateway()
SparkContext._jvm = SparkContext._gateway.jvm
SparkContext._writeIteratorToPickleFile = \
SparkContext._jvm.PythonRDD.writeIteratorToPickleFile
SparkContext._takePartition = \
SparkContext._jvm.PythonRDD.takePartition
self.master = master
self.jobName = jobName
self.sparkHome = sparkHome or None # None becomes null in Py4J
self.environment = environment or {}
        self.batchSize = batchSize # -1 represents an unlimited batch size
# Create the Java SparkContext through Py4J
empty_string_array = self._gateway.new_array(self._jvm.String, 0)
self._jsc = self._jvm.JavaSparkContext(master, jobName, sparkHome,
empty_string_array)
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jsc.accumulator(
self._jvm.java.util.ArrayList(),
self._jvm.PythonAccumulatorParam(host, port))
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = set()
# Deploy any code dependencies specified in the constructor
for path in (pyFiles or []):
self.addPyFile(path)
SparkFiles._sc = self
sys.path.append(SparkFiles.getRootDirectory())
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.spark.Utils.getLocalDir()
self._temp_dir = \
self._jvm.spark.Utils.createTempDir(local_dir).getAbsolutePath()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
def __del__(self):
self.stop()
def stop(self):
"""
Shut down the SparkContext.
"""
if self._jsc:
self._jsc.stop()
self._jsc = None
if self._accumulatorServer:
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD.
>>> sc.parallelize(range(5), 5).glom().collect()
[[0], [1], [2], [3], [4]]
"""
numSlices = numSlices or self.defaultParallelism
# Calling the Java parallelize() method with an ArrayList is too slow,
# because it sends O(n) Py4J commands. As an alternative, serialized
# objects are written to a file and loaded through textFile().
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = min(len(c) // numSlices, self.batchSize)
if batchSize > 1:
c = batched(c, batchSize)
for x in c:
write_with_length(dump_pickle(x), tempFile)
tempFile.close()
readRDDFromPickleFile = self._jvm.PythonRDD.readRDDFromPickleFile
jrdd = readRDDFromPickleFile(self._jsc, tempFile.name, numSlices)
return RDD(jrdd, self)
def textFile(self, name, minSplits=None):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jsc.textFile(name, minSplits)
return RDD(jrdd, self)
def _checkpointFile(self, name):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self)
def union(self, rdds):
"""
Build the union of a list of RDDs.
"""
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
        rest = ListConverter().convert(rest, self._gateway._gateway_client)
return RDD(self._jsc.union(first, rest), self)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a C{Broadcast}
object for reading it in distributed functions. The variable will be
sent to each cluster only once.
"""
jbroadcast = self._jsc.broadcast(bytearray(dump_pickle(value)))
return Broadcast(jbroadcast.id(), value, jbroadcast,
self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
        if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise Exception("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(path)<pyspark.files.SparkFiles.get>} to find its
download location.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * 100 for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path)
def clearFiles(self):
"""
Clear the job's list of files added by L{addFile} or L{addPyFile} so
that they do not get downloaded to any new nodes.
"""
# TODO: remove added .py or .zip files from the PYTHONPATH?
self._jsc.sc().clearFiles()
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
"""
self.addFile(path)
filename = path.split("/")[-1]
def setCheckpointDir(self, dirName, useExisting=False):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
If the directory does not exist, it will be created. If the directory
        exists and C{useExisting} is set to true, then the existing directory
will be used. Otherwise an exception will be thrown to prevent
accidental overriding of checkpoint files in the existing directory.
"""
self._jsc.sc().setCheckpointDir(dirName, useExisting)
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
bavardage/spark
|
python/pyspark/context.py
|
Python
|
apache-2.0
| 11,827
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Apple System Log file parser."""
import unittest
from plaso.formatters import asl as _ # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import asl
from tests.parsers import test_lib
class AslParserTest(test_lib.ParserTestCase):
"""Tests for Apple System Log file parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = asl.AslParser()
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath([u'applesystemlog.asl'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 2)
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-25 09:45:35.705481')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.record_position, 442)
self.assertEqual(event_object.message_id, 101406)
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.sender, u'locationd')
self.assertEqual(event_object.facility, u'com.apple.locationd')
self.assertEqual(event_object.pid, 69)
self.assertEqual(event_object.user_sid, u'205')
self.assertEqual(event_object.group_id, 205)
self.assertEqual(event_object.read_uid, 205)
self.assertEqual(event_object.read_gid, 0xffffffff)
self.assertEqual(event_object.level, 4)
# Note that "compatiblity" is spelt incorrectly in the actual message being
# tested here.
expected_message = (
u'Incorrect NSStringEncoding value 0x8000100 detected. '
u'Assuming NSASCIIStringEncoding. Will stop this compatiblity '
u'mapping behavior in the near future.')
self.assertEqual(event_object.message, expected_message)
expected_extra = (
u'CFLog Local Time: 2013-11-25 09:45:35.701, '
u'CFLog Thread: 1007, '
u'Sender_Mach_UUID: 50E1F76A-60FF-368C-B74E-EB48F6D98C51')
self.assertEqual(event_object.extra_information, expected_extra)
expected_msg = (
u'MessageID: 101406 '
u'Level: WARNING (4) '
u'User ID: 205 '
u'Group ID: 205 '
u'Read User: 205 '
u'Read Group: ALL '
u'Host: DarkTemplar-2.local '
u'Sender: locationd '
u'Facility: com.apple.locationd '
u'Message: {0:s} {1:s}').format(expected_message, expected_extra)
expected_msg_short = (
u'Sender: locationd '
u'Facility: com.apple.locationd')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
|
ostree/plaso
|
tests/parsers/asl.py
|
Python
|
apache-2.0
| 2,820
|
"""Support for SCSGate switches."""
import logging
import voluptuous as vol
from homeassistant.components import scsgate
from homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA
from homeassistant.const import ATTR_ENTITY_ID, ATTR_STATE, CONF_NAME, CONF_DEVICES
import homeassistant.helpers.config_validation as cv
ATTR_SCENARIO_ID = "scenario_id"
CONF_TRADITIONAL = "traditional"
CONF_SCENARIO = "scenario"
CONF_SCS_ID = "scs_id"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_DEVICES): cv.schema_with_slug_keys(scsgate.SCSGATE_SCHEMA)}
)
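# Illustrative configuration sketch (assumption: the slug, name and SCS ID below
# are placeholders, not values required by this platform):
#   switch:
#     - platform: scsgate
#       devices:
#         living_room:
#           name: Living room switch
#           scs_id: "0x12"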
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SCSGate switches."""
logger = logging.getLogger(__name__)
_setup_traditional_switches(
logger=logger, config=config, add_entities_callback=add_entities
)
_setup_scenario_switches(logger=logger, config=config, hass=hass)
def _setup_traditional_switches(logger, config, add_entities_callback):
"""Add traditional SCSGate switches."""
traditional = config.get(CONF_TRADITIONAL)
switches = []
if traditional:
for _, entity_info in traditional.items():
if entity_info[scsgate.CONF_SCS_ID] in scsgate.SCSGATE.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[scsgate.CONF_SCS_ID]
logger.info("Adding %s scsgate.traditional_switch", name)
switch = SCSGateSwitch(name=name, scs_id=scs_id, logger=logger)
switches.append(switch)
add_entities_callback(switches)
scsgate.SCSGATE.add_devices_to_register(switches)
def _setup_scenario_switches(logger, config, hass):
"""Add only SCSGate scenario switches."""
scenario = config.get(CONF_SCENARIO)
if scenario:
for _, entity_info in scenario.items():
if entity_info[scsgate.CONF_SCS_ID] in scsgate.SCSGATE.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[scsgate.CONF_SCS_ID]
logger.info("Adding %s scsgate.scenario_switch", name)
switch = SCSGateScenarioSwitch(
name=name, scs_id=scs_id, logger=logger, hass=hass
)
scsgate.SCSGATE.add_device(switch)
class SCSGateSwitch(SwitchDevice):
"""Representation of a SCSGate switch."""
def __init__(self, scs_id, name, logger):
"""Initialize the switch."""
self._name = name
self._scs_id = scs_id
self._toggled = False
self._logger = logger
@property
def scs_id(self):
"""Return the SCS ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self._toggled
def turn_on(self, **kwargs):
"""Turn the device on."""
from scsgate.tasks import ToggleStatusTask
scsgate.SCSGATE.append_task(ToggleStatusTask(target=self._scs_id, toggled=True))
self._toggled = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
from scsgate.tasks import ToggleStatusTask
scsgate.SCSGATE.append_task(
ToggleStatusTask(target=self._scs_id, toggled=False)
)
self._toggled = False
self.schedule_update_ha_state()
def process_event(self, message):
"""Handle a SCSGate message related with this switch."""
if self._toggled == message.toggled:
self._logger.info(
"Switch %s, ignoring message %s because state already active",
self._scs_id,
message,
)
# Nothing changed, ignoring
return
self._toggled = message.toggled
self.schedule_update_ha_state()
command = "off"
if self._toggled:
command = "on"
self.hass.bus.fire(
"button_pressed", {ATTR_ENTITY_ID: self._scs_id, ATTR_STATE: command}
)
class SCSGateScenarioSwitch:
"""Provides a SCSGate scenario switch.
    This switch is always in an "off" state; when toggled, it is used to trigger
    events.
"""
def __init__(self, scs_id, name, logger, hass):
"""Initialize the scenario."""
self._name = name
self._scs_id = scs_id
self._logger = logger
self._hass = hass
@property
def scs_id(self):
"""Return the SCS ID."""
return self._scs_id
@property
def name(self):
"""Return the name of the device if any."""
return self._name
def process_event(self, message):
"""Handle a SCSGate message related with this switch."""
from scsgate.messages import StateMessage, ScenarioTriggeredMessage
if isinstance(message, StateMessage):
scenario_id = message.bytes[4]
elif isinstance(message, ScenarioTriggeredMessage):
scenario_id = message.scenario
else:
self._logger.warn("Scenario switch: received unknown message %s", message)
return
self._hass.bus.fire(
"scenario_switch_triggered",
{ATTR_ENTITY_ID: int(self._scs_id), ATTR_SCENARIO_ID: int(scenario_id, 16)},
)
|
Cinntax/home-assistant
|
homeassistant/components/scsgate/switch.py
|
Python
|
apache-2.0
| 5,491
|
import unittest
import imp
import os
import errno
import sys
import glob
import re
from distutils.errors import *
def unlink(path):
try:
os.unlink(path)
except OSError, exc:
if exc.errno != errno.ENOENT:
raise
class BrokenTest(unittest.TestCase.failureException):
def __repr__(self):
name, reason = self.args
return '%s: %s: %s works now' % (
(self.__class__.__name__, name, reason))
def broken(reason, *exceptions):
    '''Indicates a test case that currently fails (or raises an error) but should succeed.
    If the test fails with an exception, list the expected exception type(s) in args'''
def wrapper(test_method):
def replacement(*args, **kwargs):
try:
test_method(*args, **kwargs)
except exceptions or unittest.TestCase.failureException:
pass
else:
raise BrokenTest(test_method.__name__, reason)
replacement.__doc__ = test_method.__doc__
replacement.__name__ = 'XXX_' + test_method.__name__
replacement.todo = reason
return replacement
return wrapper
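# Illustrative usage of @broken (assumption: the test class, reason text and
# exception type are placeholders):
#   class TFoo(unittest.TestCase):
#       @broken("upstream bug not fixed yet", RuntimeError)
#       def testSomething(self):
#           ...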
dependencyCache = {}
compileErrorCache = {}
# setup java CLASSPATH
if 'CLASSPATH' not in os.environ:
cp = []
baseDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
libDir = os.path.join(baseDir, 'lib')
jar = os.path.join(libDir, 'stringtemplate-3.0.jar')
if not os.path.isfile(jar):
raise DistutilsFileError(
"Missing file '%s'. Grap it from a distribution package."
% jar,
)
cp.append(jar)
jar = os.path.join(libDir, 'antlr-2.7.7.jar')
if not os.path.isfile(jar):
raise DistutilsFileError(
"Missing file '%s'. Grap it from a distribution package."
% jar,
)
cp.append(jar)
jar = os.path.join(libDir, 'junit-4.2.jar')
if not os.path.isfile(jar):
raise DistutilsFileError(
"Missing file '%s'. Grap it from a distribution package."
% jar,
)
cp.append(jar)
cp.append(os.path.join(baseDir, 'runtime', 'Python', 'build'))
classpath = '-cp "' + ':'.join([os.path.abspath(p) for p in cp]) + '"'
else:
classpath = ''
class ANTLRTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.baseName = os.path.splitext(os.path.basename(sys.modules[self.__module__].__file__))[0]
self.lexerModule = None
self.parserModule = None
def _invokeantlr(self, dir, file, options):
fp = os.popen('cd %s; java %s org.antlr.Tool %s %s 2>&1'
% (dir, classpath, options, file)
)
output = ''
failed = False
for line in fp:
output += line
if line.startswith('error('):
failed = True
rc = fp.close()
if rc is not None:
failed = True
if failed:
raise RuntimeError(
"Failed to compile grammar '%s':\n\n" % file
+ output
)
def compileGrammar(self, grammarName=None, options=''):
if grammarName is None:
grammarName = self.baseName + '.g'
# don't try to rebuild grammar, if it already failed
if grammarName in compileErrorCache:
return
try:
testDir = os.path.dirname(os.path.abspath(__file__))
# get dependencies from antlr
if grammarName in dependencyCache:
dependencies = dependencyCache[grammarName]
else:
dependencies = []
cmd = ('cd %s; java %s org.antlr.Tool -depend %s 2>&1'
% (testDir, classpath, grammarName)
)
output = ""
failed = False
fp = os.popen(cmd)
for line in fp:
output += line
if line.startswith('error('):
failed = True
elif ':' in line:
a, b = line.strip().split(':', 1)
dependencies.append(
(os.path.join(testDir, a.strip()),
[os.path.join(testDir, b.strip())])
)
rc = fp.close()
if rc is not None:
failed = True
if failed:
raise RuntimeError(
"antlr -depend failed with code %s on grammar '%s':\n\n"
% (rc, grammarName)
+ cmd
+ "\n"
+ output
)
# add dependencies to my .stg files
templateDir = os.path.abspath(os.path.join(testDir, '..', '..', '..', 'src', 'org', 'antlr', 'codegen', 'templates', 'Python'))
templates = glob.glob(os.path.join(templateDir, '*.stg'))
for dst, src in dependencies:
src.extend(templates)
dependencyCache[grammarName] = dependencies
rebuild = False
for dest, sources in dependencies:
if not os.path.isfile(dest):
rebuild = True
break
for source in sources:
if os.path.getmtime(source) > os.path.getmtime(dest):
rebuild = True
break
if rebuild:
self._invokeantlr(testDir, grammarName, options)
except:
# mark grammar as broken
compileErrorCache[grammarName] = True
raise
def lexerClass(self, base):
"""Optionally build a subclass of generated lexer class"""
return base
def parserClass(self, base):
"""Optionally build a subclass of generated parser class"""
return base
def walkerClass(self, base):
"""Optionally build a subclass of generated walker class"""
return base
def __load_module(self, name):
modFile, modPathname, modDescription \
= imp.find_module(name, [os.path.dirname(__file__)])
return imp.load_module(
name, modFile, modPathname, modDescription
)
def getLexer(self, *args, **kwargs):
"""Build lexer instance. Arguments are passed to lexer.__init__()."""
self.lexerModule = self.__load_module(self.baseName + 'Lexer')
cls = getattr(self.lexerModule, self.baseName + 'Lexer')
cls = self.lexerClass(cls)
lexer = cls(*args, **kwargs)
return lexer
def getParser(self, *args, **kwargs):
"""Build parser instance. Arguments are passed to parser.__init__()."""
self.parserModule = self.__load_module(self.baseName + 'Parser')
cls = getattr(self.parserModule, self.baseName + 'Parser')
cls = self.parserClass(cls)
parser = cls(*args, **kwargs)
return parser
def getWalker(self, *args, **kwargs):
"""Build walker instance. Arguments are passed to walker.__init__()."""
self.walkerModule = self.__load_module(self.baseName + 'Walker')
cls = getattr(self.walkerModule, self.baseName + 'Walker')
cls = self.walkerClass(cls)
walker = cls(*args, **kwargs)
return walker
def compileInlineGrammar(self, grammar, options=''):
testDir = os.path.dirname(os.path.abspath(__file__))
# get type and name from first grammar line
m = re.match(r'\s*((lexer|parser|tree)\s+|)grammar\s+(\S+);', grammar)
assert m is not None
grammarType = m.group(2)
if grammarType is None:
grammarType = 'combined'
grammarName = m.group(3)
assert grammarType in ('lexer', 'parser', 'tree', 'combined'), grammarType
# dump temp grammar file
fp = open(os.path.join(testDir, grammarName + '.g'), 'w')
fp.write(grammar)
fp.close()
# compile it
self._invokeantlr(testDir, grammarName + '.g', options)
if grammarType == 'combined':
lexerMod = self.__load_module(grammarName + 'Lexer')
lexerCls = getattr(lexerMod, grammarName + 'Lexer')
lexerCls = self.lexerClass(lexerCls)
parserMod = self.__load_module(grammarName + 'Parser')
parserCls = getattr(parserMod, grammarName + 'Parser')
parserCls = self.parserClass(parserCls)
return lexerCls, parserCls
if grammarType == 'lexer':
lexerMod = self.__load_module(grammarName + 'Lexer')
lexerCls = getattr(lexerMod, grammarName + 'Lexer')
lexerCls = self.lexerClass(lexerCls)
return lexerCls
if grammarType == 'parser':
parserMod = self.__load_module(grammarName + 'Parser')
parserCls = getattr(parserMod, grammarName + 'Parser')
parserCls = self.parserClass(parserCls)
return parserCls
if grammarType == 'tree':
walkerMod = self.__load_module(grammarName)
walkerCls = getattr(walkerMod, grammarName)
walkerCls = self.walkerClass(walkerCls)
return walkerCls
|
sshrdp/mclab
|
lib/antlr-3.0.1/runtime/Python/tests/testbase.py
|
Python
|
apache-2.0
| 9,623
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Convenience imports for public API components.
# Importing non-modules that are not used explicitly
from horizon.tables.actions import Action
from horizon.tables.actions import BatchAction
from horizon.tables.actions import DeleteAction
from horizon.tables.actions import FilterAction
from horizon.tables.actions import FixedFilterAction
from horizon.tables.actions import LinkAction
from horizon.tables.actions import NameFilterAction
from horizon.tables.base import Column
from horizon.tables.base import DataTable
from horizon.tables.base import Row
from horizon.tables.base import WrappingColumn
from horizon.tables.views import DataTableView
from horizon.tables.views import MixedDataTableView
from horizon.tables.views import MultiTableMixin
from horizon.tables.views import MultiTableView
from horizon.tables.views import PagedTableMixin
__all__ = [
'Action',
'BatchAction',
'DeleteAction',
'FilterAction',
'FixedFilterAction',
'LinkAction',
'NameFilterAction',
'Column',
'DataTable',
'Row',
'WrappingColumn',
'DataTableView',
'MixedDataTableView',
'MultiTableMixin',
'MultiTableView',
'PagedTableMixin',
]
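# Illustrative sketch of the public API re-exported above (assumption: the table
# and column names are placeholders, not part of this package):
#   from horizon import tables
#
#   class InstancesTable(tables.DataTable):
#       name = tables.Column("name", verbose_name="Name")
#
#       class Meta(object):
#           name = "instances"
#           verbose_name = "Instances"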
|
ChameleonCloud/horizon
|
horizon/tables/__init__.py
|
Python
|
apache-2.0
| 1,788
|
# Copyright 2016 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.nova import utils
from rally.task import validation
LOG = logging.getLogger(__name__)
class NovaServices(utils.NovaScenario):
"""Benchmark scenarios for Nova agents."""
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure()
def list_services(self, host=None, binary=None):
"""List all nova services.
Measure the "nova service-list" command performance.
:param host: List nova services on host
:param binary: List nova services matching given binary
"""
self._list_services(host, binary)
|
eonpatapon/rally
|
rally/plugins/openstack/scenarios/nova/services.py
|
Python
|
apache-2.0
| 1,390
|