| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
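For orientation, a minimal sketch of reading rows with this schema via the `datasets` library; the dataset id `org/python-apache-code` below is a placeholder, not the real identifier:

```python
from datasets import load_dataset

# "org/python-apache-code" is a hypothetical dataset id used only for illustration.
ds = load_dataset("org/python-apache-code", split="train")

row = ds[0]
# Each row carries the columns described in the table above.
print(row["repo_name"], row["path"], row["language"], row["license"], row["size"])
print(row["code"][:200])  # first 200 characters of the source file
```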
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Float, Tuple
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_ints
from pychron.hardware.axis import Axis
MAPPING = dict(
acceleration="HA",
deceleration="HD",
# emergency_deceleration = 'HE',
jerk="HJ",
velocity="HV",
axes="HN",
)
class NewportGroup(Axis):
# acceleration = Float
# deceleration = Float
emergency_deceleration = None
jerk = Float
# velocity = Float
name = "GroupedAxes"
machine_velocity = Float
machine_acceleration = Float
machine_deceleration = Float
axes = Tuple
# calculate_parameters = Bool(True)
id = None
def _set_acceleration(self, v):
self._acceleration = v
def _set_deceleration(self, v):
self._deceleration = v
def _set_velocity(self, v):
self._velocity = v
def load(self, path):
config = self.get_configuration(path)
for attr in [
"acceleration",
"deceleration",
# 'emergency_deceleration',
"jerk",
"velocity",
]:
self.set_attribute(config, attr, "General", attr, cast="float")
self.set_attribute(config, "id", "General", "id", cast="int")
axes = self.config_get(config, "General", "axes")
self.axes = tuple(csv_to_ints(axes))
self.nominal_velocity = self.velocity
self.nominal_acceleration = self.acceleration
self.nominal_deceleration = self.deceleration
def build_command(self, new_group):
cmds = []
for key, value in MAPPING.items():
if key != "axes":
cmds.append("{}{}{:0.5f}".format(self.id, value, getattr(self, key)))
if new_group:
gid = "{:n}HN{}".format(self.id, ",".join(map(str, self.axes)))
cmds = [gid] + cmds
return ";".join(cmds)
# ============= EOF ==============================================
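As a rough illustration of the serial command string `build_command` assembles, here is a standalone sketch using a made-up group id of 1, axes (1, 2), and arbitrary parameter values; none of these numbers come from a real configuration:

```python
# Standalone sketch of the ';'-joined command produced by build_command above.
mapping = [("acceleration", "HA"), ("deceleration", "HD"),
           ("jerk", "HJ"), ("velocity", "HV")]
params = {"acceleration": 5.0, "deceleration": 5.0, "jerk": 10.0, "velocity": 2.5}
group_id, axes = 1, (1, 2)

cmds = ["{}{}{:0.5f}".format(group_id, code, params[key]) for key, code in mapping]
cmds = ["{:n}HN{}".format(group_id, ",".join(map(str, axes)))] + cmds  # define the group first
print(";".join(cmds))
# -> 1HN1,2;1HA5.00000;1HD5.00000;1HJ10.00000;1HV2.50000
```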
| USGSDenverPychron/pychron | pychron/hardware/newport/newport_group.py | Python | apache-2.0 | 2,866 |
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class SimplySupplementsSpider(BaseSpider):
name = 'simplysupplements.net-merckgroup'
allowed_domains = ['www.simplysupplements.net', 'simplysupplements.net']
start_urls = ('http://www.simplysupplements.net/product-a-to-z/',)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# getting product links from A-Z product list
links = hxs.select('//ul[@id="product-a-to-z"]/li/a/@href').extract()
for prod_url in links:
url = urljoin_rfc(get_base_url(response), prod_url)
yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
name = hxs.select('//div[@class="innercol"]/h1/text()').extract()
if name:
url = response.url
url = urljoin_rfc(get_base_url(response), url)
skus = hxs.select('//td[@class="size"]/strong/text()').extract()
prices = hxs.select('//td[@class="price"]/text()').extract()
skus_prices = zip(skus, prices)
for sku, price in skus_prices:
loader = ProductLoader(item=Product(), selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name[0].strip() + ' ' + sku.strip(':'))
#loader.add_value('sku', sku)
loader.add_value('price', price)
yield loader.load_item()
| 0--key/lib | portfolio/Python/scrapy/merckgroup/simplysupplements.py | Python | apache-2.0 | 2,128 |
#
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class Listener(neutron.NeutronResource):
"""A resource for managing LBaaS v2 Listeners.
This resource creates and manages Neutron LBaaS v2 Listeners,
which represent a listening endpoint for the vip.
"""
support_status = support.SupportStatus(version='6.0.0')
required_service_extension = 'lbaasv2'
PROPERTIES = (
PROTOCOL_PORT, PROTOCOL, LOADBALANCER, NAME,
ADMIN_STATE_UP, DESCRIPTION, DEFAULT_TLS_CONTAINER_REF,
SNI_CONTAINER_REFS, CONNECTION_LIMIT, TENANT_ID
) = (
'protocol_port', 'protocol', 'loadbalancer', 'name',
'admin_state_up', 'description', 'default_tls_container_ref',
'sni_container_refs', 'connection_limit', 'tenant_id'
)
PROTOCOLS = (
TCP, HTTP, HTTPS, TERMINATED_HTTPS,
) = (
'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS',
)
ATTRIBUTES = (
LOADBALANCERS_ATTR, DEFAULT_POOL_ID_ATTR
) = (
'loadbalancers', 'default_pool_id'
)
properties_schema = {
PROTOCOL_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP or UDP port on which to listen for client traffic.'),
required=True,
constraints=[
constraints.Range(1, 65535),
]
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Protocol on which to listen for the client traffic.'),
required=True,
constraints=[
constraints.AllowedValues(PROTOCOLS),
]
),
LOADBALANCER: properties.Schema(
properties.Schema.STRING,
_('ID or name of the load balancer with which listener '
'is associated.'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.lbaas.loadbalancer')
]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of this listener.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this listener.'),
update_allowed=True,
default=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of this listener.'),
update_allowed=True,
default=''
),
DEFAULT_TLS_CONTAINER_REF: properties.Schema(
properties.Schema.STRING,
_('Default TLS container reference to retrieve TLS '
'information.'),
update_allowed=True
),
SNI_CONTAINER_REFS: properties.Schema(
properties.Schema.LIST,
_('List of TLS container references for SNI.'),
update_allowed=True
),
CONNECTION_LIMIT: properties.Schema(
properties.Schema.INTEGER,
_('The maximum number of connections permitted for this '
'load balancer. Defaults to -1, which is infinite.'),
update_allowed=True,
default=-1,
constraints=[
constraints.Range(min=-1),
]
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant who owns the listener.')
),
}
attributes_schema = {
LOADBALANCERS_ATTR: attributes.Schema(
_('ID of the load balancer this listener is associated to.'),
type=attributes.Schema.LIST
),
DEFAULT_POOL_ID_ATTR: attributes.Schema(
_('ID of the default pool this listener is associated to.'),
type=attributes.Schema.STRING
)
}
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.LOADBALANCER],
client_plugin=self.client_plugin(),
finder='find_resourceid_by_name_or_id',
entity='loadbalancer'
),
]
def validate(self):
res = super(Listener, self).validate()
if res:
return res
if self.properties[self.PROTOCOL] == self.TERMINATED_HTTPS:
if self.properties[self.DEFAULT_TLS_CONTAINER_REF] is None:
msg = (_('Property %(ref)s required when protocol is '
'%(term)s.') % {'ref': self.DEFAULT_TLS_CONTAINER_REF,
'term': self.TERMINATED_HTTPS})
raise exception.StackValidationFailed(message=msg)
def _check_lb_status(self):
lb_id = self.properties[self.LOADBALANCER]
return self.client_plugin().check_lb_status(lb_id)
def handle_create(self):
properties = self.prepare_properties(
self.properties,
self.physical_resource_name())
properties['loadbalancer_id'] = properties.pop(self.LOADBALANCER)
return properties
def check_create_complete(self, properties):
if self.resource_id is None:
try:
listener = self.client().create_listener(
{'listener': properties})['listener']
self.resource_id_set(listener['id'])
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def _show_resource(self):
return self.client().show_listener(
self.resource_id)['listener']
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self._update_called = False
return prop_diff
def check_update_complete(self, prop_diff):
if not prop_diff:
return True
if not self._update_called:
try:
self.client().update_listener(self.resource_id,
{'listener': prop_diff})
self._update_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
raise
return self._check_lb_status()
def handle_delete(self):
self._delete_called = False
def check_delete_complete(self, data):
if self.resource_id is None:
return True
if not self._delete_called:
try:
self.client().delete_listener(self.resource_id)
self._delete_called = True
except Exception as ex:
if self.client_plugin().is_invalid(ex):
return False
elif self.client_plugin().is_not_found(ex):
return True
raise
return self._check_lb_status()
def resource_mapping():
return {
'OS::Neutron::LBaaS::Listener': Listener,
}
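A hedged sketch of how the schema above might be filled in, written as a Python dict that mirrors a Heat template resource; `my_loadbalancer` and the Barbican container reference are placeholders, not values from any real stack:

```python
# Hypothetical resource fragment for the Listener type defined above.
listener_resource = {
    "type": "OS::Neutron::LBaaS::Listener",
    "properties": {
        "protocol_port": 443,                    # required, 1-65535
        "protocol": "TERMINATED_HTTPS",          # validate() then requires the TLS ref below
        "loadbalancer": "my_loadbalancer",       # placeholder load balancer name or ID
        "default_tls_container_ref": "http://barbican.example/v1/containers/ref",
        "connection_limit": -1,                  # -1 means unlimited
    },
}
```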
| cwolferh/heat-scratch | heat/engine/resources/openstack/neutron/lbaas/listener.py | Python | apache-2.0 | 7,961 |
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import nsiqcppstyle_checker
import unittest
import nsiqcppstyle_rulemanager
import nsiqcppstyle_reporter
import nsiqcppstyle_state
errors = []
def AddError(err):
errors.append(err)
def CheckErrorContent(msg):
for err in errors :
if err[1] == msg :
return True
return False
def MockError(token, category, message):
AddError((token, category, message))
print token, category, message
class nct(unittest.TestCase):
def setUp(self):
nsiqcppstyle_rulemanager.ruleManager.ResetRules()
nsiqcppstyle_rulemanager.ruleManager.ResetRegisteredRules()
nsiqcppstyle_state._nsiqcppstyle_state.verbose = True
nsiqcppstyle_reporter.Error = MockError
self.setUpRule()
global errors
errors = []
def Analyze(self, filename, data):
nsiqcppstyle_checker.ProcessFile(nsiqcppstyle_rulemanager.ruleManager, filename, data)
| DLR-SC/tigl | thirdparty/nsiqcppstyle/nsiqunittest/nsiqcppstyle_unittestbase.py | Python | apache-2.0 | 2,439 |
# Copyright (c) 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
from urllib import quote, unquote
from xml.sax import saxutils
from swift.common.swob import Request, HTTPBadGateway, \
HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
wsgify
from swift.common.utils import json, TRUE_VALUES
from swift.common.constraints import check_utf8, MAX_FILE_SIZE
from swift.common.http import HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, \
HTTP_NOT_FOUND
from swift.common.constraints import MAX_OBJECT_NAME_LENGTH, \
MAX_CONTAINER_NAME_LENGTH
MAX_PATH_LENGTH = MAX_OBJECT_NAME_LENGTH + MAX_CONTAINER_NAME_LENGTH + 2
class CreateContainerError(Exception):
def __init__(self, msg, status_int, status):
self.status_int = status_int
self.status = status
Exception.__init__(self, msg)
ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
'text/xml']
def get_response_body(data_format, data_dict, error_list):
"""
Returns a properly formatted response body according to format.
:params data_format: resulting format
:params data_dict: generated data about results.
:params error_list: list of quoted filenames that failed
"""
if data_format == 'text/plain':
output = ''
for key in sorted(data_dict.keys()):
output += '%s: %s\n' % (key, data_dict[key])
output += 'Errors:\n'
output += '\n'.join(
['%s, %s' % (name, status)
for name, status in error_list])
return output
if data_format == 'application/json':
data_dict['Errors'] = error_list
return json.dumps(data_dict)
if data_format.endswith('/xml'):
output = '<?xml version="1.0" encoding="UTF-8"?>\n<delete>\n'
for key in sorted(data_dict.keys()):
xml_key = key.replace(' ', '_').lower()
output += '<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key)
output += '<errors>\n'
output += '\n'.join(
['<object>'
'<name>%s</name><status>%s</status>'
'</object>' % (saxutils.escape(name), status) for
name, status in error_list])
output += '</errors>\n</delete>\n'
return output
raise HTTPNotAcceptable('Invalid output type')
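# Hedged example of the text/plain body get_response_body() builds for a bulk
# delete that removed two objects and failed on one (all values invented):
#
#   Number Deleted: 2
#   Number Not Found: 0
#   Errors:
#   /cont/bad%20obj, 412 Precondition Failed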
class Bulk(object):
"""
Middleware that will do many operations on a single request.
Extract Archive:
Expand tar files into a swift account. Request must be a PUT with the
query parameter ?extract-archive=format specifying the format of archive
file. Accepted formats are tar, tar.gz, and tar.bz2.
For a PUT to the following url:
/v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
container, a pseudo-directory within a container, or an empty string. The
destination of a file in the archive will be built as follows:
/v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
Where FILE_PATH is the file name from the listing in the tar file.
If the UPLOAD_PATH is an empty string, containers will be auto created
accordingly and files in the tar that would not map to any container (files
in the base directory) will be ignored.
Only regular files will be uploaded. Empty directories, symlinks, etc will
not be uploaded.
If all valid files were uploaded successfully, an HTTPCreated response is
returned. If any files failed to be created, an HTTPBadGateway
response. In both cases the response body will specify the number of files
successfully uploaded and a list of the files that failed. The return body
will be formatted in the way specified in the request's Accept header.
Acceptable formats are text/plain, application/json, application/xml, and
text/xml.
There are proxy logs created for each file (which becomes a subrequest) in
the tar. The subrequest's proxy log will have a swift.source set to "EA" and
its content length will reflect the unzipped size of the file. If
double proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the unexpanded size of the tar.gz).
Bulk Delete:
Will delete multiple objects or containers from their account with a
single request. Responds to DELETE requests with query parameter
?bulk-delete set. The Content-Type should be set to text/plain.
The body of the DELETE request will be a newline separated list of url
encoded objects to delete. You can only delete 1000 (configurable) objects
per request. The objects specified in the DELETE request body must be URL
encoded and in the form:
/container_name/obj_name
or for a container (which must be empty at time of delete)
/container_name
If all items were successfully deleted (or did not exist), an HTTPOk is
returned. If any failed to delete, an HTTPBadGateway is returned. In
both cases the response body will specify the number of items
successfully deleted, not found, and a list of those that failed.
The return body will be formatted in the way specified in the request's
Accept header. Acceptable formats are text/plain, application/json,
application/xml, and text/xml.
There are proxy logs created for each object or container (which becomes a
subrequest) that is deleted. The subrequest's proxy log will have a
swift.source set to "BD" and a content length of 0. If double
proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the list of objects/containers to be deleted).
"""
def __init__(self, app, conf):
self.app = app
self.max_containers = int(
conf.get('max_containers_per_extraction', 10000))
self.max_failed_extractions = int(
conf.get('max_failed_extractions', 1000))
self.max_deletes_per_request = int(
conf.get('max_deletes_per_request', 1000))
def create_container(self, req, container_path):
"""
Makes a subrequest to create a new container.
:params container_path: an unquoted path to a container to be created
:returns: None on success
:raises: CreateContainerError on creation error
"""
new_env = req.environ.copy()
new_env['PATH_INFO'] = container_path
new_env['swift.source'] = 'EA'
create_cont_req = Request.blank(container_path, environ=new_env)
resp = create_cont_req.get_response(self.app)
if resp.status_int // 100 != 2:
raise CreateContainerError(
"Create Container Failed: " + container_path,
resp.status_int, resp.status)
def get_objs_to_delete(self, req):
"""
Will populate objs_to_delete with data from request input.
:params req: a Swob request
:returns: a list of the contents of req.body when separated by newline.
:raises: HTTPException on failures
"""
line = ''
data_remaining = True
objs_to_delete = []
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
raise HTTPBadRequest('Invalid request: no content sent.')
while data_remaining:
if len(objs_to_delete) > self.max_deletes_per_request:
raise HTTPRequestEntityTooLarge(
'Maximum Bulk Deletes: %d per request' %
self.max_deletes_per_request)
if '\n' in line:
obj_to_delete, line = line.split('\n', 1)
objs_to_delete.append(unquote(obj_to_delete))
else:
data = req.body_file.read(MAX_PATH_LENGTH)
if data:
line += data
else:
data_remaining = False
if line.strip():
objs_to_delete.append(unquote(line))
if len(line) > MAX_PATH_LENGTH * 2:
raise HTTPBadRequest('Invalid File Name')
return objs_to_delete
def handle_delete(self, req, objs_to_delete=None, user_agent='BulkDelete',
swift_source='BD'):
"""
:params req: a swob Request
:raises HTTPException: on unhandled errors
:returns: a swob Response
"""
try:
vrs, account, _junk = req.split_path(2, 3, True)
except ValueError:
return HTTPNotFound(request=req)
incoming_format = req.headers.get('Content-Type')
if incoming_format and not incoming_format.startswith('text/plain'):
# For now only accept newline separated object names
return HTTPNotAcceptable(request=req)
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if not out_content_type:
return HTTPNotAcceptable(request=req)
if objs_to_delete is None:
objs_to_delete = self.get_objs_to_delete(req)
failed_files = []
success_count = not_found_count = 0
failed_file_response_type = HTTPBadRequest
for obj_to_delete in objs_to_delete:
obj_to_delete = obj_to_delete.strip().lstrip('/')
if not obj_to_delete:
continue
delete_path = '/'.join(['', vrs, account, obj_to_delete])
if not check_utf8(delete_path):
failed_files.append([quote(delete_path),
HTTPPreconditionFailed().status])
continue
new_env = req.environ.copy()
new_env['PATH_INFO'] = delete_path
del(new_env['wsgi.input'])
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent)
new_env['swift.source'] = swift_source
delete_obj_req = Request.blank(delete_path, new_env)
resp = delete_obj_req.get_response(self.app)
if resp.status_int // 100 == 2:
success_count += 1
elif resp.status_int == HTTP_NOT_FOUND:
not_found_count += 1
elif resp.status_int == HTTP_UNAUTHORIZED:
return HTTPUnauthorized(request=req)
else:
if resp.status_int // 100 == 5:
failed_file_response_type = HTTPBadGateway
failed_files.append([quote(delete_path), resp.status])
resp_body = get_response_body(
out_content_type,
{'Number Deleted': success_count,
'Number Not Found': not_found_count},
failed_files)
if (success_count or not_found_count) and not failed_files:
return HTTPOk(resp_body, content_type=out_content_type)
if failed_files:
return failed_file_response_type(
resp_body, content_type=out_content_type)
return HTTPBadRequest('Invalid bulk delete.')
def handle_extract(self, req, compress_type):
"""
:params req: a swob Request
:params compress_type: specifying the compression type of the tar.
Accepts '', 'gz', or 'bz2'.
:raises HTTPException: on unhandled errors
:returns: a swob response to request
"""
success_count = 0
failed_files = []
existing_containers = set()
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if not out_content_type:
return HTTPNotAcceptable(request=req)
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
return HTTPBadRequest('Invalid request: no content sent.')
try:
vrs, account, extract_base = req.split_path(2, 3, True)
except ValueError:
return HTTPNotFound(request=req)
extract_base = extract_base or ''
extract_base = extract_base.rstrip('/')
try:
tar = tarfile.open(mode='r|' + compress_type,
fileobj=req.body_file)
while True:
tar_info = tar.next()
if tar_info is None or \
len(failed_files) >= self.max_failed_extractions:
break
if tar_info.isfile():
obj_path = tar_info.name
if obj_path.startswith('./'):
obj_path = obj_path[2:]
obj_path = obj_path.lstrip('/')
if extract_base:
obj_path = extract_base + '/' + obj_path
if '/' not in obj_path:
continue # ignore base level file
destination = '/'.join(
['', vrs, account, obj_path])
container = obj_path.split('/', 1)[0]
if not check_utf8(destination):
failed_files.append(
[quote(destination[:MAX_PATH_LENGTH]),
HTTPPreconditionFailed().status])
continue
if tar_info.size > MAX_FILE_SIZE:
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]),
HTTPRequestEntityTooLarge().status])
continue
if container not in existing_containers:
try:
self.create_container(
req, '/'.join(['', vrs, account, container]))
existing_containers.add(container)
except CreateContainerError, err:
if err.status_int == HTTP_UNAUTHORIZED:
return HTTPUnauthorized(request=req)
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]),
err.status])
continue
except ValueError:
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]),
HTTP_BAD_REQUEST])
continue
if len(existing_containers) > self.max_containers:
return HTTPBadRequest(
'More than %d base level containers in tar.' %
self.max_containers)
tar_file = tar.extractfile(tar_info)
new_env = req.environ.copy()
new_env['wsgi.input'] = tar_file
new_env['PATH_INFO'] = destination
new_env['CONTENT_LENGTH'] = tar_info.size
new_env['swift.source'] = 'EA'
new_env['HTTP_USER_AGENT'] = \
'%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
create_obj_req = Request.blank(destination, new_env)
resp = create_obj_req.get_response(self.app)
if resp.status_int // 100 == 2:
success_count += 1
else:
if resp.status_int == HTTP_UNAUTHORIZED:
return HTTPUnauthorized(request=req)
failed_files.append([
quote(destination[:MAX_PATH_LENGTH]), resp.status])
resp_body = get_response_body(
out_content_type,
{'Number Files Created': success_count},
failed_files)
if success_count and not failed_files:
return HTTPCreated(resp_body, content_type=out_content_type)
if failed_files:
return HTTPBadGateway(resp_body, content_type=out_content_type)
return HTTPBadRequest('Invalid Tar File: No Valid Files')
except tarfile.TarError, tar_error:
return HTTPBadRequest('Invalid Tar File: %s' % tar_error)
@wsgify
def __call__(self, req):
extract_type = req.params.get('extract-archive')
if extract_type is not None and req.method == 'PUT':
archive_type = {
'tar': '', 'tar.gz': 'gz',
'tar.bz2': 'bz2'}.get(extract_type.lower().strip('.'))
if archive_type is not None:
return self.handle_extract(req, archive_type)
else:
return HTTPBadRequest("Unsupported archive format")
if 'bulk-delete' in req.params and req.method == 'DELETE':
return self.handle_delete(req)
return self.app
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def bulk_filter(app):
return Bulk(app, conf)
return bulk_filter
| Triv90/SwiftUml | swift/common/middleware/bulk.py | Python | apache-2.0 | 17,862 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drop-in replacement for django.contrib.messages which handles Horizon's
messaging needs (e.g. AJAX communication, etc.).
"""
from django.contrib import messages as _messages
from django.contrib.messages import constants
from django.utils.encoding import force_unicode # noqa
from django.utils.safestring import SafeData # noqa
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""Attempts to add a message to the request using the 'messages' app."""
if request.is_ajax():
tag = constants.DEFAULT_TAGS[level]
# if message is marked as safe, pass "safe" tag as extra_tags so that
# client can skip HTML escape for the message when rendering
if isinstance(message, SafeData):
extra_tags = extra_tags + ' safe'
request.horizon['async_messages'].append([tag,
force_unicode(message),
extra_tags])
else:
return _messages.add_message(request, level, message,
extra_tags, fail_silently)
def debug(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``DEBUG`` level."""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``INFO`` level."""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``SUCCESS`` level."""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``WARNING`` level."""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``ERROR`` level."""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
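A brief usage sketch for the helpers above; the view function is hypothetical, but the call pattern mirrors django.contrib.messages, which this module replaces:

```python
# Hypothetical Django view using the drop-in helpers defined above.
from horizon import messages


def create_snapshot(request):
    # ... perform the work, then report the outcome to the user ...
    messages.success(request, "Snapshot created.")
```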
| ikargis/horizon_fod | horizon/messages.py | Python | apache-2.0 | 2,969 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_git_repo_volume_source import V1GitRepoVolumeSource
class TestV1GitRepoVolumeSource(unittest.TestCase):
""" V1GitRepoVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1GitRepoVolumeSource(self):
"""
Test V1GitRepoVolumeSource
"""
model = kubernetes.client.models.v1_git_repo_volume_source.V1GitRepoVolumeSource()
if __name__ == '__main__':
unittest.main()
| djkonro/client-python | kubernetes/test/test_v1_git_repo_volume_source.py | Python | apache-2.0 | 911 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# import json
# replace with simplejson
import simplejson as json
import os
import time
import logging
import traceback
import sys
from oslo_config import cfg
from yabgp.common import constants as bgp_cons
from yabgp.handler import BaseHandler
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MSG_PROCESS_OPTS = [
cfg.BoolOpt('write_disk',
default=True,
help='Whether the BGP message is written to disk'),
cfg.StrOpt('write_dir',
default=os.path.join(os.environ.get('HOME') or '.', 'data/bgp/'),
help='The BGP messages storage path'),
cfg.IntOpt('write_msg_max_size',
default=500,
help='The Max size of one BGP message file, the unit is MB'),
cfg.BoolOpt('write_keepalive',
default=False,
help='Whether write keepalive message to disk')
]
CONF.register_opts(MSG_PROCESS_OPTS, group='message')
class DefaultHandler(BaseHandler):
def __init__(self):
super(DefaultHandler, self).__init__()
'''
{<peer>: (<path>, <current file>)}
'''
self.peer_files = {}
'''
{<peer>: <seq number>}
'''
self.msg_sequence = {}
def init(self):
if CONF.message.write_disk:
self.init_msg_file(CONF.bgp.running_config['remote_addr'].lower())
def init_msg_file(self, peer_addr):
msg_file_path_for_peer = os.path.join(
CONF.message.write_dir,
peer_addr
)
if not os.path.exists(msg_file_path_for_peer):
os.makedirs(msg_file_path_for_peer)
LOG.info('Create dir %s for peer %s', msg_file_path_for_peer, peer_addr)
LOG.info('BGP message file path is %s', msg_file_path_for_peer)
if msg_file_path_for_peer and peer_addr not in self.peer_files:
msg_path = msg_file_path_for_peer + '/msg/'
if not os.path.exists(msg_path):
os.makedirs(msg_path)
# try get latest file and msg sequence if any
last_msg_seq, msg_file_name = DefaultHandler.get_last_seq_and_file(msg_path)
if not msg_file_name:
msg_file_name = "%s.msg" % time.time()
# store the message sequence
self.msg_sequence[peer_addr] = last_msg_seq + 1
msg_file = open(os.path.join(msg_path, msg_file_name), 'a')
msg_file.flush()
self.peer_files[peer_addr] = (msg_path, msg_file)
LOG.info('BGP message file %s', msg_file_name)
LOG.info('The last bgp message seq number is %s', last_msg_seq)
@staticmethod
def get_last_seq_and_file(msg_path):
"""
Get the last sequence number in the latest log file.
"""
LOG.info('get the last bgp message seq for this peer')
last_seq = 0
# first get the last file
file_list = os.listdir(msg_path)
if not file_list:
return last_seq, None
file_list.sort()
msg_file_name = file_list[-1]
try:
with open(msg_path + msg_file_name, 'r') as fh:
line = None
for line in fh:
pass
last = line
if line:
if last.startswith('['):
last_seq = eval(last)[1]
elif last.startswith('{'):
last_seq = json.loads(last)['seq']
except OSError:
LOG.error('Error when reading bgp message files')
except Exception as e:
LOG.debug(traceback.format_exc())
LOG.error(e)
sys.exit()
return last_seq, msg_file_name
def write_msg(self, peer, timestamp, msg_type, msg):
"""
write bgp message into local disk file
:param peer: peer address
:param timestamp: timestamp
:param msg_type: message type (0,1,2,3,4,5,6)
:param msg: message dict
:return:
"""
msg_path, msg_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
msg_seq = self.msg_sequence[peer.lower()]
msg_record = {
't': timestamp,
'seq': msg_seq,
'type': msg_type
}
msg_record.update(msg)
try:
json.dump(msg_record, msg_file)
except Exception as e:
LOG.error(e)
LOG.info('raw message %s', msg)
msg_file.write('\n')
self.msg_sequence[peer.lower()] += 1
msg_file.flush()
os.fsync(msg_file.fileno())
def check_file_size(self, peer):
"""if the size of the msg file is bigger than 'max_msg_file_size',
then save as and re-open a new file.
"""
msg_path, cur_file = self.peer_files.get(peer.lower(), (None, None))
if msg_path:
if os.path.getsize(cur_file.name) >= CONF.message.write_msg_max_size:
cur_file.close()
msg_file_name = "%s.msg" % time.time()
LOG.info('Open a new message file %s', msg_file_name)
msg_file = open(os.path.join(msg_path + msg_file_name), 'a')
self.peer_files[peer.lower()] = (msg_path, msg_file)
return True
return False
def on_update_error(self, peer, timestamp, msg):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=6,
msg={'msg': msg}
)
def update_received(self, peer, timestamp, msg):
# write message to disk
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=bgp_cons.MSG_UPDATE,
msg={"msg": msg}
)
self.check_file_size(peer.factory.peer_addr)
def keepalive_received(self, peer, timestamp):
"""
keepalive message default handler
:param peer:
:param timestamp:
:return:
"""
if peer.msg_recv_stat['Keepalives'] == 1:
# do something with the connection establish event
pass
if CONF.message.write_keepalive:
# write bgp message
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=4,
msg={"msg": None}
)
def open_received(self, peer, timestamp, result):
# write bgp message
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=timestamp,
msg_type=1,
msg={"msg": result}
)
def route_refresh_received(self, peer, msg, msg_type):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=msg_type,
msg={"msg": msg}
)
def notification_received(self, peer, msg):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=3,
msg={"msg": msg}
)
def on_connection_lost(self, peer):
self.write_msg(
peer=peer.factory.peer_addr,
timestamp=time.time(),
msg_type=bgp_cons.MSG_BGP_CLOSED,
msg={"msg": None}
)
def on_connection_failed(self, peer, msg):
self.write_msg(
peer=peer,
timestamp=time.time(),
msg_type=0,
msg={"msg": msg}
)
def on_established(self, peer, msg):
pass
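For reference, a hedged reconstruction of one line in a message file as written by `write_msg` above; the timestamp, sequence number, and payload are invented:

```python
# write_msg() builds the record, merges in the message body, then json.dump()s
# it as one line per message.
record = {"t": 1450000000.12, "seq": 42, "type": 2}      # timestamp, per-peer seq, BGP msg type
record.update({"msg": {"attr": {}, "nlri": []}})          # body passed in by the caller
# resulting line: {"t": 1450000000.12, "seq": 42, "type": 2, "msg": {"attr": {}, "nlri": []}}
```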
| meidli/yabgp | yabgp/handler/default_handler.py | Python | apache-2.0 | 7,759 |
# Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: service.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the service.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJzlvX10ZFd1J0pVqaTSbbV0VN2222o3fV3+aKktVbvbxoY2xqOW5LZMd6unpIaYGSyuqq6kcp'
'fqirpVLcuBlWQyhI98vEcAY+eBSfgyMQFCgCQDi5dhArMWvIQk7zFkzcKTYQWcQAjmw7OAISHv'
'7d8++5x7bpXaNoTO/PG8Elq177n77LPPPvvsvc8++3r/UPB2x2HrQr0aljdbUTsqDtbqcTW6EL'
'a2x/y1KFprhEf4wUpn9UgtjKut+mY7aunGpX6v7yVRvVZqe2qWn62ElTDejJpxWJz1dpn29ai5'
'L+NnxncdK5U1zrLBWb6z3ghnLd7FsF1xXyuOeQWhL96X9XPjgxX7+9hJb3DWEFs87hUMCcWRsh'
'1EGfSN7XcA3YSWnnP3N/u9gupTz1H7Vcb7WKYwxD+Kxx7L+DPR5narvrbe9o/dePQWf2k99E+d'
'm5n3pzvt9agVl/3pRsPnBrHfCkFaWCt7/rk49KNVv71ej/046rSqoV+NaqFPP9dARjOs+Z1mLW'
'xRk9Cf3gyqQEyjIoIm/ZeErZgG7x8r3+hRg6DtV4OmvxL6qxG95Neb/Nap+Zm5M4tz/ipxsOx5'
'hUJW9YNs+jNXeI4q0N+HvalCP/29i/4eUZmxg75lmF8TPhCBwlBCMoTmhQy9UFCjXsS/soRgWG'
'XVTWPLvmEejbXdaTVjP/Ab9biNsRosfkAkBr4jKz1TbAYFQmKMZcMPGg3qXpkOiYRhGs2wA8kS'
'RKm9DiRHkIPqiHeo0EckFjHGsf0+Jhx8Jo6FG5vtbX8jjONgLeTR9THqIo1uyLuZfmXUZXrax6'
'73uwXjYhyit/i9gtrntfgXOLSPOHTz2Io/mwgvDbDZDurEJUeiY/CKRmtxToIBdRKE7U3DvEaD'
'ONQKmnG9Xb8AOjZDkpVmtc4UKNNnoZ97vcaBZAhyLbEkgeQIckzd5J0QSEaN0TsHx475i3bCWq'
'Ghxm8GxC6fVueFeo1kdGWbppKluuX0nCn0MRYXkifILpKZBIKeimrMgeQIckA9d6WfV/9N3lt/'
'K+c9o5IpjnSpjNJpb7RHpIrP9/qwEEjP5EjPXPsMeuYswBV+o/TVPm/PDk+LRa8P3GDNNVjhv4'
'v7vAFaq+dJnEgbAWx+Fp/reXaetvflWFU5kOIN3uhmZ6VRry47zTxqlq8o/WA2aXzIG9kKg/Nu'
'013cdBhgp+GMNyTivQz52dfHo/d7Rt898l3y1hK9VJz2BsNmZ0NjyF+Ef3PUohtLAa8JigGRoH'
'39jOBQDwIRt24c5j0aymB4f5sUIHaLAUZy3Q6zGDZq3SiS94q3eAORXmb7CrzhXLWjICzoNhXT'
'uDjvKa2ml6Gml+vN1WjfICM42DsQbjhD7eapWWU4Tv0uXu71x9u07u/fN8QSIr9K/6HfG3k2In'
'abl1/FKHm7e9Y80O+kmdj/EzJx2tvVDON2WNMSkXuWMuXpl3pFqu8nEqmf8UYsScukDNeMbB55'
'JkrKc+a9Cl6rDIep32SZeFEzjFZpeVUbJCc7c2kBTXq4FGlotVF8QSJqAxeRlNN6kfVI2zlv2N'
'gKMrJBJqL8jCOryGt6YLtb7s/iNZ4FLLNYeayFhgzwDMHGHvCG0+wp7vXycTtotVkK8xX9o6i8'
'HCkZ1nL5Cv4s/qtkwDke8PW9M5rC3D3usVu93akBPNuuS6/yLtsRNQnJ3k6z3myHrU2ySGjcuq'
't9fztwEZk757bWWCp7Or3Aw4OFrw2on6f/sqVP9Xt7d1ozOy5fWv4kwSthi5mUr8gvWhH5RrAS'
'Nmg1ZMaHj93wrFZl+RReqeg3iy/y+kRFA8PhZ4cBa6nC7xX3e4P4V8tGP9NcAAByAUObl0ktNF'
'ub/Q3BqoWrQafRXr4QNDohCzwJlgBfAljxoLdLr6o6vXM/a898RS+0eUDQ/X0xrWURTe4CAO7+'
'1m7FfWDn4fWsJdoqtTUhUx809o0SgkJlWIMXBFr6/azXx4plxNu1dM/ZueXZhXMnTs2pTHHY8x'
'hw56mF6SWVtb/nzyzdcrPK2RfOaUCf2+CmYypPAjukEcz/zNwstehPQ6jNQHG3N8iQEwsLp1TB'
'4lxcqsyfOakGLc6TlYVzZ5VnMZyeW1ycPjmndtkWJ+5ZmltUQymyqIvdtou5M+dOq+HiqLdbd2'
'GIGOkCEaUqIURjGU0BqEWxNOPlWQxJ3IdPTZ+YO7W8cHZpfuHM9CninYVV5v71ufnK3Czxz4Gd'
'nZteIliuVPX27qRQd1xCjixkLyILjKtbFkpfyXp7dthUduzkDi+vZVlvsxM77k4s2T1bLb/nmh'
'q5i5gaQNEjsC/vUf56f7zl2eyPDPvxNoH8DpvAbd5oD6JnrYx/MePtuxhznkElZlMq8bZuDl59'
'8UnometHM97lO5uUO9LwIq9/IySX3ZhVvXvXaX7cPdnylrvb5y5mF2pqeih9fda7bEfkOxJ6wP'
'Pqzc1OW5tOWhMPMoSVF7Rsp22f5/i5p0Hc4PkJoX1M6HMvMtIewbzRU9VGPWy2l+N2Kww26s01'
'3moKx/OrQSMOKyP68aJ5ije0b+i80Z96Qz+2b5TeOOjtcgzw4tXe0H3BhWDZOFWaE7sAOyuO1Y'
'3eXm5CY6SOqo0gjplpBW5axLMFPJoxT4rP8/bwGxu0N9U3G+Ey3LyYtxxL2ShanJYGoCgms/AA'
'v7YWNsNW0A6Xw1d2qO0yueTL60G8vm8vEJzI7stUrkTDk9JujptNN2t3UaPice9yxkIcoQEvV9'
'fD6vnlTnv1+fv2u/0zhYvcZgZNzlGL4qI3hMnYqD9ANEct3kOHd1BNDgfLC/LCafI/jucXz87N'
'zVZ2GSx3Ri0I1FpkGbxLC9RaZNhLzKpW9ZjJN7WxNpViVrV6UjcwMQNaD5clzHJfHO0ZZfer1O'
'Pmdu+LxVSPm9vdr93q7d1c3+x977D7XpGadL94HXvmrbBKc1Xbd4Xb3HlQLJP4V5fDZrBCEhO0'
'6I9430Fu3NdudciLqFbn+OE0Pyse9kajlfuqWiKXCc1q/f591zJ7R/CA5fEsg4sThDteD1qbrJ'
'Jjmoxw33W6qYafMWCsiHirvto2GA/pFcEwwTbuKXAi1fE4NxsmuNsvbQZomXQ6oQ03AiY93uxd'
'jkak6IJa0A6c1pPcGmw/LQ9TdLY6K9tWsKY0nYAZ0bpkxnnpuDfkyn1x0NOSTwYJGUEzC7MwX1'
'42R7YImVGn5pfmlivnzizNn55TOcewv7uvcL06VPqLrDec9tSKL/SuMGGVOGwvb9VbvCA3Ar05'
'WvnZK60Ww/ZLqc2d3KR4yjvYjEgBkOIIWrXlJKC1HFRJIONIb4QWy1XNaFEaJzvEtDTtEt/cxc'
'SXrOuNYJPkt93aZvu8UCkQYA6//0XcJOJmQQ3S/w4qr/TXOW/Itdfh/lR5x8qwTrvmaa378gy2'
'suP92jiu6DdhRkDYQm2MFCryq3jS678vZtz9jHun2J+D++5FRj549+LymYXK6elTFXm9eKXX1w'
'ge2E5vegx6tpNAGBCgS281DLqEi+GIl2d+FT1POKaeUyx4fTMLFSwIWgEaunx2fm6G1kTpeV6/'
'ZgIWi2UDvaR/Co6MeXru9Im5isqmp7pP5UsxrULHDv+Xccb/Y8bb5djVMIiCRiPaWg4a9SAW0f'
'AYNA3Is526f6Elklf9pUcynuo2bLvIzPyvJLP0tow3nLZmu8i7+n8peV/OertTNuyzpe6V3mi9'
'Fm5sRm0Ez5cb4YWwsa/ESqM3qJjqoTyfvHcKrx3fMz87d/rswtLcmZl7ls+defGZhZeeqah6V7'
'NLuOzPeqqbqOIV3k5k0cre442cWaA9kTbGuTvvnJtZWtRxD9t6KbXAS7+R8/bsQAmpce2xaCdq'
'6tlQX4bNcJZcSXFwyBYiLjXb9dU62fPaB9duzEgC1yGlSa+4GelDsGXCK43h1vRVlHky32zb1s'
'1wLehqDWWeqyjzxLYm+6UWdWDr6XbYOzKVXRpmm4gVn0S9hsgUY5hucsgbCdbWWkBuEGm/ZNiC'
'ueHY3V7B8AFbNThBphM721kEwprmIXVaj5eTIH6Wnhcqu+qxDYCWHiWDJX0IQb5LoRGRkOuTds'
'zN+DOcW5RPSfuKfXPs8xmvYMC03fZtBu11Rpc/kVWZCv8GnCzAJouAwPEb89oIgxo7PdHGBs1k'
'bOZV4DMCxllYuxXUG6m2fdxWmQe28XHvSoO3RlYoOVS15KV+Dm5cIQ1m5bl5t/QXGW/UuGk1y6'
'zTnhc0m1HbZVevKPe8V562L1UcBGMbnpc8uSjbaJ+SEyY+ptSOvadB8OcQflkJ1+pNiRvrHyb8'
'0mfDLyf+twx5bNFGN70nVFd0Ib4r87IXrdXb652VMrU/shY1guZacs7Kf1SnyJ+aWoucU9fbkj'
'9/kMm8M5s7efbEY9mxk7q7s4Y9lXC1EVYx5Lv//ImsN6gOqeeoXxlQGe+xkcIQ/yoe+9SQzy9U'
'o4Z/orO6iuP+KV+jOhT78Ch8VhjVdYSgfG1je6msixufLy/4881q2b9IwsV6u70ZHz9C4yA9GG'
'1SR4ZBGPumEDG1ook44nl+JazVsZRXOnxWj4P3ThwiuUISNgBZqTeD1jbTFU/6W8RNP2rxv1GH'
'6NyIaqSu9IKZ5HN06nmj3iapSc7QOdcBCRurEawSklMkBtTqOhuAXiI8Yfs4kYT/DncRxvkCbg'
'rJRiduI/0ikDSQYCW6gEfCMc8nUaSNe1Inn5jsDLfHZq2LHCTHNIL6Bg75L0IEdebwwhBBY6x1'
'iDBLh5cQ8s+iwzNJLrWo2sFaDswkHSH+R/Sk5ZOkkJ9Pm3zCap4geuj5LvV2UGfCOr8JxFC4IM'
'iVrWaUPGO+19uxx1kcjCoi6d0ItpGIQ5JCxEc+LUiChhAKImKD9mJf86SNpI8WbTg1f5UeeCYV'
'aLW9BTERCfLjzbAKCaK36hCsFmSnqaUojpl2z1+6a37RX1y4c+ml05U5n/4+W1l4Ce3cs/6Je+'
'jhnD+zcPaeyvzJu5b8uxZOzc5VFv3pM7MEPUOW/IlzSwuVRc8vTS/SqyV+Mn3mHn/uZ85W5hYX'
'/YWKP3/67Kl5wkboK9PkJs8tTvrzZ2ZOnZslP2DSJwz+mYUlzz81f5pc6Vl/aWGSu+19z1+40z'
'89V5m5i35On5gnz/se7vDO+aUz6OzOhYrnT/tnpytL8zPnTk1X/LPnKmcXFud8jGx2fnHm1DR5'
'6bNl6p/69OdeQpaJv3jX9KlT6YF6Ptk1cxVQ7w7TPzFHVE6fODWHrnics/MVMncwoOSvGWIeEX'
'hq0vMXz87NzNNfxA+yhIiieyYF6eLcvz5HreihPzt9evokjW78mbhCEzNzrjJ3GlQTKxbPnVhc'
'ml86tzTnn1xYmGVmL85VXjI/M7d4m39qYZEZdm5xjgiZnV6a5q4JB7GLntPfJ84tzjPj5s8szV'
'Uq5/jcZYJm+aXEGaJymt6dZQ4vnMFoIStzC5V7gBZ84BmY9F961xzBK2Aqc2sabICbN7PkNqMO'
'iYk0pGSc/pm5k6fmT5I1OYfHC0Dz0vnFuQmasPlFNJjnjkkGqNNzPGpMFNHl6b8d0Z3k+fTn7/'
'SnZ18yD8qlNUnA4ryIC7Nt5i7hedk79qWsJNsd98+TIoia/ypR7P74ixnkvyRo1YIJWucnAqxM'
'gkSkhEhhNXo3INrf6mukzrap+WLQvI9W9Mn1cCPYCtqT/t3h6qo/GwZQ56SfWNPEvAhJL0i0J9'
'bKidYztnKbJqb1Vbhab4qCs9l6epPm1oSLFgBZivWaCzYJfpz11YDVQRqM3JZ2YxtqJvB3SFDy'
'rBYJmtuiE5G4gi0UynI8LK+VbZuWNpGg0vw6eQ2tdjwhWYMTtFfvo78K6gb66xyAhV3yN6CT9N'
'ckQzPyN6BT9NdRhpq/8VeZ/rqVodfJ34Aeob+uZui18jegN9JfBxl6UP4G9Gb66yrv5zP096D+'
'MdZ2swk103gDIoaZSHcN+xLUKZLWyN3DVt6yJsrUCsTC84PGGslFe32DpCBqHmr7W1HrvF/rwK'
'D3V6KoTZtGsLlJv4g1DU4rfD5RcFxlxu5lCTCGBG0KG5s0JS2eOH380jNLi2HbpAACP8RDU+9p'
'UaC58MkHiG1eIfL3nq8K6kpvmH8hr/AFKqvGOYdOJz32McRzIP0E2aWe60AyBDkoeYEmVfIF6n'
'p1yDvKOY+305heRmO6xuY8IpETQ2qQ3+bKpZPweDsRtt97oU14fBGRsb80qcUXO+YkMbzBThZE'
'tkXcdEyXdisM3dTFPn7fheQJ4iYQYhgvUkV1eSqZ8UXqSjXmTdlkxjsIy3NLB3yW9dJqFBFF+K'
'e8ErToz7Bd7cpbvKMnb/GOnrzFO6jbK1N5i3eoq9QB71aBZNUJwnL12CH/jLEUZEJ5Yem8Sasg'
'HAKyRMCJFAFZIuBEigCM6gQRcJUDyRHkoPK9JYHk1CzEYmzW59QKTQLkS2cSJnQIWWJL2bxBbY'
'6xxeZQlyPqZlPU5Yi6WaJOOZAMQUZVyYGAmutIuF4tkD51krBcP7bRTR0Coc+ONtKSd9J61sbZ'
'FLsI0OUb9bWWVm5Rs7Fd9mcjWJmwxpxh9AkBLiRPEHcYWAUnaRi+A8kR5Bp1nfcCgeTV3YRlcm'
'yCXY52tDnFAaOUinc3AoeEPJFwd4qEPC3Tu4mEMQeSIch+4lsCyRHksLqBlz8g/erFhGXKtugn'
'vC9O4e3nNmb5a0iGIAdFaWhIjiA3kN42eAfUKcJSti0GCO+pFN4BwnuK8F7tQDIEKakJB5IjyC'
'TRZ/AW1OkU3gLhPZ3CWyC8pwnvQQeSIYjv4C0Q3tMpvINqIZXoPEh4F1J4BwnvAuG93IFkCHKF'
'w5lBwrugribZ/X5GQJ46R2iOjP1NhtQ7z2PYqCV52yalKLWv0rLpaIfO2hiOf0aSe0/UYT8hDl'
'ZD2r9b4QY8NJYTxLfI/NfdmK15PWjhcNxvdejpBjkvq51mVXdcb2+bxZNsgeRDTzHIpYqQN2nz'
'Y+OHOoV1I74w3CcS36jhJo97xMFzKQ56xMFzxMErHUiGIGPqsAPJEWSKZvgVAtml7oEGHjvL26'
'NOdbXKyNlG9OPOpqx9DpXpzPMSNztWYntL/7ip5FC6iyi9J0XpLlqa96R05i6i9B7SmfscSI4g'
'+0lpTxAkq15OW96racvbn9ryxKDj3HvZ6qB+X05b3RUsfFne6u7FVseos7J53WsJysrmda8lKC'
'ub171288rK5nUvb14Gb0a9grActi2wO70ihTdDU/IKu1iysju9ghbLdQ4kR5BxWj4Gb1YFdhFm'
'ZdMJUnihWIIUXlAT2EWYlU0nsItQ/64SlhtsC2wX1RTeHOGtWiWXle2iStNwvQMBngkat8Hbp2'
'pWyWVFf9dSePsIb80quazo75pVclnR3zVWchqCKzCrhOW8yiUQemuVNoF9nm8hmN811acOlIYQ'
'BGh04jpvinvdFkQR2gx1QfME3a2KXdAMQfdQH2lojqD7aVN3e86odcK6n3qeu3/nniET6z09w2'
'pZ7+k5w/j2kMyloTmCQu6KDjSr7iOsR1ItMRP39fQFWbmP+ip1QTMEvYZmMQ3NERTqwcxtXjVS'
'soiNsZGaW2yMDat2NCRDkDFHFrExNlKy2K+a0Nu2BTbGZgpvP7dxZRwbY5Nk/LADyRHEpXdAbc'
'J0sS2wMW6m8GJj3EzRi41xk+i92oHkCHItrdIPZoQ9GdUhNPer3Nj/kfE53w5a0gQw/Xaw5uus'
'uLjsV3aA8oYCW8cEn6DgeRuJEXyy5wU+pxRasypGlMtcgEoQSyPCyeFCHxexGsFm2bNLJcMUe6'
'QGfQvBUrnwNEvFWPYXUkJkrPsLKYE1Fv6F1FIxVv6F1FLRlv7W0ywVY9xv9fSMpbLV03OG8blL'
'xRj6WykVXVDbMAHtxMKe2U6JA+yZbRKHfQ4kQ5Arxa7TkBxBYNe9JiOgQfUqQnPdWCeZE20TcO'
'hx0t9ar1fXd5hzM+U7TS8CC2vEl6aObeJlbQqEiZxFzWpYTqiHHfWq1HgGiV+vSm1lsKNeRVvZ'
'QQeSI0hJXevtZrX7c7S3/lJGZXj/hKj/HO2fB7wK/4LQ/EKG+jg+Nq1daHi85KqTIRMaC4td4V'
'oUxvDJW2E1WmuST++vhy2S2sUwtK7KqMFJhDNWF9QPEHaJBJQB6KC6yQHlALpFvcD7twzKq9cA'
'z5Vjp/0ZTm+M2aVnI58sug7RuZFQ2UxWWmwveNr15VI6orHT6An/bpKPUQMgohg06oCyAF1GrZ'
'7H29/rM8TSJ4mlY9el7JXEbiRKAudmIyYCeyK9WKCVU+SfCF+8AYP77QxtgqMGRs0IOqB2eT9j'
'QZilX83Q6tk7NuPfCGPSigziKmGrhcuunr/Qwp1VerwV1lv6GXGAprJeJTO4FQYxDss97zIXM3'
'XIuEe6wFmAi2qPt8cBZ9Svoe2eVNuMAQ93gbMAj9LqfpUDzqo3MoqxNf8MrZ6X1ddeRoqUfEsy'
'z2tl3z8jZ8FWt7aD86F/9EZaX+2Q9C/f3nVy4P36KqlJ85Jjujbq58nA7xouhvDGXlqFLNDqDj'
'enfp3ZnmqLNfTrvRwj+QUYHHOH26fe8lMc7k3HfrzhQure0jtc2Nxv6R1uXj2Itpel2mLDZ7Dq'
'AmcB3kPMcVH0q7f2osDe/tZeFP2E4q29KAbUQ2hbTLXFNs7g3V3gLMCKlquLoqAe7p03qP6He+'
'cNMdaH9bz9bcaBD6pH9JL7C/I+g7WpWtiob9ThHtnkAVpyJ1tRZ5M9FKy7JJuF/SXsDolXRUpB'
'+09l/65oi7y/1qQOf9/kIezSCO1JWuzH5HqSLonb9UYDWwQyWxG3Y/XMm8sad7zF/iq7megRF5'
'ND+lM/lN0oiP1O83wz2moKpEtGsIU80isjg8SXRyAje7xjDthT70Dby0sH/FNhc629vjNjUqjg'
'qr6jd/496uEdmP/LvHEHvEu9UzN+Dy2OLbDtgr1xn8YLx/KdvZTvIrzv1JS7QjGk3tUrmkOE4l'
'29QjFEKN4FoUiL5m717l71t5tQvLtXNHcTindDNNNrbFi9B22vSLUdJhQMHu0CZwHeS5aQi2JE'
'vbcXxQiheG8vihFC8V6NYtIBK/Uo86J0BfRLnFJLOr7uIlGE+9FeJinC/ahmkot7VL3vx8A9Sr'
'jf14t7lHC/T+M222VGvR/b5e+622VGQwfIrjxiQdguP8AcGhu76HaZUGGM3Q+ktU5GdsEPYOtP'
'JkCbux9MT4CxWj/YiwK74Ad7UWTVh3pRAPOHelFIa6AYYSAG+HsZDnuMGgBtNL+XmFx9Ytb/Xo'
'atxQSUAQiRjwSUAwh2tUGeUR8BpqtsGxjvH0kjh+X+kQxHbRMQvzhKg0pAOYDGiFCDPKs+mqYc'
'W+RH08ixjXw0jRxUfRTIL3dAOYBA+TsyAsupP9T24q9m/PlV316ohBDEYVvSHRCKM6Y7QWmTpb'
'YrEWcn1CX1wbzpSQEG86492mqyj2iv9E367oVAeH3JhcFyMjSEZf4wPVrEZf4Qox1xQBmAUFAj'
'AfHYriAr9ONZgfWpPwIqf+x9WQ7Im4AZBoCb2rpqhRBej1OZFfibA2s8ePPE8/leWaxzNQL/UP'
'nQJKx/BF87jcb2FG7TIAcQpssCDjW36qiJMnPDDVMwQPy4GuGAzvNbnYYYJiYbg0z2mu3WH6+X'
'qe/VeivW0Vp9PV3qbIgNDbq9ZFQ8D0ELOWRclqOZtGPHhbbPSRwuY0OOGIhjLs+3VyYmnInoM9'
'xzQXmA3AUDrfNHWDD7HVAOoOeSy/ULRuzy6o8zXLdjk+ch8UKenvckR4gsy01esHQerPagr6LG'
'hbCW9iSDZpPMe9rOrXA640Hs5o/T48lrutzxwJj74wwX/khAOYAOkGf2USNY/epzQHXt2Du1YJ'
'Es4dKOkScbgU+F2ds0Ds46ImVrUoJksGymyqnRShQ1wgCsKeHmTglLpcTZwCVpoVM4u/uR68y6'
'Gzxhb3Acy5h8sGBTcwuH7VvB9oTpDEZ0F6IZ216TpZPWuKX/otv9o8eez6ImjRAcX5hdGNfZDR'
'PHdRLDFPkd2oa/I+E3wlyfS09BP03B59JTAGP4cxl22RNQDqCSusZ7rRGpAfX5DB9jXsD6ZP2D'
'8EEsRwu1kAP5gc9XuI2cuOfZNFWHYj8ppeDpozvfPUqp6zA7HA89VWQ78huOYCHI9vn0qAZoVJ'
'9P62fY55+Hfr7KAeUAwsnoj8yoCuoLelTfyPh3Ly6ccZaEIarMcQieGtHaiKz0HO2XRW150C/k'
'5wa6aeCX7DX2kkQGoBMS/JMyTH5EHOKekK2hLW1oq7Kr2upt5EKGyKTT2WI6LQ6o6AHTT/QR58'
'lKbbPqq+NAx6/Sk8ZMELuLFBGqL6R5WSBefiEtIXBUvpBWOghSfQFKx7cb6aD6Iq9R2wbhoi+m'
'kePc7YtAfoUDygC0zxE/RIy+qMXv9wcF5qlvZjgG9u5BZjSt1ESXBeLI+CVzBFcq+y+FQrZPrH'
'yZFmAk102qnkdKIHREFaGvtaBVo51Cjunxkhz3aIRd1QNYkya06BfMlrwSNiJIfmSXBZI5kE4E'
'7RT5UaNmyKtKMIlFwVLDyPnaEhuKaZmEXpbFFvvanQrAihKO1dbDdr1a0s8nJXeyhz4k95Be54'
'xSXnLjYVBdNyTZIeqX1kKkAtH/oyPbhe5houwvGogQFdMugqwBe2BvjiM5yqhJqunzeHODlnXl'
'9Nn5nZBZKwcRJXh2q21sPbRESuRgNmSknHrlGh7av5zsmTUTPKzCO9UdxyHtZxiRTi6exERhDp'
'pRc4o2kZCd6DRe6p9Uu8yRnTXrTWPdscOM9NQa8n5FW9qukKFeb0DStmi4JteU9cdWC9mhiTxj'
'VyJm8dFoPTYJYNSUzCAWjhbSx6Zop+KEJCd9SB+pMltIKcQ0KZw3sUXTDgXeM6buyaVRxtGkHy'
'JUjBPotXUxwDB3rfCVHXL54ehHPXxYkuUZct4qTda2pHPQ6G3aVTsJ8tKgDmFNhjSt9VUbwKy7'
'Z680CpRqIwNgkmQXlLBoyEyY+fS6BwHB4NjGsS65jo0UQlnrNdII1iZd8rYJO/Lhtu00egkStk'
'tfkS508QpHw+Lk/JtpJeiRhv1m2r5GQOKbsK8POqAcQIibX8uW4HcQ5f0fiPLuTUV5zfgQ1IU5'
'9R0d1B3hn/DMvpv4N3nxzL6bUJQXz+y7ic7Pi2f23cQzy4tn9t3EM8uzD/RUovPz4pk9lUaOY+'
'mnEp2fF8/sqUTn58Uze0rrfBzB96vvY7hvzNJwr3SH20wcCRkz7JfvZzjdbIR/Ysw/SMbcL2P+'
'QUJWv4z5B8mY+2XMP0jG3C9j/kEy5n4e8w8zfEpp2mDMP0wjx5h/COTXOCB+8Vo14YByAOGk0i'
'DPqn/McO6KaQOn6h/TyGEO/2OGs1cSUAagK+Q0o1+8UQIhf+UjfCbOZ9z/PkuoXptVubFHsjsc'
'LBq7WodxnSNAievudKyIO8T1rjNEzNGOB4hd54cIQYfmyoNWFkjFJ8Xb1qbN01a00l3Su9jJ8b'
'Y5ZfTjTnXdPGJdGWySLtxs1Wnxco6NOAVMsWTZ1Jvtm455pA42yGItG3brNABim0fidLUFQcZe'
'k93hdPMytwnNHTca6gLnAd4tUZUEnAEYB5xpcA5gnHC63WfUL2XliPNi3UMuf6m3e0RKfqm3+4'
'xGiVPONDgHMBbABzIiWjn1KxCjG3c+nL6oDKUfdMuSB6uV17Y875YnH05DoBP6LipbniNcZiUg'
'xMEEu6B+gHaJd9AvIQ4CHVA3OCAeaVkd8SIB9an/PcuO6L0JBQnRFz2PbYXajN/xyNXb6czV0N'
'BnunRBeYBczYWwAIGMD9cvYQECQaHeRD8G1JuzpFAfzvYk8mrKzSmh1q+iWuFE0WsFsfIHWOzf'
'krWqdUCE/C0JgQMi4G9JCBwQ4X5L1qrWARHst2Stah1goX4wawN9AyLCD6aRQ3wfzFpfb0BEl0'
'Am0DcgYksgE+gbYNX6UJazo00bzOZDaeRQrQ8B+X4HlAHoKtHJA6JaCYQM6esLqGPwNrD2t8Da'
'y1OsNbWSNTfhRr0N3Hwu01Rgbr494WZBuPn2hKaCcPPtCTcLws23J9wsCDffnnCzwHQ/kuV8BN'
'MG3HwkjRwb1SNA7jsgfvFqGl0CygGEnASDPKt+M8u5L6YNuPmbaeTg5m9mOfslAWUAQvpLAsoB'
'hPwXnOsMqneDm78Hbpa60u9wxVpHGdKchQ/57iynEYzwT3D2PQlnB4Wz70noGxTOvifh7KBw9j'
'0JZweFs+/RnP2NjMAy6rezHDZ4XYbUL2fz4/qHTuznSBprAg5u0MLm+M2OMTSyWslrI6dlp2J8'
'NqxG5minbW7RrcJNiHrTEQ29mOXfTg8Ua+a30wPN6DEUZbENyiwT6KCs9kGe5ceAqWTbYJYfSy'
'NHtPGxNHIw6DEgP+CAcgD5NPEGeU79DjBdZ9tAUf9OGjkU9e9kOWMmAWUAulJEdlAUNYGuIUP0'
'LgH1qQ8B061jt/rz5p54DK9Ce9q+LkEFx1IXdTJwc2knIaHP4HJBeYCMBT8oGphAyqEKGvhDmq'
'oEVADoOnWLAxoA6Eb1PEt7Xn14Z9p1faxe2gXeSzuCsB9O057X6F3a4TV8OE07grAfTtOeJ9o/'
'nKY9T7R/WNP+YSRBeeq/YO3+eU5ljp3xb//n/+f5UlHBO/bnu/05RCdsfnOSVq/vtGI7XQ8uWK'
'c5LvlBW9/wdteh59/HDqO9gx0727X2+fjiLC0x2pZNbKLmxw1cUyXjsVbHxTCaFuzciDUE2ybO'
'6dMW2o7ZHdc4YIGSr1/f7DTY+7dRQzNVsIFNIhEQ7ZxIFMQ9iUShf1hY4+CSUTptjQ2yHbIdIm'
'EifZGIwwR1aJc46nLD2ZLinKskXG4ssg0cCzTjNnnEOsLBmVlclp3TjMRpb6aITE5NqkGjweXs'
'e8tclN1QMGs7nk+rLOX4q9qK4phjP70s8F8a6pMY7btbbEhViPQs6CCtw6MtPrkJScfWG+la9J'
'NglZaOzYjo5U55DmNN2koYNj3NN51hQZxEGwc7VD+zHVNtqzLY7BqWlfRsy6Xh6noUhxzT01ee'
'4+Oef5jDOKahpowDxSZ1j70ym/GFKSb1QM5PQ66dk8DienPUWiOpfEBuohNKPoe+f5Psc076aJ'
'g+JpmRkmwoJD/vRvoPWNrrLQ4HvQD/mcMiOe3ajjr6xnPcaclCAKhmr+DoafBlZhk1U2GVmlBQ'
'fppRr8gVpnhd2M/D17ea+FaDxYaecEihrw212edr12Grh1MbiMGZQgdTa41oJWhM2RmcaoVruB'
'2+7Vwk5cFHxmZ30mltUu4iEmi2zVVzDJ2vfN8XkmPLxj9ftwSehRUAySuYmvE3G521enOCh5J6'
'ZStciettHFKS8XOB6EX+zYRc7GjhXKUZARkfJIPt9282WB9FW8x2rLUmPxHO0yI+F3c4JIfnLD'
'78ctRkXnUPqczXVHSqI7ywsGeepNwBEOEo2iJgsvT04tik025LeFLURdxZmUqlRPKRmF4RZnnH'
'+l4maT4tdnzRPxahDZCqhMXh/7hlHYBEQ6+JjJqS02dE3vmLFXx5h7cCrT+gEzAd9hMIRhARP+'
'xsimQEHSKfVleVORzEfIVYDn+MjGjTFZG//wLTdY++ZuSx7fol7NnXjD2RIY60ddD4bpp3X8rn'
'QWW1YLIQZtK9ySYlx0FEvcR+hTrSUY2gyhvhiW1zcjnpXLsxiLF+Yxxnr3SSkEy02oaaqzedKI'
'oNkKbet9HUJlLAMHY+vteysUJttoJWLTZBFjGStW3iiY3+pcRc8cRG/1JiXXpio38J1uVzHVAO'
'IAS7/lNWYBn1FaC6Yex3s/bEErqZzyWFc/b4zL1czRUXcLFXTnBFjXo+1xTV79raEY5Db44yWd'
'1iS4x0Dgcb80c9WvKv7NRpK9UXrPRZNanV0tQULZhlro/JX5gQQdFqQILW4zzjtPkZSpOjB30D'
'PNY+gWh8plFP4rZcL9sKuFuPdUO9WocpQ1tMJCF4PSoMY8KZEPgSX0lPCHyJr6QnJKN5XRRv0B'
'Nf4itZvrjz3pzAsuobQHXb2JtzMiH26CxsJjLOlpOINFdPoTlLjojotc2A/yyDY/putNYEsA6T'
'e5eiUyblrIA3Z1my2siyNop7y5rnsKO/shKHNvsDtgaJ9WEsdEkMqZN2q2neOXKhkzs4936nMr'
'SE3qZadr2pezEJlp6zqHF2K/alPunlajhrYTvxF8cnjHccINmaUOAosLntOQxxbJIuBsh1VDN3'
'cJ6+kZ50+CTfSJwGT3y8b8BpuMYB5QC6XmIsGlQAaEIdd0ADAN2sXsBZeh6/9iT6mxvbr1eQaE'
'qc0kGbrOssPYMAbuKTafqQmP9kmj64iU+CvikHxB3dqI45oAJAN6lZzoYTkG53s5rxvm60SZ/6'
'Hrp80dhfZp9GeI89vfRap8Bj22lLn2fcXw31eGGI8sEhAtbtNr5HBPW7EWh3RLaRgI8kzy3dOf'
'V8j7NEiJhXdvi8WKsAzlaRc2LPl1qlTnkDTVQtsoY1tbJSGTh3FsTu0zKJYqSb7TjpPN13bE4m'
'Ze6Q/9AkQ4R22ipyOLSVbc1T7rsBk0ausTrz22eY7YLyALnzC4f7e5jf6x1QDqAJCeF64nATaF'
'Ld7oAGALpVvdA7yyBcfPgh+vt/cS7yQt+W8rLbhJy27lQ9wrh6McHtGPS1CcI5QNv6vAVhZ/8H'
'BOSHSzfbXpJCCowHmMU5m4TCqgcNY6fr6L5FRT0wssEucBbgIbXbO+WAM+pHaFsc69dFDkpHOI'
'8/Kcu1sMk7iM37FgWotYrJ0LbYDL7dXeAswEh+d/vOqn/Kcv7tre6ga3BWIKgioKfq7dAWeeqS'
'DLcTjIXxqS4wd4Pc/RGZ6Lz6+Rz2GzvziIYwyHNA/QCZw0lPoiE/j+9gXeuAcgAdchQboiEEch'
'UboiEEgmJ7wqiNfvVadFga+3+yiSl3Muoy5GjJcg2oH8eQIyUU6dphkz1IxSR1S0qla2extTzF'
'iU2odeLmAVpLkE/qfVSjm0TsyVTNKutXnYQk9y0y99paflKbK28xgoYP3nbG0k1L6mVz/92qJ0'
'dlINPttemZRabba3MpOwUnxQQyMU8NygF0UF3t/WWfwAbUgyw3Y5/p8xf1vQcpI558tS4VGsI9'
'LxgfqJjXIS7c4fslKStesq/ovF3ONDBVvKCum9tY7uQpwLfwK2dn/Hib7IsNHbLa5peSnrhQCP'
'J1YAykt5q4hwwyGk3Fn5quokZ+vXYxUczHP9k9qK1Q4j3sxZ1HemzypT7TE5R7coeED4CDFjWX'
'OxEcnuoujujt0JnmxpbOSdFJPc5tFRrLanAh4ixArSaEcPkYYHpLdTmq/eaLs5T2waiF9B6t4G'
'wWJ4kZcj6RRsw1HWqhRBp00ACZi71ioK/FpGxqnUdC3i0nxHTcjQ2Ziw+mpRSZiw/mUhsbDt0I'
'5BpWyFwkkGtYDZD+eTCtfwa05EL/GBVYUG9Ff8nmh4S/t6ZJQMLfW9Mk4KTqrSDhOgeUA2icDP'
'oExOhvoI00AQ0AdAspXUPCoHoorYWRFvhQmgRcI30oTQKOdB5KcwFpgQ+luTBIJDyU5sIg7mul'
'ueCph9FfQiaSch5Ok4CknIfTJMA1fxgkXOuAcgAdkiQODSoAdNgZoUckEOh5RNX3jE+/S70DHR'
'4b+2rGn49T9cCM0N/h+fpbfRD3SKtP8p7J0IfSb+NWlaQZVvlrlfU22if3u2xA29P+EL7bybaz'
'a+dztK/etnuEsT6Qm7VGXntYu800x/ue3wiDuO2mWvLtLmOUcE9mCNrsbKRcetTjeEea1SjI8Y'
'40q3Fx6h1g9ZgDygF0QI60NagAkK+OOqABgG5QN3r/zrB6SL0rx8cnr/T1Jxhik1bHZ4f8PQbJ'
'pzThix1LtZVT7ryntSI1XuuxD4Fs5oYbnGEP0bDflR72EA37Xelh82WvnD140aAcQNc4S2+Ihv'
'0uyP0tDmgAoKPqed5bzLB3q/ehw4mxX3SiRpGJLvpVcTP1FyBEt9X5NwdF2c9EfMR5xdtprF2W'
'SVkrVZ0QaNSpw4ndxIn3pTmxmzjxvvTWjDtr78PWfI0DygGE5X5KQMPq/cA0Pnabbz8twczvIf'
'M2Q0lsQi1ioTiUDRNl709TNkyUvT9NGa7CvR+UlRxQDiAUsXqzMfRG1IdznBjy77JOiM1fxIc4'
'3E2a1x1fqe0NvsH6nTFZ2pzURHND5h7W1aHyIe048XeA4yqOZkwRWp0/FZmpNbc54iPx9sZK1E'
'C8TTv8khTdTvy0WIuANu0mdbYjk2jPTyTxXR/4eE/Xje0l4ecIzh/TLB7B+WOaxbgq+OFcKqA3'
'gvPHnLqa5OEXjIAr9TE9+5uJfG+ubz5buUbTHjnxdpDnWZk7ZK7ik8DOeBSN52Pp8Sgaz8fS48'
'H1xI+lRUbReD6mReZTZjyj6uM5Tmn/UIadMWdaOOajixybezpa2EmB7TgOS7WXkN0z2fZR2mZn'
'i9u9boAlBZlpo66sLUZoiXD4Qf/qQbigPEAuP3Cl8uM5m52kQTmAkN36p4YfRfVHQFUe+8Q/gx'
'/muzSWMV7vfD4jY5JYsMsbzzLnWfGmiAtdad4UcaErzZsiLnSBN+MOKAfQDWrK+4zhzR71aa1e'
'PvJMvDGzipS8DvkLP7moSFb0TyQs3HWvyt1DPPl0mid7iCefTvNkD/Hk02l9sId48mmtD35OQH'
'vVZ3NcKKT5ExUK8exhU7potDEMSgsmi8CcPrlVRZgAGsxn04PZSw7/Z3O2qogGZQAyVUU0KAcQ'
'qoq8Xk9wXv1pjq+JPvDPLivyk49Lm8uoQULEmBokntQgYdCoA8oChBok+hRrUP0ZRjAsWAYJy5'
'+BEbvllUHG0gPKGhDSR3ap/zunnqN+vU9lGCusQoIU1OXel/L8GxG0J3Ich/1sHrsAu1jOuWZy'
'p+aoCSyhlVuWYDV1IdKELZ1tkFvYw6opWMjEwpU6l9ezwcsu7J6gJ/8RJ6VSLyE5665rKzGpva'
'urnxzHqfmh2MeNIw/RUvIi+doo4qer4RYOxcOg3WnhqBr7PWYaez/b7XwZodZVb9jelTFR/vD+'
'gCsDpzIJfNv8zijyf1bXPJe1f5GPWfm3M7dv020dEbwZE7AR3M9PXp1O6g6dxA94KDpvAmww5O'
'lrGLc5DI0lLZabulPl8c3ItPCzv88pd2bcCBew62+s+tt0vVc2bkxyy4okfMf6gChmv6j7ng93'
'ecLeBBEPyJzkasWok4/aW5wO0G7Vq7ZQP89+iKKLVYmU2M0ldXFQqw8WbtIoTyQaRYPyABmvYZ'
'dEfp+A13DIAeUAOiyRbw0qAGQi3xo0ABAi309mBJZRf4cO7xx7POPP1uPEXXLCPRKNM58o80s1'
'5+Cp5JvPlJm8Z2Ixl8FfJflsm4vb+ijBYDLZPObIlPWXPqAkQSLj2JZuFQNNljStmXrYus1vhl'
'sS+dHrLLgQ1Y0kyQmcQ2TJYTFONP8uzWKcaP5dmsUZzReljjigHEDHRI9rUAGgm9WcAxoA6A41'
'633HsDirvo0Oj47998T1N4viknn/zsr7MV1+8fi9Z+3yO4vFsAHnZd9Ocxnx+G+nuQzp+3bi9W'
'tQDqADsoFqUAGgg+TiJ6ABgA7T9LyrX2A59do+hdqib+yHNWNv0xlO65WbTsxwzMZgky+ybGtV'
'IhxEoHXTXLk0VwstRKtRH5AXvjjcxifSJn3+fg/+fBHgy1rsb/eP3uYlVkrNvQ7ZiKLzMRdLMu'
'iE4NPBJmcF8zf5jIZ2tbT5fl9aLyctgoYvZPnnw20hoqeJJVg8vdv9Y9Ls1fofqxTTBHWNzvPn'
'u0oGcV6kTjCAInQCJ3peDPm38xZuZXUFX0qBvg1oIXMYAnNTTy0IG+EmpOvY8fGMxqhLZcjFE8'
'1w59hz+uw8G098OainwBEfcpr8KS70jrzs+qpvb1frtbDzXVC+y7SwNHfcFJmWMLA1p7vK+tNG'
'xvkbxmxhqdL1cD3jbOuL54JAFJ3J86tvpALO+lxAHBezsUhiorvB4Oicl4kLygPkrkscnRNISd'
'6+BvH6Qlnz6xk0qF4PPLtLV3CiAw7slu1xJW0i2pzcxYbg64F+SHBpQ7AHlDWgqwX9GzT6IqNv'
'Bs1oOYiX0U2COYNGLprMTqCsAVVkLH3q1/p+mhUGGafB6oL6AdrlqDKcWRPooLOH4MyaQKbCIG'
'Kjb+y7VBUGd7F1T/iNdb9LrHsGjTqgLECw7mGXD6k395Fd/i1jlyNsSZCC2uu9N8u/YZc/3MfO'
'+5uzzFX+CGci/eaEk5P7brihO01CDPggSa72LlK5Q9J5yd/FFNk0sy2sDrImjO9jToQ8yypcxt'
'8y9kbayUVytNUHSVpMRLux1OoGNdgWQ6RckqnT2iZjJzyEUF6LY8J89bAW8s1oToLuwGY3h2JX'
'a6kYEhPv4URQNKgfIHOjaUhMPAKZ6kJDYuIR6ErZLIfExCPQfklxGRITj0DXq0kuU8WfiVC/gf'
'5+q0/KVJlPRxAUZaqutSBM4tv6UPlqbMQGTTa4SDkftdtWuMeEdt3gLMC7yQfc44Az6pE+mylg'
'gQZc6AJnAcZidVFk1W/22UJpFoibRH02DyABc2vkAfw3I5sZ9Sg4sH/s/8rKiueSCiIEktyhP+'
'WsnT+r4zdbqCSHTUhsS05pZv2GCzhwxKzP1iOwWorgQJX9SiAGCXVmsMPFwSecTMwE9XtNqack'
'oy3UYqmdDbmqGrRatLlygXgu28hblU3+a3SXwVtpRCtlf94Ur5jUu4g5s8QG0tbfeuH8QD4G1c'
'aiNqvl/FUzzamaZmQOJvWjaZGGSf1osqkMyZQ/ik3lcgeUAwgi/fa8wLLqo0B109gv53mu9Md2'
'bUaYhJnCJCd2kQ0pzTQbn5MLC5FUZJGaGe5+Cn/ffnyN+YH3brnZX+El3A7J/2jwdKzW7zd1oD'
'x/nB7dcvOk35F/Y/mXGzFA/ppAGR+nAqsZiP1irqfrwInI8By649FZXMal4okgKzHSdUlge9c5'
'Q0snk0GE15GwKmlWAVlIZCglRWukGowEw/3VRqRNd32rIekW0SPWnNt4aj/naz0KGQQ7xCk3kT'
'A7xMsFGs+aN86zks5/LpliX5yDEpKrX49azuUeVj56rjzffnWYb3mnLDf7KZC2DoOklLutFhDJ'
'ZLuZEDq3U+d2ExjHHzonH9kLZXIS1svzkANrQA2JY/PRtKxzcbg+G8AcEt1EoKLsskPi2Hy0j6'
'9xJ6ACQAckHXFIHBsCjaujVn1n1MfQ3//pqu+Mhg6QVp20IKjvP+jjJK8rJEbspDfoO4KXua0J'
'yR+kVbMpMfgHfZzEVXbAGfVxjftKxt0jqXEX9ox5Y3cXOAtwN/as+uRFsCfZ8i4a0PPJXuyCCN'
'i/OyQszanH+zgN/MtDJpvHuRe0Yl2yRvBAvbF9h++fCh7Ytjnc5sxXTKop8NFUUtc3XhC3kLo9'
'WyYVVKf1Op4nX1VjQ0X3Nqn1Up0LqEm7Q3FSRYy1r9wdF/qQyy2OgbZVdd6R3gB09FFWUwqrKP'
'JqW1/2SPAxsVJJj4ve6TCOCSdK5j+GH/C1Noy2y2dCjGu1FYb6BII9PVvHhg065AatoZRXC+zd'
'trfO0lWW2ra8ljFgzXUwm5bq2U0qdfABxzHurK2FsSmdlIqwBfwhOFh+9VBXKgvYtwSeFD2pel'
'xcrzpqSZjXURgr5KmfD0NdThBlBtYxFyQREk2QD6qksijrPWrJJDX7gU6NZYrlO0y4Ebcqx1oI'
'kTpnNzTLt3l8jimJ3lxGigPD+GRM4FwqJr7d2WlhGmCgQNRQvWYK35ex34fxnM7qLj3O16g0wb'
'dxhlbbZIKazoCNVTzGrmN09kaQ7pDFudpp6auSvJM1dDmlNEIIfb2Jkmd8pYrrCSEpWkp5aLEk'
'Lrpx5W7vvTfWUF0Pq+dteSJjvumbcR5vkDT/qbtHNEvAjOp8NCSIxXw8r9ctcn7HJ4xFl1rdHv'
'fdCnFbSAskF0qSuEF6KSKmzx/q5CWQosw5hmiGGDC+nWqu2EQmS3IHnOz4yD3JSCeI6vVdb+pi'
'WbL58XdY+aIHGDOJjSEMrOm52WltRjo/BozxzMqAEdPs3nElysvsjp+W356NydvKU235IFK97X'
'LcHI04eXvO3BhtmSaDUcsnWg/r6zCHhYw6f+03RQoHQQ9zev1h7+mapXWT0We6+lYqoLgOoxsX'
'QIXjjpWAMMvjaSsBYZbH0xYxwiyP99miqkMSZiHQFY7hgBsKj8NIPuyABgBCKaFvZATWp/66j4'
'PM/9XNL4M6u2QhZhP3j3+yALOvq6k+q5wyqRJuGNBnhuuC8gC5/IXt9Nd9Nrw8JNGWv+6z4WUN'
'KgBkwssaNAAQwssnBJRXT6C/ybGjP/6X5gxapJ8/kaY6rxG7VCP9/Im0VCD9/AlIxZUOqADQmB'
'z4aNAAQNeRoFQE1K++/lONdDFOGsXX06Po1x3tcriKVOuvJ5EuDcoBZCJdSIf7+0sW6RriSNff'
'J5GuIYl0/X0S6RqSSNff60jXcxk0qJ7UgccR+RJbzb+PjT5Bi3jmk0mIcUjimT2grAEhgLZbfQ'
'cBtO+bABqy3b6jA2gV/gnb/amf6lTtlljTU8lU7ZZY01PJVO0WP+CpZKp2S6zpqWSqkK/3vUs2'
'Vbt5qr6XTNVumarvJVO1W6bqe0lQclj9T/D0V/LCU+Tp/c8+vkhb5Z/g6Y9AtT9W0akC6ZwZkz'
'kQ4EgcAfhN/VFxCUDqzynxBzvbyVUpz5QOZrKGhck/Spg8LGe2P0pW9bAw+UdY1fsdUA4g1G3+'
'WkZgGfWLedbiX0y0uNS+uoTnhPqG5KXV4XzI7rANQSMeqwvKA+SyLaM5YlT4sASNCGRU+LCcwx'
'LIqPBhOYclEFT41QwaVK/JP+2BwjAv7dfk7ToelqXdA8oaUEU6y6rX5X+aa3dYAg2vS/MH+vx1'
'ebt2h8UPfl3ert1hCTQQyKxdZLT+cv5Srd1hXruE36zdYVm7DBp1QFmAzNodUb+Wp7X7G2btIg'
'GUIAV6/MMM/8bifZNeCl/rWgraVbzkC0L3c6nPzp2CdzL5I6JT3pRM/ojolDcli2NEdMqbksUx'
'IjrlTcniGJFDgjcli2NEDgnepBdHRUAZ9eBPVYRHZIk/mB4Fyo09mIjwiCzxBxMRHpEl/mAiws'
'gYfviSifAIi/DDiQiPiAg/nIjwiIjww4kIK/U2iPAHjQgj5/dtec5VezzHvyHCj+b5zoeT+JHc'
'Qb+E8iudXGrhNdegyt4xWqTwzo/7upKZrRVz1DdFYm46ZqqgJV8+0Wb0odi3hnTl7AyyDlZbtN'
'fiEJ6cxJeigE3UiNYgbfwJsYgcNPFcY+c7VRF55iS2jQthLGkEPsr68K02U8FXB3/4fhbX5l7h'
'22BoVgurdQnfmLO+sxJIAqITumaIiLeSRfpoIt5KFumjySJVskgfTRapkkX6aN7ebFGySAlkbr'
'YoWaQEws2WioAy6rGf6iJVskgfS48Ci/SxZJEqWaSPJYtUySJ9LFmkSIP/wCVbpIoX6QeSRapk'
'kX4gWaRKFukHkkU6qn4Xi/RPzSJFIvrvYpFe5v3XHP/GIv2EXqSPu9lZHGK7xMlZ6OPS52bJ/e'
'3/v63QUVmhn0hke1RW6CeSFToqK/QTyQodlRX6iWSFjsoK/USyQkdlhX5Cr9D/kWEYDtv/Izr8'
'dF7l0ul+ErOthVO6JMIUB87HUUcAkVSa47uWls5iTTeCZjWc0IJRCzc2I0TNJrnUXFOHu+7QbX'
'Fbusb3W7sjY0k09OTcEgRnRVcsoJ48IxI6nfjsOed50p0NzpoTh66DubMLi0uW0TqdgMY9oK7g'
'c3sNwtL6VF71qav4jMYCqS2Dr+gCZwFGwdYJB5xR/wlt95X26pQnXNGzVHopDBnTeE8XOAvw5d'
'TfCx1wVv0xty0dcrms62yaQoJc4EVPV5zuC4Tx+8NdYEaLzxgWRUgy6jMQiP+clzoVo6JzP5OW'
'S+jcz+Rt4dlRGQ+BrpLsjlHRuQQyBUxGxfch5P1yX3BUfJ//DCqu5Z1jlMn67E915xgVD+Wz6V'
'HAQ/lssnOMCqc+m+wco+KhfDbZOXBh6E8u2c4xyjvHnyQ7x6jsHH+S7ByjsnP8id45fhVbQ1F9'
'EVvH39HWMfaPWX/ahn3tkT3UVGDjCQlX7QGPZaJcJNVJ7TimD/RtfTMkqSioP15gLvnbNL7jx8'
'9KXUbc3+H7TLZMbBQ1TF3ZWJQtn+txKUMQOOt8d4Pvdcbl1CX4LhLqzdSXOvQbuladnHFo+hK0'
'x48LivEJraMIk/40TVezmWhzeykan5iQw00udMPL7JxbCtLWizTFJnWZNFyT+mKei/z/WZZ/o5'
'r94xCb/w5d+3Gd2eMWj0hVmEyOFLmIqNTIsXOpizWvSeEKnA/VovaUKTVVM7nq9Xg5KY5T119+'
'8eurq87bLsqmU2bSH6+FJBSm/I3+OhgmLCUJSFuLu5NFUathjmZg8mf9f1NajaLSpM7Refkk/V'
'4JWuWV4AGCgRgGvbJzv23iv9qhyPPxenlc3pkoo6Ws6KJUuieWevKFyKKtdP/foOpKrOoskFY/'
'g4e6wHmAd4sSTsAZgPeqA13gHMAoCux2mFF/BczXpNpCaf5Vb4cIGv2VXsppMCPBFbI0OAcw6s'
'SNMBij+zKk6IBwQY/sy4leK0rU9MvQa3scUAagvaJVijIaAiHFg7//V+ShfBWoDuH7f0vpc+6d'
'JXQSs7+1TlKH9cHZNmxsRudDqJKWh+1KlwTm8qtB7Nc6LZ2gJUd2c3LfR74IqNWC5A/LBwaToY'
'GtX02PFiz9at4mtBSFnV/N2xutRWElga6jfc2wMqueAKYJ2wZbxBNp5HyUkkYOLj0B5Nc6oBxA'
'KLhjkOfU3wDTuG2DQ7a/SSPHIdvf5G0mpQZlABqVi+UaxLhQvt0g71N/C0xJmz4D8hxQHiCXcp'
'ww/W3eXlTUoBxAroTl1dfytog2Awj519LI87qVSzkOgr4Gyg84oBxApoh2kTfcrwPT9bYNn8+k'
'kaMUztfTlPP5DCi/2gHlAEId9schvnvUd7ADfrefdsD7/LlmNdiMpYxxvalvhMntwY6kupsP7+'
'mcWanMh8wASWJDmfNG2FXl3N8KnMJH5Ki84qdZODqhhnPSQLjeUHDH9Ds6XvOaUf7N1fj7Faq6'
'P6Vowz8VScncelKDO/A366HOz0ijTQoz8qh5wLgE1SLVuxk1a1Ke0TnfTgpY20tQDlfrsdRslU'
'8rJd96oh/zs3P8DcGafHgvxBFs+rZnUnFASh/WN+rUK3BFDfuZLCmeOkmeAT4IJXfz9BDsjZSL'
'3hMEb6XWHD0y10te7fmnQr7SGEXnUT+Zy20nqdvJuBn706G6V+6q3Huv/Qf/d++9eBjIw5Uq/0'
'O88Fd9f2297sEftYWjbckrokfPp762E2+ShelzeSs//Z+7X/r+vwkm6xP0j3/zpH/jpH+M/td/'
'ObeDOt9ajxq9AyvLiytdL076N+NdvNgIVsIGuX8y+gn9SnWy1vPK88wr+iulmk3SPpxc7Wl/1L'
'TXZYaJn9J4bXK9p/FNtrGu0Dt+dMJ8lQdsmqJlYNgmeS726wM2R1qSptrk16/KV1QlJ4QLUPqu'
'0OvvREp96np7wrn/1zFJaboCIl9roWUm6c+xLt3u+4gy6HSrsFltRHG6RqtcCtS2GPKgXCHnbN'
'B2vZUUOObU6Op5f3wziuP6SsMWcufQiUlnSmw4p+i8NmO56LC+0CppQZZdWyjfreWLuWaPEUuJ'
'+1KyXOSQik0W5ppdTc2tMqbhtKHFCnHipdoLlejLMFRnA8cmHdhUsLX8cz+mw1+13Gyxm4+OdX'
'11O3wuOCefsvA3opijNtHKhXrUiQ1zzQdl9dhqJeFrsIZUMVOl2hQ2d2tyu9OQ/uQPPtyL6q1S'
'89+p+r3DqNOieijWy9sktumrVlxqWqQKmT/aBJe2WlY0RSIuznhCFOVdTdfGdBlofDqdgAgsKy'
'FthSxGYut1c0Zf5Y7Xg5Z2lbqqxptENV3tmt/hQd6t86l0Xliw04jdYcbRhmSPdbcEZuuoIoXV'
'N1/LYhRwAonaYIdF5JfWWlFnsyTuOStJLnMcaA2FkTkfAbArM/X1pqTIbiLRQJRsmHX94cy2UX'
'w6Ex9IpUZkvcUeMhm5NmHWfvqJGDWTFHDTnyfjSzpibTvLSOxi2rtXghWdIEuDr681OdDIZeM5'
'DktdRqbUjhMo0dV4cDF6EqY43wjRieVIpbO9VHWZOF9/6amKxLyk+i5fFRKTfI84IGyIuKB+gI'
'wDskccEALtlYTyPeKAEAgfcCkyCD7yI8D0nX7JHt8jbh5B4ea9bsDCYP78fj/5V5Nj3+13vzsi'
'3z9AGW+R5YtZceZGv3zNzbMM4E8fOJdZJHLtxKHlu45a40npQ05ZDLQKhRmp78/jE9o7UmDqPE'
'FdSV1NCVkmdhavcWeF4xYIoWeLgHfGm3hnvJW3US9R3Me1fm6EZVMbAZM8fhNtqEeO8HvmZm2Z'
'RzV+64S1J6gBUNoG2JfHk8fc4GiS/mkW9w5DTHWuK0S4LLyZqbT7cjd/Ui/f7t+MdOJmTzNNfy'
'/yY2nkO32vyDfXgY8J6h0/asRmSQ/6ozsag9xWrhcnGkNXXGCxSL5AbffdKPlwqERt5ldh20o2'
'lxaiRoDcUS2MPZOPmbei0bV5pw2+ZAcYtzVWreb3zHLRtp5RZnx/yZyfbtSrUSNqTsj1hj1OcI'
'XX4lAXOA+w+XLfHie4QuA9ElHf4wRXCIyIehpcAPgqdYN3RRpM/js9OKAOe3+ZdZ5k1Ce1WvhU'
'1txaXucP0ugoA5K9Q/0JkU7LmmvHpfB/gxTCpPxNQ+1sNCfxxcgaP0js30knxzmI4w4KOPDujq'
'9oW0QTk/yqxmO/doMzK7mcRgreKYKvZ4kv0skdveo2iUOSRQ6cWlI1SntQBZQPhK1oSh+xwICx'
'Wf6op8+7jRTUxw0BD6dokbZV5LyqVo9JE23XzfefO/oitjsTCL18sneWEX75ZO8sZ/REdM8ywj'
'Cf7J1lHAt8sneWMzzLn9Sz/NiQ8ySrngQpE2NvHrJfwFhkFxc76Ty5puloqa1a7awCUzU/4Np0'
'2/B/N8R4ZwOpnhznsZXA33DWOjpZSRIr4BdMSrv5jpNZRK4tgaIwXChb9lw+MujtFxc9aScOqx'
'2+zolmsa4pjLKFLH2ejrJ1vaX3fdteH9O2JVTPH9y2rgd3CoIaZEsTcBnfdwbdy5YgRoBLJNhQ'
'g7VWsLnOZNsGLJiaAM8waxynUjDUaARNfUejHU3oQwJ9v8Ksu7LeZi1uvjhjgtj4viaqfXUPJk'
'o4lmzQ2jZJ3BNb5WCBb0StJ6/IpSW3gtJt9uFG0DqPFaWPEI4cmdB+XMzfqQ7Z4RALU9vFhg+T'
'hoeQh7YUe2OhwReRSG7q8Xkv+faKQderhdmH5KrIEIwouYWrIxJku5EiORNuMU9YcuUqd3INnL'
'/qp7/eZL41k9qsuI6QDVzwfj4jw3cFF6F3z7/oYwTld0BLYL2j7rQ/rgQP0MObbntatA+YXqeb'
'4gqAEz1tngbHKzv3C45nwmRaOh9a76zQ2iC4NjkEwawsDCsn+pNprTXnq+EQeCMEyJloBXW+lW'
'NERFDpXn3zvvvh7pZWRSuNoHleC71ZDXLdWVuVjAYuTPmZyUuWln+svOOc6Ga3+8/Ts3LYP+EK'
'tuUWm4OH9bc9eNj+KRmrEe9YmhghFwOm7B8+8rSYxW2hN4lO1GKVF7oESz8kQm+xsyIZJ36ta/'
'hx18aFePmTvRsXIt1P9tuznwScAXivurYLnAMYIf49DjinvgXMh1NtEer/Vm+HCPd/q7dDhPy/'
'hQ6v6wIz7nE1keqwT30bmI+l2vYZ8FAXOA9wd4c4Bvg2OpzqAucAvlEd9b6NgPpe9f1+ZKwPqA'
'xyQ5JPm2pN29C+4np9k2a7vYWLRen7fzpogOpo6UC7+XLNdPLlCrtnu9WP4jiq1gN7BGk/1WV7'
'8dzIfZILYT4+w5Ywf+sDYpskystLqfo2OsiO2oc05oK60vtZ/gkn84f9XPT0Ptwsm7aZUWZvi3'
'XQgqMZsJ/D+3kz6uIEBxfNZuWZiI4pWUaLyCFGPOu9Ylf/MPGs94pn/UN41pc5oAxAl0tV5r1i'
'SxMIn8MsMgie9T8A0y8PiGe9Vzzrf4Bnvdd7c8bCMOh/0ib0z7mONWcap/fb7iMUdxjOxxE541'
'4fFrAxYGJhQbfNVjaHGGUdJmP5tFQRN/4pEfIEnAfY2J8JOAOwsT8TcA5gY38m4ALAxv50wPhO'
'h7Y/l5wHGfULA0TJtWN3dHOI5Ynr92t/zHw5a0dOdY0QFjbjHeoC5wE2yzgBMxV71cEucA5gfE'
'j7VQ44q14DzAfG1ropZodFmx6rCMTR3OJTOTa8mZZl+aABr//krolzyKuzMLpGBr31mt6RQQW/'
'ZqBn7sBfAptPyyfgHMA4sP6yK6459Qag3j/2uUyPvEoK5LMZma/vWz/NyBiLLnMVNs1v56ga2m'
'kziNuO047MvwvwvPjrQ+Py5VJd8sQ43Kw0bmeUU9oSnOhiHzaUN/SyDxvKG3rZhw3lDWDf5V1g'
'ZtSVauz/A3VwEtY=')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
DiscoveryServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'service.proto']['descriptor'],
'service_descriptor': _INDEX[u'service.proto']['services'][u'Discovery'],
}
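# Usage sketch (illustrative, not part of the generated module): the index
# built above resolves descriptors by file and service name, using standard
# protobuf descriptor-proto attributes.
if __name__ == "__main__":
    _service = _INDEX[u'service.proto']['services'][u'Discovery']
    print(_service.name)                       # "Discovery"
    print([m.name for m in _service.method])   # RPC methods declared in service.proto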
|
luci/luci-py
|
appengine/components/components/prpc/discovery/service_prpc_pb2.py
|
Python
|
apache-2.0
| 29,794
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2015 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2015 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import json
import zipfile
import tempfile
from . import legacy
try: import bson
except: bson = None
IGNORE = 1
""" Ignore strategy for conflict solving in the import operation
basically this strategy skips importing a document that has the same
key value as one that already exists in the collection """
OVERWRITE = 2
""" Strategy for conflict solving that overwrites (completely) a
previously existing document in the data source if it has the same
key value as the one being imported, this should be used carefully
as it may create data loss """
DUPLICATE = 3
""" Conflict solving strategy that basically duplicates the entries
in the data source even if they have the same key value, this may
create a somehow inconsistent state and so must be used carefully """
JOIN = 4
""" Join strategy for conflict solving in document collision, that
basically adds new fields or updates existing fields in a previously
existing document, this strategy does not remove extra fields existing
in the previous document """
class ExportManager(object):
db = None
single = None
multiple = None
def __init__(self, db, single = (), multiple = ()):
self.db = db
self.single = single
self.multiple = multiple
def import_data(self, file_path, policy = IGNORE):
temporary_path = tempfile.mkdtemp()
base_path = temporary_path
single_path = os.path.join(base_path, "settings")
self._deploy_zip(file_path, temporary_path)
for name, key in self.single:
collection = self.db[name]
source_path = os.path.join(single_path, "%s.json" % name)
file = open(source_path, "rb")
try: data = file.read()
finally: file.close()
self._import_single(
collection,
data,
key = key,
policy = policy
)
for name, key in self.multiple:
source_directory = os.path.join(base_path, name)
if not os.path.exists(source_directory): continue
collection = self.db[name]
items = os.listdir(source_directory)
data = []
for item in items:
value, _extension = os.path.splitext(item)
source_path = os.path.join(source_directory, item)
file = open(source_path, "rb")
try: _data = file.read()
finally: file.close()
data.append((value, _data))
self._import_multiple(
collection,
data,
key = key,
policy = policy
)
def export_data(self, file_path):
temporary_path = tempfile.mkdtemp()
base_path = temporary_path
single_path = os.path.join(base_path, "settings")
if not os.path.exists(single_path): os.makedirs(single_path)
for name, key in self.single:
collection = self.db[name]
data = self._export_single(collection, key)
target_path = os.path.join(single_path, "%s.json" % name)
file = open(target_path, "wb")
try: file.write(data)
finally: file.close()
for name, key in self.multiple:
collection = self.db[name]
data = self._export_multiple(collection, key)
target_directory = os.path.join(base_path, name)
if not os.path.exists(target_directory): os.makedirs(target_directory)
for value, _data in data:
target_path = os.path.join(target_directory, "%s.json" % value)
file = open(target_path, "wb")
try: file.write(_data)
finally: file.close()
self._create_zip(file_path, temporary_path)
def _import_single(self, collection, data, key, policy = IGNORE):
# loads the provided json data as a sequence of key value items
# and then starts loading all the values into the data source
data = data.decode("utf-8")
data_s = json.loads(data)
for _key, entity in data_s.items():
# verifies if the "native" object id value for the mongo
# database exists and if that's the case tries to convert
# the value from the "underlying" string value to object
# identifier, defaulting to a string value if it fails
if "_id" in entity:
try: entity["_id"] = bson.ObjectId(entity["_id"])
except: entity["_id"] = entity["_id"]
# retrieves the key value for the current entity to
# be inserted and then tries to retrieve an existing
# entity for the same key, to avoid duplicated entry
value = entity.get(key, None)
if value: entity_e = collection.find_one({key : value})
else: entity_e = None
# in case there's no existing entity for the same key
# (normal situation) only need to insert the new entity
# otherwise must apply the selected conflict policy for
# the resolution of the data source conflict
if not entity_e: collection.insert(entity)
elif policy == IGNORE: continue
elif policy == OVERWRITE:
collection.remove({key : value})
collection.insert(entity)
elif policy == DUPLICATE:
collection.insert(entity)
elif policy == JOIN:
if "_id" in entity: del entity["_id"]
collection.update({
"_id" : entity_e["_id"]
}, {
"$set" : entity
})
def _import_multiple(self, collection, data, key, policy = IGNORE):
# iterates over the complete set of data element to load
# the json contents and then load the corresponding entity
# value into the data source
for _value, _data in data:
# loads the current data in iteration from the file
# as the entity to be loaded into the data source
_data = _data.decode("utf-8")
entity = json.loads(_data)
# verifies if the "native" object id value for the mongo
# database exists and if that's the case tries to convert
# the value from the "underlying" string value to object
# identifier, defaulting to a string value if it fails
if "_id" in entity:
try: entity["_id"] = bson.ObjectId(entity["_id"])
except: entity["_id"] = entity["_id"]
# retrieves the key value for the current entity to
# be inserted and then tries to retrieve an existing
# entity for the same key, to avoid duplicated entry
value = entity.get(key, None)
if value: entity_e = collection.find_one({key : value})
else: entity_e = None
# in case there's no existing entity for the same key
# (normal situation) only need to insert the new entity
# otherwise must apply the selected conflict policy for
# the resolution of the data source conflict
if not entity_e: collection.insert(entity)
elif policy == IGNORE: continue
elif policy == OVERWRITE:
collection.remove({key : value})
collection.insert(entity)
elif policy == DUPLICATE:
collection.insert(entity)
elif policy == JOIN:
if "_id" in entity: del entity["_id"]
collection.update({
"_id" : entity_e["_id"]
}, {
"$set" : entity
})
def _export_single(self, collection, key = "_id"):
entities = collection.find()
_entities = {}
for entity in entities:
value = entity[key]
value_s = self._to_key(value)
_entities[value_s] = entity
data = json.dumps(_entities, cls = MongoEncoder)
data = legacy.bytes(data)
return data
def _export_multiple(self, collection, key = "_id"):
entities = collection.find()
for entity in entities:
value = entity[key]
value_s = self._to_key(value)
value_s = self._escape_key(value_s)
_data = json.dumps(entity, cls = MongoEncoder)
_data = legacy.bytes(_data)
yield (value_s, _data)
def _to_key(self, key):
key_t = type(key)
if key_t in legacy.STRINGS: return key
key = legacy.UNICODE(key)
return key
def _escape_key(self, key):
return key.replace(":", "_")
def _deploy_zip(self, zip_path, path):
zip_file = zipfile.ZipFile(
zip_path,
mode = "r",
compression = zipfile.ZIP_DEFLATED
)
try: zip_file.extractall(path)
finally: zip_file.close()
def _create_zip(self, zip_path, path):
zip_file = zipfile.ZipFile(
zip_path,
mode = "w",
compression = zipfile.ZIP_DEFLATED
)
try:
            names = os.listdir(path)
            for name in names:
_path = os.path.join(path, name)
is_file = os.path.isfile(_path)
if is_file: zip_file.write(_path)
else: self.__add_to_zip(zip_file, _path, base = path)
finally:
zip_file.close()
def __add_to_zip(self, zip_file, path, base = ""):
        names = os.listdir(path)
        for name in names:
_path = os.path.join(path, name)
_path_out = _path[len(base):]
_path_out = _path_out.replace("\\", "/")
_path_out = _path_out.strip("/")
if os.path.isfile(_path):
zip_file.write(_path, _path_out)
elif os.path.isdir(_path):
self.__add_to_zip(zip_file, _path, base = base)
class MongoEncoder(json.JSONEncoder):
def default(self, obj, **kwargs):
if isinstance(obj, bson.objectid.ObjectId): return str(obj)
        else: return json.JSONEncoder.default(self, obj, **kwargs)
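# Usage sketch (illustrative, not part of the module): wiring ExportManager to
# a pymongo database. The database name, collection names and key fields below
# are made up and only show the expected call pattern, including how the
# conflict policies defined above are passed to import_data.
if __name__ == "__main__":
    import pymongo
    db = pymongo.MongoClient()["example"]
    manager = ExportManager(
        db,
        single = (("settings", "name"),),
        multiple = (("accounts", "username"),)
    )
    manager.export_data("backup.zip")
    manager.import_data("backup.zip", policy = JOIN)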
|
rmoorman/appier
|
src/appier/export.py
|
Python
|
apache-2.0
| 11,824
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from flask import Blueprint, make_response, send_file
from flask import jsonify, request, abort
from werkzeug import secure_filename
from muranorepository.utils.parser import ManifestParser
from muranorepository.utils.archiver import Archiver
from muranorepository.consts import DATA_TYPES, MANIFEST
from oslo.config import cfg
CONF = cfg.CONF
v1_api = Blueprint('v1', __name__)
@v1_api.route('/client/ui')
def get_ui_data():
parser = ManifestParser(CONF.manifests)
manifests = parser.parse()
archive_name = Archiver().create(manifests, "ui")
return send_file(archive_name)
@v1_api.route('/client/conductor')
def get_conductor_data():
parser = ManifestParser(CONF.manifests)
manifests = parser.parse()
archive_name = Archiver().create(manifests,
"heat",
"agent",
"scripts")
return send_file(archive_name)
@v1_api.route('/admin/<data_type>', methods=['GET', 'POST'])
def get_data_type_locations(data_type):
####### validation ########
if data_type not in DATA_TYPES:
abort(404)
result_path = os.path.join(CONF.manifests, getattr(CONF, data_type))
####### end validation ########
if request.method == 'GET':
locations = []
if data_type == MANIFEST:
for item in os.listdir(result_path):
if '-manifest' in item:
locations.append(item)
else:
for path, subdirs, files in os.walk(result_path):
for name in files:
locations.append(name)
result = {data_type: locations}
return jsonify(result)
    if request.method == 'POST':
        file_to_upload = request.files.get('files')
        if not file_to_upload:
            # no file was provided, so fail explicitly instead of falling
            # through and returning None from the view
            abort(403)
        try:
            filename = secure_filename(file_to_upload.filename)
            file_to_upload.save(os.path.join(result_path, filename))
            return jsonify(result="success")
        except:
            abort(403)
@v1_api.route('/admin/<data_type>/<path:path>', methods=['GET', 'POST'])
def get_data_type_locations_by_path_or_get_file(data_type, path):
if data_type not in DATA_TYPES:
abort(404)
    result_path = os.path.join(CONF.manifests, getattr(CONF, data_type), path)
if not os.path.exists(result_path):
abort(404)
if request.method == 'GET':
locations = []
if os.path.isfile(result_path):
return send_file(result_path)
else:
for file in os.listdir(result_path):
locations.append(file)
result = {data_type: locations}
return jsonify(result)
if request.method == 'POST':
file_to_upload = request.files.get('files')
if file_to_upload:
filename = secure_filename(file_to_upload.filename)
file_to_upload.save(os.path.join(result_path, filename))
return jsonify(result="success")
else:
abort(403)
@v1_api.route('/admin/<data_type>/<path:path>', methods=['PUT', 'DELETE'])
def create_dirs(data_type, path):
if data_type not in DATA_TYPES:
abort(404)
result_path = os.path.join(CONF.manifests, getattr(CONF, data_type), path)
if request.method == 'PUT':
resp = make_response()
if os.path.exists(result_path):
return resp
if data_type == MANIFEST:
abort(403)
try:
os.makedirs(result_path)
except Exception as e:
abort(403)
return resp
if request.method == 'DELETE':
if not os.path.exists(result_path):
abort(404)
if os.path.isfile(result_path):
try:
os.remove(result_path)
except Exception as e:
abort(404)
else:
try:
os.rmdir(result_path)
except Exception as e:
abort(403)
resp = make_response()
return resp
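# Usage sketch (illustrative, not part of the module): the blueprint above is
# meant to be registered on a Flask application; the URL prefix and port are
# assumptions made only for this example.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(v1_api, url_prefix='/v1')
    app.run(port=5000)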
|
Bloomie/murano-repository
|
muranorepository/api/v1.py
|
Python
|
apache-2.0
| 4,731
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # handle NumPy documentation formatted docstrings.
'sphinx_plotly_directive', # For visualize plot result
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/#downloading
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/building-spark.html
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def setup(app):
# The app.add_javascript() is deprecated.
getattr(app, "add_js_file", getattr(app, "add_javascript"))('copybutton.js')
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
|
cloud-fan/spark
|
python/docs/source/conf.py
|
Python
|
apache-2.0
| 12,894
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ip_other_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "17", "format": "counter"}
:param entry_match_drop: {"optional": true, "size": "8", "type": "number", "oid": "6", "format": "counter"}
:param ip_port_block_free: {"optional": true, "size": "8", "type": "number", "oid": "15", "format": "counter"}
:param ip_node_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "13", "format": "counter"}
:param entry_list_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "10", "format": "counter"}
:param ip_node_alloc: {"optional": true, "size": "8", "type": "number", "oid": "11", "format": "counter"}
:param entry_added_shadow: {"optional": true, "size": "8", "type": "number", "oid": "20", "format": "counter"}
:param ip_port_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "16", "format": "counter"}
:param ip_other_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "19", "format": "counter"}
:param entry_removed_from_hw: {"optional": true, "size": "8", "type": "number", "oid": "4", "format": "counter"}
:param entry_deleted: {"optional": true, "size": "8", "type": "number", "oid": "2", "format": "counter"}
:param entry_list_alloc: {"optional": true, "size": "8", "type": "number", "oid": "8", "format": "counter"}
:param entry_list_free: {"optional": true, "size": "8", "type": "number", "oid": "9", "format": "counter"}
:param entry_added_to_hw: {"optional": true, "size": "8", "type": "number", "oid": "3", "format": "counter"}
:param ip_node_free: {"optional": true, "size": "8", "type": "number", "oid": "12", "format": "counter"}
:param entry_added: {"optional": true, "size": "8", "type": "number", "oid": "1", "format": "counter"}
:param ip_other_block_free: {"optional": true, "size": "8", "type": "number", "oid": "18", "format": "counter"}
:param entry_invalidated: {"optional": true, "size": "8", "type": "number", "oid": "21", "format": "counter"}
:param ip_port_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "14", "format": "counter"}
:param entry_match_drop_hw: {"optional": true, "size": "8", "type": "number", "oid": "7", "format": "counter"}
:param hw_out_of_entries: {"optional": true, "size": "8", "type": "number", "oid": "5", "format": "counter"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.ip_other_block_alloc = ""
self.entry_match_drop = ""
self.ip_port_block_free = ""
self.ip_node_alloc_failure = ""
self.entry_list_alloc_failure = ""
self.ip_node_alloc = ""
self.entry_added_shadow = ""
self.ip_port_block_alloc_failure = ""
self.ip_other_block_alloc_failure = ""
self.entry_removed_from_hw = ""
self.entry_deleted = ""
self.entry_list_alloc = ""
self.entry_list_free = ""
self.entry_added_to_hw = ""
self.ip_node_free = ""
self.entry_added = ""
self.ip_other_block_free = ""
self.entry_invalidated = ""
self.ip_port_block_alloc = ""
self.entry_match_drop_hw = ""
self.hw_out_of_entries = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class DdosProtection(A10BaseClass):
"""Class Description::
Statistics for the object ddos-protection.
Class ddos-protection supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ddos-protection/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ddos-protection"
self.a10_url="/axapi/v3/cgnv6/ddos-protection/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
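# Usage sketch (illustrative, not part of the SDK): both classes simply map
# keyword arguments onto attributes; the counter value below is made up and
# the DeviceProxy wiring is omitted because it depends on common/device_proxy.py.
if __name__ == "__main__":
    stats = Stats(entry_added="10")
    ddos = DdosProtection(stats={"entry_added": stats.entry_added})
    print(ddos.a10_url, ddos.stats)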
|
amwelch/a10sdk-python
|
a10sdk/core/cgnv6/cgnv6_ddos_protection_stats.py
|
Python
|
apache-2.0
| 4,533
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Networks package definition."""
from REDACTED.tf2_bert.modeling.networks.albert_transformer_encoder import AlbertTransformerEncoder
from REDACTED.tf2_bert.modeling.networks.classification import Classification
from REDACTED.tf2_bert.modeling.networks.encoder_scaffold import EncoderScaffold
from REDACTED.tf2_bert.modeling.networks.masked_lm import MaskedLM
from REDACTED.tf2_bert.modeling.networks.span_labeling import SpanLabeling
from REDACTED.tf2_bert.modeling.networks.transformer_encoder import TransformerEncoder
|
mlperf/training_results_v0.7
|
Google/benchmarks/bert/implementations/bert-cloud-TF2.0-tpu-v3-32/modeling/networks/__init__.py
|
Python
|
apache-2.0
| 1,212
|
"""This library brings support for forked_daapd to Home Assistant."""
import asyncio
from collections import defaultdict
import logging
from pyforked_daapd import ForkedDaapdAPI
from pylibrespot_java import LibrespotJavaAPI
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import MEDIA_TYPE_MUSIC
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.util.dt import utcnow
from .const import (
CALLBACK_TIMEOUT,
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DEFAULT_TTS_PAUSE_TIME,
DEFAULT_TTS_VOLUME,
DEFAULT_UNMUTE_VOLUME,
DOMAIN,
FD_NAME,
HASS_DATA_REMOVE_LISTENERS_KEY,
HASS_DATA_UPDATER_KEY,
KNOWN_PIPES,
PIPE_FUNCTION_MAP,
SIGNAL_ADD_ZONES,
SIGNAL_CONFIG_OPTIONS_UPDATE,
SIGNAL_UPDATE_DATABASE,
SIGNAL_UPDATE_MASTER,
SIGNAL_UPDATE_OUTPUTS,
SIGNAL_UPDATE_PLAYER,
SIGNAL_UPDATE_QUEUE,
SOURCE_NAME_CLEAR,
SOURCE_NAME_DEFAULT,
STARTUP_DATA,
SUPPORTED_FEATURES,
SUPPORTED_FEATURES_ZONE,
TTS_TIMEOUT,
)
_LOGGER = logging.getLogger(__name__)
WS_NOTIFY_EVENT_TYPES = ["player", "outputs", "volume", "options", "queue", "database"]
WEBSOCKET_RECONNECT_TIME = 30 # seconds
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up forked-daapd from a config entry."""
host = config_entry.data[CONF_HOST]
port = config_entry.data[CONF_PORT]
password = config_entry.data[CONF_PASSWORD]
forked_daapd_api = ForkedDaapdAPI(
async_get_clientsession(hass), host, port, password
)
forked_daapd_master = ForkedDaapdMaster(
clientsession=async_get_clientsession(hass),
api=forked_daapd_api,
ip_address=host,
api_port=port,
api_password=password,
config_entry=config_entry,
)
@callback
def async_add_zones(api, outputs):
zone_entities = []
for output in outputs:
zone_entities.append(ForkedDaapdZone(api, output, config_entry.entry_id))
async_add_entities(zone_entities, False)
remove_add_zones_listener = async_dispatcher_connect(
hass, SIGNAL_ADD_ZONES.format(config_entry.entry_id), async_add_zones
)
remove_entry_listener = config_entry.add_update_listener(update_listener)
if not hass.data.get(DOMAIN):
hass.data[DOMAIN] = {config_entry.entry_id: {}}
hass.data[DOMAIN][config_entry.entry_id] = {
HASS_DATA_REMOVE_LISTENERS_KEY: [
remove_add_zones_listener,
remove_entry_listener,
]
}
async_add_entities([forked_daapd_master], False)
forked_daapd_updater = ForkedDaapdUpdater(
hass, forked_daapd_api, config_entry.entry_id
)
await forked_daapd_updater.async_init()
hass.data[DOMAIN][config_entry.entry_id][
HASS_DATA_UPDATER_KEY
] = forked_daapd_updater
async def update_listener(hass, entry):
"""Handle options update."""
async_dispatcher_send(
hass, SIGNAL_CONFIG_OPTIONS_UPDATE.format(entry.entry_id), entry.options
)
class ForkedDaapdZone(MediaPlayerEntity):
"""Representation of a forked-daapd output."""
def __init__(self, api, output, entry_id):
"""Initialize the ForkedDaapd Zone."""
self._api = api
self._output = output
self._output_id = output["id"]
self._last_volume = DEFAULT_UNMUTE_VOLUME # used for mute/unmute
self._available = True
self._entry_id = entry_id
async def async_added_to_hass(self):
"""Use lifecycle hooks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_OUTPUTS.format(self._entry_id),
self._async_update_output_callback,
)
)
@callback
def _async_update_output_callback(self, outputs, _event=None):
new_output = next(
(output for output in outputs if output["id"] == self._output_id), None
)
self._available = bool(new_output)
if self._available:
self._output = new_output
self.async_write_ha_state()
@property
def unique_id(self):
"""Return unique ID."""
return f"{self._entry_id}-{self._output_id}"
@property
def should_poll(self) -> bool:
"""Entity pushes its state to HA."""
return False
async def async_toggle(self):
"""Toggle the power on the zone."""
if self.state == STATE_OFF:
await self.async_turn_on()
else:
await self.async_turn_off()
@property
def available(self) -> bool:
"""Return whether the zone is available."""
return self._available
async def async_turn_on(self):
"""Enable the output."""
await self._api.change_output(self._output_id, selected=True)
async def async_turn_off(self):
"""Disable the output."""
await self._api.change_output(self._output_id, selected=False)
@property
def name(self):
"""Return the name of the zone."""
return f"{FD_NAME} output ({self._output['name']})"
@property
def state(self):
"""State of the zone."""
if self._output["selected"]:
return STATE_ON
return STATE_OFF
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._output["volume"] / 100
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._output["volume"] == 0
async def async_mute_volume(self, mute):
"""Mute the volume."""
if mute:
if self.volume_level == 0:
return
self._last_volume = self.volume_level # store volume level to restore later
target_volume = 0
else:
target_volume = self._last_volume # restore volume level
await self.async_set_volume_level(volume=target_volume)
async def async_set_volume_level(self, volume):
"""Set volume - input range [0,1]."""
await self._api.set_volume(volume=volume * 100, output_id=self._output_id)
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORTED_FEATURES_ZONE
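# Illustrative sketch (not part of the integration): the minimal `output`
# payload a zone consumes, with keys taken from the properties above and
# values made up for the example.
#
#   output = {"id": "0", "name": "Kitchen", "selected": True, "volume": 50}
#   zone = ForkedDaapdZone(api, output, config_entry.entry_id)
#   # zone.state == STATE_ON, zone.volume_level == 0.5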
class ForkedDaapdMaster(MediaPlayerEntity):
"""Representation of the main forked-daapd device."""
def __init__(
self, clientsession, api, ip_address, api_port, api_password, config_entry
):
"""Initialize the ForkedDaapd Master Device."""
self._api = api
self._player = STARTUP_DATA[
"player"
] # _player, _outputs, and _queue are loaded straight from api
self._outputs = STARTUP_DATA["outputs"]
self._queue = STARTUP_DATA["queue"]
self._track_info = defaultdict(
str
) # _track info is found by matching _player data with _queue data
self._last_outputs = [] # used for device on/off
self._last_volume = DEFAULT_UNMUTE_VOLUME
self._player_last_updated = None
self._pipe_control_api = {}
self._ip_address = (
ip_address # need to save this because pipe control is on same ip
)
self._tts_pause_time = DEFAULT_TTS_PAUSE_TIME
self._tts_volume = DEFAULT_TTS_VOLUME
self._tts_requested = False
self._tts_queued = False
self._tts_playing_event = asyncio.Event()
self._on_remove = None
self._available = False
self._clientsession = clientsession
self._config_entry = config_entry
self.update_options(config_entry.options)
self._paused_event = asyncio.Event()
self._pause_requested = False
self._sources_uris = {}
self._source = SOURCE_NAME_DEFAULT
self._max_playlists = None
async def async_added_to_hass(self):
"""Use lifecycle hooks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_PLAYER.format(self._config_entry.entry_id),
self._update_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_QUEUE.format(self._config_entry.entry_id),
self._update_queue,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_OUTPUTS.format(self._config_entry.entry_id),
self._update_outputs,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_MASTER.format(self._config_entry.entry_id),
self._update_callback,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_CONFIG_OPTIONS_UPDATE.format(self._config_entry.entry_id),
self.update_options,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_UPDATE_DATABASE.format(self._config_entry.entry_id),
self._update_database,
)
)
@callback
def _update_callback(self, available):
"""Call update method."""
self._available = available
self.async_write_ha_state()
@callback
def update_options(self, options):
"""Update forked-daapd server options."""
if CONF_LIBRESPOT_JAVA_PORT in options:
self._pipe_control_api["librespot-java"] = LibrespotJavaAPI(
self._clientsession, self._ip_address, options[CONF_LIBRESPOT_JAVA_PORT]
)
if CONF_TTS_PAUSE_TIME in options:
self._tts_pause_time = options[CONF_TTS_PAUSE_TIME]
if CONF_TTS_VOLUME in options:
self._tts_volume = options[CONF_TTS_VOLUME]
if CONF_MAX_PLAYLISTS in options:
# sources not updated until next _update_database call
self._max_playlists = options[CONF_MAX_PLAYLISTS]
@callback
def _update_player(self, player, event):
self._player = player
self._player_last_updated = utcnow()
self._update_track_info()
if self._tts_queued:
self._tts_playing_event.set()
self._tts_queued = False
if self._pause_requested:
self._paused_event.set()
self._pause_requested = False
event.set()
@callback
def _update_queue(self, queue, event):
self._queue = queue
if (
self._tts_requested
and self._queue["count"] == 1
and self._queue["items"][0]["uri"].find("tts_proxy") != -1
):
self._tts_requested = False
self._tts_queued = True
if (
self._queue["count"] >= 1
and self._queue["items"][0]["data_kind"] == "pipe"
and self._queue["items"][0]["title"] in KNOWN_PIPES
): # if we're playing a pipe, set the source automatically so we can forward controls
self._source = f"{self._queue['items'][0]['title']} (pipe)"
self._update_track_info()
event.set()
@callback
def _update_outputs(self, outputs, event=None):
if event: # Calling without event is meant for zone, so ignore
self._outputs = outputs
event.set()
@callback
def _update_database(self, pipes, playlists, event):
self._sources_uris = {SOURCE_NAME_CLEAR: None, SOURCE_NAME_DEFAULT: None}
if pipes:
self._sources_uris.update(
{
f"{pipe['title']} (pipe)": pipe["uri"]
for pipe in pipes
if pipe["title"] in KNOWN_PIPES
}
)
if playlists:
self._sources_uris.update(
{
f"{playlist['name']} (playlist)": playlist["uri"]
for playlist in playlists[: self._max_playlists]
}
)
event.set()
def _update_track_info(self): # run during every player or queue update
try:
self._track_info = next(
track
for track in self._queue["items"]
if track["id"] == self._player["item_id"]
)
except (StopIteration, TypeError, KeyError):
_LOGGER.debug("Could not get track info")
self._track_info = defaultdict(str)
@property
def unique_id(self):
"""Return unique ID."""
return self._config_entry.entry_id
@property
def should_poll(self) -> bool:
"""Entity pushes its state to HA."""
return False
@property
def available(self) -> bool:
"""Return whether the master is available."""
return self._available
async def async_turn_on(self):
"""Restore the last on outputs state."""
# restore state
await self._api.set_volume(volume=self._last_volume * 100)
if self._last_outputs:
futures = []
for output in self._last_outputs:
futures.append(
self._api.change_output(
output["id"],
selected=output["selected"],
volume=output["volume"],
)
)
await asyncio.wait(futures)
else: # enable all outputs
await self._api.set_enabled_outputs(
[output["id"] for output in self._outputs]
)
async def async_turn_off(self):
"""Pause player and store outputs state."""
await self.async_media_pause()
self._last_outputs = self._outputs
if any(output["selected"] for output in self._outputs):
await self._api.set_enabled_outputs([])
async def async_toggle(self):
"""Toggle the power on the device.
Default media player component method counts idle as off.
We consider idle to be on but just not playing.
"""
if self.state == STATE_OFF:
await self.async_turn_on()
else:
await self.async_turn_off()
@property
def name(self):
"""Return the name of the device."""
return f"{FD_NAME} server"
@property
def state(self):
"""State of the player."""
if self._player["state"] == "play":
return STATE_PLAYING
if self._player["state"] == "pause":
return STATE_PAUSED
if not any(output["selected"] for output in self._outputs):
return STATE_OFF
if self._player["state"] == "stop": # this should catch all remaining cases
return STATE_IDLE
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player["volume"] / 100
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._player["volume"] == 0
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._player["item_id"]
@property
def media_content_type(self):
"""Content type of current playing media."""
return self._track_info["media_kind"]
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._player["item_length_ms"] / 1000
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._player["item_progress_ms"] / 1000
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._player_last_updated
@property
def media_title(self):
"""Title of current playing media."""
# Use album field when data_kind is url
# https://github.com/ejurgensen/forked-daapd/issues/351
if self._track_info["data_kind"] == "url":
return self._track_info["album"]
return self._track_info["title"]
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_info["artist"]
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
# Use title field when data_kind is url
# https://github.com/ejurgensen/forked-daapd/issues/351
if self._track_info["data_kind"] == "url":
return self._track_info["title"]
return self._track_info["album"]
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self._track_info["album_artist"]
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self._track_info["track_number"]
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return self._player["shuffle"]
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORTED_FEATURES
@property
def source(self):
"""Name of the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return [*self._sources_uris]
async def async_mute_volume(self, mute):
"""Mute the volume."""
if mute:
if self.volume_level == 0:
return
self._last_volume = self.volume_level # store volume level to restore later
target_volume = 0
else:
target_volume = self._last_volume # restore volume level
await self._api.set_volume(volume=target_volume * 100)
async def async_set_volume_level(self, volume):
"""Set volume - input range [0,1]."""
await self._api.set_volume(volume=volume * 100)
async def async_media_play(self):
"""Start playback."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_play")
else:
await self._api.start_playback()
async def async_media_pause(self):
"""Pause playback."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_pause")
else:
await self._api.pause_playback()
async def async_media_stop(self):
"""Stop playback."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_stop")
else:
await self._api.stop_playback()
async def async_media_previous_track(self):
"""Skip to previous track."""
if self._use_pipe_control():
await self._pipe_call(
self._use_pipe_control(), "async_media_previous_track"
)
else:
await self._api.previous_track()
async def async_media_next_track(self):
"""Skip to next track."""
if self._use_pipe_control():
await self._pipe_call(self._use_pipe_control(), "async_media_next_track")
else:
await self._api.next_track()
async def async_media_seek(self, position):
"""Seek to position."""
await self._api.seek(position_ms=position * 1000)
async def async_clear_playlist(self):
"""Clear playlist."""
await self._api.clear_queue()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self._api.shuffle(shuffle)
@property
def media_image_url(self):
"""Image url of current playing media."""
if url := self._track_info.get("artwork_url"):
url = self._api.full_url(url)
return url
async def _save_and_set_tts_volumes(self):
if self.volume_level: # save master volume
self._last_volume = self.volume_level
self._last_outputs = self._outputs
if self._outputs:
await self._api.set_volume(volume=self._tts_volume * 100)
futures = []
for output in self._outputs:
futures.append(
self._api.change_output(
output["id"], selected=True, volume=self._tts_volume * 100
)
)
await asyncio.wait(futures)
async def _pause_and_wait_for_callback(self):
"""Send pause and wait for the pause callback to be received."""
self._pause_requested = True
await self.async_media_pause()
try:
await asyncio.wait_for(
self._paused_event.wait(), timeout=CALLBACK_TIMEOUT
) # wait for paused
except asyncio.TimeoutError:
self._pause_requested = False
self._paused_event.clear()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a URI."""
if media_type == MEDIA_TYPE_MUSIC:
saved_state = self.state # save play state
saved_mute = self.is_volume_muted
sleep_future = asyncio.create_task(
asyncio.sleep(self._tts_pause_time)
) # start timing now, but not exact because of fd buffer + tts latency
await self._pause_and_wait_for_callback()
await self._save_and_set_tts_volumes()
# save position
saved_song_position = self._player["item_progress_ms"]
saved_queue = (
self._queue if self._queue["count"] > 0 else None
) # stash queue
if saved_queue:
saved_queue_position = next(
i
for i, item in enumerate(saved_queue["items"])
if item["id"] == self._player["item_id"]
)
self._tts_requested = True
await sleep_future
await self._api.add_to_queue(uris=media_id, playback="start", clear=True)
try:
await asyncio.wait_for(
self._tts_playing_event.wait(), timeout=TTS_TIMEOUT
)
# we have started TTS, now wait for completion
await asyncio.sleep(
self._queue["items"][0]["length_ms"]
/ 1000 # player may not have updated yet so grab length from queue
+ self._tts_pause_time
)
except asyncio.TimeoutError:
self._tts_requested = False
_LOGGER.warning("TTS request timed out")
self._tts_playing_event.clear()
# TTS done, return to normal
await self.async_turn_on() # restore outputs and volumes
if saved_mute: # mute if we were muted
await self.async_mute_volume(True)
if self._use_pipe_control(): # resume pipe
await self._api.add_to_queue(
uris=self._sources_uris[self._source], clear=True
)
if saved_state == STATE_PLAYING:
await self.async_media_play()
else: # restore stashed queue
if saved_queue:
uris = ""
for item in saved_queue["items"]:
uris += item["uri"] + ","
await self._api.add_to_queue(
uris=uris,
playback="start",
playback_from_position=saved_queue_position,
clear=True,
)
await self._api.seek(position_ms=saved_song_position)
if saved_state == STATE_PAUSED:
await self.async_media_pause()
elif saved_state != STATE_PLAYING:
await self.async_media_stop()
else:
_LOGGER.debug("Media type '%s' not supported", media_type)
async def async_select_source(self, source):
"""Change source.
Source name reflects whether in default mode or pipe mode.
Selecting playlists/clear sets the playlists/clears but ends up in default mode.
"""
if source == self._source:
return
if self._use_pipe_control(): # if pipe was playing, we need to stop it first
await self._pause_and_wait_for_callback()
self._source = source
if not self._use_pipe_control(): # playlist or clear ends up at default
self._source = SOURCE_NAME_DEFAULT
if self._sources_uris.get(source): # load uris for pipes or playlists
await self._api.add_to_queue(uris=self._sources_uris[source], clear=True)
elif source == SOURCE_NAME_CLEAR: # clear playlist
await self._api.clear_queue()
self.async_write_ha_state()
def _use_pipe_control(self):
"""Return which pipe control from KNOWN_PIPES to use."""
if self._source[-7:] == " (pipe)":
return self._source[:-7]
return ""
async def _pipe_call(self, pipe_name, base_function_name):
if self._pipe_control_api.get(pipe_name):
return await getattr(
self._pipe_control_api[pipe_name],
PIPE_FUNCTION_MAP[pipe_name][base_function_name],
)()
_LOGGER.warning("No pipe control available for %s", pipe_name)
class ForkedDaapdUpdater:
"""Manage updates for the forked-daapd device."""
def __init__(self, hass, api, entry_id):
"""Initialize."""
self.hass = hass
self._api = api
self.websocket_handler = None
self._all_output_ids = set()
self._entry_id = entry_id
async def async_init(self):
"""Perform async portion of class initialization."""
server_config = await self._api.get_request("config")
if websocket_port := server_config.get("websocket_port"):
self.websocket_handler = asyncio.create_task(
self._api.start_websocket_handler(
websocket_port,
WS_NOTIFY_EVENT_TYPES,
self._update,
WEBSOCKET_RECONNECT_TIME,
self._disconnected_callback,
)
)
else:
_LOGGER.error("Invalid websocket port")
def _disconnected_callback(self):
async_dispatcher_send(
self.hass, SIGNAL_UPDATE_MASTER.format(self._entry_id), False
)
async_dispatcher_send(
self.hass, SIGNAL_UPDATE_OUTPUTS.format(self._entry_id), []
)
async def _update(self, update_types):
"""Private update method."""
update_types = set(update_types)
update_events = {}
_LOGGER.debug("Updating %s", update_types)
if (
"queue" in update_types
): # update queue, queue before player for async_play_media
queue = await self._api.get_request("queue")
if queue:
update_events["queue"] = asyncio.Event()
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_QUEUE.format(self._entry_id),
queue,
update_events["queue"],
)
# order of below don't matter
if not {"outputs", "volume"}.isdisjoint(update_types): # update outputs
outputs = await self._api.get_request("outputs")
if outputs:
outputs = outputs["outputs"]
update_events[
"outputs"
] = asyncio.Event() # only for master, zones should ignore
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_OUTPUTS.format(self._entry_id),
outputs,
update_events["outputs"],
)
self._add_zones(outputs)
if not {"database"}.isdisjoint(update_types):
pipes, playlists = await asyncio.gather(
self._api.get_pipes(), self._api.get_playlists()
)
update_events["database"] = asyncio.Event()
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_DATABASE.format(self._entry_id),
pipes,
playlists,
update_events["database"],
)
if not {"update", "config"}.isdisjoint(update_types): # not supported
_LOGGER.debug("update/config notifications neither requested nor supported")
if not {"player", "options", "volume"}.isdisjoint(
update_types
): # update player
player = await self._api.get_request("player")
if player:
update_events["player"] = asyncio.Event()
if update_events.get("queue"):
await update_events[
"queue"
].wait() # make sure queue done before player for async_play_media
async_dispatcher_send(
self.hass,
SIGNAL_UPDATE_PLAYER.format(self._entry_id),
player,
update_events["player"],
)
if update_events:
await asyncio.wait(
[asyncio.create_task(event.wait()) for event in update_events.values()]
) # make sure callbacks done before update
async_dispatcher_send(
self.hass, SIGNAL_UPDATE_MASTER.format(self._entry_id), True
)
def _add_zones(self, outputs):
outputs_to_add = []
for output in outputs:
if output["id"] not in self._all_output_ids:
self._all_output_ids.add(output["id"])
outputs_to_add.append(output)
if outputs_to_add:
async_dispatcher_send(
self.hass,
SIGNAL_ADD_ZONES.format(self._entry_id),
self._api,
outputs_to_add,
)
|
aronsky/home-assistant
|
homeassistant/components/forked_daapd/media_player.py
|
Python
|
apache-2.0
| 30,898
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from glanceclient import exc
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
def str2bool(value):
"""Convert a string value to boolean
"""
return value.lower() in ("yes", "true", "1")
# Mapping of property names to type, used for converting input string value
# before submitting.
PROPERTY_TYPES = {'min_disk': long, 'min_ram': long, 'protected': str2bool}
def convert_value(key, value):
"""Convert the property value to the proper type if necessary.
"""
_type = PROPERTY_TYPES.get(key)
if _type:
return _type(value)
return value
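# Illustrative sketch (not part of the original module): how convert_value()
# coerces raw form input via PROPERTY_TYPES before it is sent to Glance.
# Note the module targets Python 2, where the built-in ``long`` type exists.
# Example values are hypothetical:
#
#     convert_value('min_disk', '20')     # -> 20L (long)
#     convert_value('protected', 'Yes')   # -> True (via str2bool)
#     convert_value('os_version', '7.1')  # -> '7.1' (no mapping, returned as-is)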
class CreateProperty(forms.SelfHandlingForm):
key = forms.CharField(max_length="255", label=_("Key"))
value = forms.CharField(label=_("Value"))
def handle(self, request, data):
try:
api.glance.image_update_properties(request,
self.initial['image_id'],
**{data['key']: convert_value(data['key'], data['value'])})
msg = _('Created custom property "%s".') % data['key']
messages.success(request, msg)
return True
except exc.HTTPForbidden:
msg = _('Unable to create image custom property. Property "%s" '
'is read only.') % data['key']
exceptions.handle(request, msg)
except exc.HTTPConflict:
msg = _('Unable to create image custom property. Property "%s" '
'already exists.') % data['key']
exceptions.handle(request, msg)
except Exception:
msg = _('Unable to create image custom '
'property "%s".') % data['key']
exceptions.handle(request, msg)
class EditProperty(forms.SelfHandlingForm):
key = forms.CharField(widget=forms.widgets.HiddenInput)
value = forms.CharField(label=_("Value"))
def handle(self, request, data):
try:
api.glance.image_update_properties(request,
self.initial['image_id'],
**{data['key']: convert_value(data['key'], data['value'])})
msg = _('Saved custom property "%s".') % data['key']
messages.success(request, msg)
return True
except exc.HTTPForbidden:
msg = _('Unable to edit image custom property. Property "%s" '
'is read only.') % data['key']
exceptions.handle(request, msg)
except Exception:
msg = _('Unable to edit image custom '
'property "%s".') % data['key']
exceptions.handle(request, msg)
|
jumpstarter-io/horizon
|
openstack_dashboard/dashboards/admin/images/properties/forms.py
|
Python
|
apache-2.0
| 3,269
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
class IndexView(tables.DataTableView):
table_class = rbac_policy_tables.RBACPoliciesTable
page_title = _("RBAC Policies")
@memoized.memoized_method
def _get_tenants(self):
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _("Unable to retrieve information about the "
"policies' projects.")
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t.name) for t in tenants])
return tenant_dict
def _get_networks(self):
try:
networks = api.neutron.network_list(self.request)
except Exception:
networks = []
msg = _("Unable to retrieve information about the "
"policies' networks.")
exceptions.handle(self.request, msg)
return dict((n.id, n.name) for n in networks)
def _get_qos_policies(self):
qos_policies = []
try:
if api.neutron.is_extension_supported(self.request,
extension_alias='qos'):
qos_policies = api.neutron.policy_list(self.request)
except Exception:
msg = _("Unable to retrieve information about the "
"policies' qos policies.")
exceptions.handle(self.request, msg)
return dict((q.id, q.name) for q in qos_policies)
def get_data(self):
try:
rbac_policies = api.neutron.rbac_policy_list(self.request)
except Exception:
rbac_policies = []
messages.error(self.request,
_("Unable to retrieve RBAC policies."))
if rbac_policies:
tenant_dict = self._get_tenants()
network_dict = self._get_networks()
qos_policy_dict = self._get_qos_policies()
for p in rbac_policies:
# Set tenant name and object name
p.tenant_name = tenant_dict.get(p.tenant_id, p.tenant_id)
p.target_tenant_name = tenant_dict.get(p.target_tenant,
p.target_tenant)
if p.object_type == "network":
p.object_name = network_dict.get(p.object_id, p.object_id)
elif p.object_type == "qos_policy":
p.object_name = qos_policy_dict.get(p.object_id,
p.object_id)
return rbac_policies
class CreateView(forms.ModalFormView):
template_name = 'admin/rbac_policies/create.html'
form_id = "create_rbac_policy_form"
form_class = rbac_policy_forms.CreatePolicyForm
submit_label = _("Create RBAC Policy")
submit_url = reverse_lazy("horizon:admin:rbac_policies:create")
success_url = reverse_lazy("horizon:admin:rbac_policies:index")
page_title = _("Create A RBAC Policy")
class UpdateView(forms.ModalFormView):
context_object_name = 'rbac_policies'
template_name = 'admin/rbac_policies/update.html'
form_class = rbac_policy_forms.UpdatePolicyForm
form_id = "update_rbac_policy_form"
submit_label = _("Save Changes")
submit_url = 'horizon:admin:rbac_policies:update'
success_url = reverse_lazy('horizon:admin:rbac_policies:index')
page_title = _("Update RBAC Policy")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.kwargs['rbac_policy_id'],)
context["rbac_policy_id"] = self.kwargs['rbac_policy_id']
context["submit_url"] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rbac_policy_id = self.kwargs['rbac_policy_id']
try:
return api.neutron.rbac_policy_get(self.request, rbac_policy_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rbac policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rbac_policy = self._get_object()
return {'rbac_policy_id': rbac_policy['id'],
'target_tenant': rbac_policy['target_tenant']}
class DetailView(tabs.TabView):
tab_group_class = rbac_policy_tabs.RBACDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ rbac_policy.id }}"
|
NeCTAR-RC/horizon
|
openstack_dashboard/dashboards/admin/rbac_policies/views.py
|
Python
|
apache-2.0
| 5,711
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base as obj_base
from nova.objects import fields
EVENT_NAMES = [
# Network has changed for this instance, rebuild info_cache
'network-changed',
# VIF plugging notifications, tag is port_id
'network-vif-plugged',
'network-vif-unplugged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceExternalEvent(obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Supports network-changed and vif-plugged
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
'name': fields.EnumField(valid_values=EVENT_NAMES),
'status': fields.StringField(),
'tag': fields.StringField(nullable=True),
'data': fields.DictOfStringsField(),
}
@staticmethod
def make_key(name, tag=None):
if tag is not None:
return '%s-%s' % (name, tag)
else:
return name
@property
def key(self):
return self.make_key(self.name, self.tag)
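# Illustrative sketch (not part of the original module): make_key() just joins
# the event name and optional tag with a hyphen (values below are hypothetical):
#
#     InstanceExternalEvent.make_key('network-vif-plugged', 'port-uuid-1')
#     # -> 'network-vif-plugged-port-uuid-1'
#     InstanceExternalEvent.make_key('network-changed')
#     # -> 'network-changed'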
|
thomasem/nova
|
nova/objects/external_event.py
|
Python
|
apache-2.0
| 1,735
|
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
page = 'http://sawlive.tv/embed/%s' % page
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = page
result = client.request(page, referer=referer)
unpacked = ''
packed = result.split('\n')
for i in packed:
try: unpacked += jsunpack.unpack(i)
except: pass
result += unpacked
result = urllib.unquote_plus(result)
result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]
url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)[0]
url = '/'.join(url)
result = client.request(url, referer=referer)
strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]
url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
return url
except:
return
|
mrknow/filmkodi
|
plugin.video.fanfilm/resources/lib/resolvers/sawlive.py
|
Python
|
apache-2.0
| 2,023
|
import pwsimple
# Midas volume rendering ParaviewWeb plugin
# Initialize the volume rendering state
def InitViewState (cameraFocalPoint, cameraPosition, colorArrayName, colorMap, sofPoints, viewSize):
if type(colorArrayName) is unicode:
colorArrayName = colorArrayName.encode('ascii', 'ignore')
activeView = pwsimple.CreateIfNeededRenderView()
activeView.CameraFocalPoint = cameraFocalPoint
activeView.CameraPosition = cameraPosition
activeView.CameraViewUp = [0.0, 0.0, 1.0]
activeView.CameraParallelProjection = False
activeView.CenterOfRotation = activeView.CameraFocalPoint
activeView.Background = [0.0, 0.0, 0.0]
activeView.Background2 = [0.0, 0.0, 0.0]
activeView.ViewSize = viewSize
lookupTable = pwsimple.GetLookupTableForArray(colorArrayName, 1)
lookupTable.RGBPoints = colorMap
lookupTable.ScalarRangeInitialized = 1.0
lookupTable.ColorSpace = 0 # 0 corresponds to RGB
# Initial scalar opacity function
sof = pwsimple.CreatePiecewiseFunction()
sof.Points = sofPoints
dataRep = pwsimple.Show()
dataRep.ScalarOpacityFunction = sof
dataRep.Representation = 'Volume'
dataRep.ColorArrayName = colorArrayName
dataRep.LookupTable = lookupTable
retVal = {}
retVal['sof'] = sof
retVal['lookupTable'] = lookupTable
retVal['activeView'] = activeView
return retVal
# Extract a subgrid of the source
def ExtractSubgrid (source, bounds, lookupTable, sof, colorArrayName, toHide):
pwsimple.SetActiveSource(source)
subgrid = pwsimple.ExtractSubset()
subgrid.VOI = bounds
pwsimple.SetActiveSource(subgrid)
if type(colorArrayName) is unicode:
colorArrayName = colorArrayName.encode('ascii', 'ignore')
dataRep = pwsimple.Show()
dataRep.ScalarOpacityFunction = sof
dataRep.Representation = 'Volume'
dataRep.SelectionPointFieldDataArrayName = colorArrayName
dataRep.ColorArrayName = colorArrayName
dataRep.LookupTable = lookupTable
pwsimple.SetActiveSource(source)
pwsimple.Hide(source)
if toHide:
pwsimple.Hide(toHide)
pwsimple.SetActiveSource(subgrid)
return subgrid
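# Illustrative sketch (not part of the original plugin): the argument shapes
# InitViewState() expects, inferred from the ParaView proxies it configures.
# RGBPoints are flat [scalar, r, g, b, ...] tuples and the scalar opacity
# function points are [scalar, opacity, midpoint, sharpness, ...]. All values
# below are hypothetical:
#
#     state = InitViewState(
#         cameraFocalPoint=[0.0, 0.0, 0.0],
#         cameraPosition=[0.0, -400.0, 0.0],
#         colorArrayName=u'Scalars_',
#         colorMap=[0.0, 0.0, 0.0, 1.0, 255.0, 1.0, 0.0, 0.0],
#         sofPoints=[0.0, 0.0, 0.5, 0.0, 255.0, 1.0, 0.5, 0.0],
#         viewSize=[800, 600])
#     # state['sof'] and state['lookupTable'] can then be passed to
#     # ExtractSubgrid() to volume-render a subset of the source.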
|
jcfr/Midas
|
modules/visualize/python/pvw-plugins/midasvr.py
|
Python
|
apache-2.0
| 2,091
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input(attrs: List[Dict[str, Any]], batch):
if attrs[0]['data_layout'] == 'NCHW':
return np.random.random([batch, 32, 64, 64]).astype(np.float32)
else:
return np.random.random([batch, 64, 64, 32]).astype(np.float32)
def generate_scale():
return np.random.randn(32).astype(np.float32)
def generate_bias():
return np.random.randn(32).astype(np.float32)
for batch in [1, 2, 4]:
for group in [1, 4, 32]:
for epsilon in [0.1, 0.7]:
for data_layout in ['NCHW', 'NHWC']:
for i in [0, 1]:
dics = [{
"epsilon": epsilon,
"groups": group,
"data_layout": data_layout
}, {
"groups": group,
"data_layout": data_layout
}]
ops_config = [{
"op_type": "group_norm",
"op_inputs": {
"X": ["input_data"],
"Scale": ["scale_weight"],
"Bias": ["bias_weight"]
},
"op_outputs": {
"Y": ["y_output"],
"Mean": ["mean_output"],
"Variance": ["variance_output"]
},
"op_attrs": dics[i]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"scale_weight": TensorConfig(
data_gen=partial(generate_scale)),
"bias_weight": TensorConfig(
data_gen=partial(generate_bias))
},
inputs={
"input_data": TensorConfig(data_gen=partial(
generate_input, dics, batch))
},
outputs=["y_output"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 32, 32]}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 64, 128, 64]
}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 64, 64]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
if len(attrs[0]) == 3:
if dynamic_shape:
return 1, 2
else:
return 0, 3
else:
return 0, 3
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
def add_skip_trt_case(self):
def teller1(program_config, predictor_config):
if len(self.dynamic_shape.min_input_shape) != 0:
return True
return False
self.add_skip_case(
teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
"The goup_norm plugin will check dim not -1 failed when dynamic fp16 mode."
)
def test(self):
self.add_skip_trt_case()
self.run_test()
if __name__ == "__main__":
unittest.main()
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py
|
Python
|
apache-2.0
| 6,063
|
# Copyright 2015 Huawei Technologies India Pvt Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.qos import rule as qos_rule
from neutronclient.tests.unit import test_cli20
class CLITestV20QoSRuleJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['bandwidth_limit_rule', 'dscp_marking_rule']
def setUp(self):
super(CLITestV20QoSRuleJSON, self).setUp()
def test_list_qos_rule_types(self):
# qos_rule_types.
resources = 'rule_types'
cmd_resources = 'qos_rule_types'
        response_contents = [{'type': 'bandwidth_limit'},
                             {'type': 'dscp_marking'}]
cmd = qos_rule.ListQoSRuleTypes(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True,
response_contents=response_contents,
cmd_resources=cmd_resources)
|
eayunstack/python-neutronclient
|
neutronclient/tests/unit/qos/test_cli20_rule.py
|
Python
|
apache-2.0
| 1,542
|
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import base64
import copy
import json
import os
import sys
import threading
BOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(
0,
os.path.join(os.path.dirname(BOT_DIR), '..', '..', '..', 'client', 'tests'))
import httpserver
sys.path.pop(0)
sys.path.insert(0, os.path.join(os.path.dirname(BOT_DIR), 'server'))
import bot_archive
sys.path.pop(0)
def gen_zip(url):
"""Returns swarming_bot.zip content."""
with open(os.path.join(BOT_DIR, 'config', 'bot_config.py'), 'rb') as f:
bot_config_content = f.read()
return bot_archive.get_swarming_bot_zip(
BOT_DIR, url, '1', {'config/bot_config.py': bot_config_content}, None)
def flatten_task_updates(updates):
"""Flatten a list of task updates into a single result.
This is more or less the equivalent of what task_scheduler.bot_update_task()
would do after all the bot API calls.
"""
out = {}
for update in updates:
if out.get('output') and update.get('output'):
# Accumulate output.
update = update.copy()
out['output'] = base64.b64encode(
base64.b64decode(out['output']) +
base64.b64decode(update.pop('output')))
update.pop('output_chunk_start')
out.update(update)
return out
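# Illustrative sketch (not part of the original module): how a list of bot
# task updates collapses into one result; the update dicts are hypothetical:
#
#     updates = [
#         {'output': base64.b64encode('foo'), 'output_chunk_start': 0},
#         {'output': base64.b64encode('bar'), 'output_chunk_start': 3,
#          'exit_code': 0},
#     ]
#     flatten_task_updates(updates)
#     # -> {'output': base64.b64encode('foobar'), 'output_chunk_start': 0,
#     #     'exit_code': 0}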
class Handler(httpserver.Handler):
"""Minimal Swarming bot server fake implementation."""
def do_GET(self):
if self.path == '/swarming/api/v1/bot/server_ping':
self.send_response(200)
self.end_headers()
return None
if self.path == '/auth/api/v1/server/oauth_config':
return self.send_json({
'client_id': 'id',
'client_not_so_secret': 'hunter2',
'primary_url': self.server.url,
})
raise NotImplementedError(self.path)
def do_POST(self):
data = json.loads(self.read_body())
if self.path == '/auth/api/v1/accounts/self/xsrf_token':
return self.send_json({'xsrf_token': 'a'})
if self.path == '/swarming/api/v1/bot/event':
self.server.parent._add_bot_event(data)
return self.send_json({})
if self.path == '/swarming/api/v1/bot/handshake':
return self.send_json({'xsrf_token': 'fine'})
if self.path == '/swarming/api/v1/bot/poll':
self.server.parent.has_polled.set()
return self.send_json({'cmd': 'sleep', 'duration': 60})
if self.path.startswith('/swarming/api/v1/bot/task_update/'):
task_id = self.path[len('/swarming/api/v1/bot/task_update/'):]
must_stop = self.server.parent._on_task_update(task_id, data)
return self.send_json({'ok': True, 'must_stop': must_stop})
if self.path.startswith('/swarming/api/v1/bot/task_error'):
task_id = self.path[len('/swarming/api/v1/bot/task_error/'):]
self.server.parent._add_task_error(task_id, data)
return self.send_json({'resp': 1})
raise NotImplementedError(self.path)
def do_PUT(self):
raise NotImplementedError(self.path)
class Server(httpserver.Server):
"""Fake a Swarming bot API server for local testing."""
_HANDLER_CLS = Handler
def __init__(self):
super(Server, self).__init__()
self._lock = threading.Lock()
# Accumulated bot events.
self._bot_events = []
# Running tasks.
self._tasks = {}
# Bot reported task errors.
self._task_errors = {}
self.has_polled = threading.Event()
self.has_updated_task = threading.Event()
self.must_stop = False
def get_bot_events(self):
"""Returns the events reported by the bots."""
with self._lock:
return self._bot_events[:]
def get_tasks(self):
"""Returns the tasks run by the bots."""
with self._lock:
return copy.deepcopy(self._tasks)
def get_task_errors(self):
"""Returns the task errors reported by the bots."""
with self._lock:
return self._task_errors.copy()
def _add_bot_event(self, data):
# Used by the handler.
with self._lock:
self._bot_events.append(data)
def _on_task_update(self, task_id, data):
with self._lock:
self._tasks.setdefault(task_id, []).append(data)
must_stop = self.must_stop
self.has_updated_task.set()
return must_stop
def _add_task_error(self, task_id, data):
# Used by the handler.
with self._lock:
self._task_errors.setdefault(task_id, []).append(data)
|
luci/luci-py
|
appengine/swarming/swarming_bot/swarmingserver_bot_fake.py
|
Python
|
apache-2.0
| 4,446
|
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from copy import deepcopy
import time
from unittest import mock
import six
from cinder import exception
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_data as tpd)
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_fake_objects as tpfo)
from cinder.volume.drivers.dell_emc.powermax import common
from cinder.volume.drivers.dell_emc.powermax import fc
from cinder.volume.drivers.dell_emc.powermax import masking
from cinder.volume.drivers.dell_emc.powermax import metadata
from cinder.volume.drivers.dell_emc.powermax import provision
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
from cinder.volume import volume_utils
class PowerMaxCommonTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxCommonTest, self).setUp()
self.mock_object(volume_utils, 'get_max_over_subscription_ratio',
return_value=1.0)
replication_device = self.data.sync_rep_device
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='CommonTests', interval=1,
retries=1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_f],
powermax_port_group_name_template='portGroupName',
replication_device=replication_device)
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.provision = self.common.provision
self.rest = self.common.rest
self.utils = self.common.utils
self.utils.get_volumetype_extra_specs = (
mock.Mock(return_value=self.data.vol_type_extra_specs))
self.rest.is_snap_id = True
@mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version',
return_value=tpd.PowerMaxData.next_gen_ucode)
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=('PowerMax 2000', True))
@mock.patch.object(rest.PowerMaxRest, 'set_rest_credentials')
@mock.patch.object(common.PowerMaxCommon, '_get_slo_workload_combinations',
return_value=[])
@mock.patch.object(common.PowerMaxCommon,
'get_attributes_from_cinder_config',
side_effect=[[], tpd.PowerMaxData.array_info_wl])
def test_gather_info_tests(self, mck_parse, mck_combo, mck_rest,
mck_nextgen, mck_ucode):
# Use-Case 1: Gather info no-opts
configuration = tpfo.FakeConfiguration(
None, 'config_group', None, None)
fc.PowerMaxFCDriver(configuration=configuration)
# Use-Case 2: Gather info next-gen with ucode/version
self.common._gather_info()
self.assertTrue(self.common.next_gen)
self.assertEqual(self.common.ucode_level, self.data.next_gen_ucode)
@mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version',
return_value=tpd.PowerMaxData.next_gen_ucode)
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=('PowerMax 2000', True))
@mock.patch.object(rest.PowerMaxRest, 'set_rest_credentials')
@mock.patch.object(
common.PowerMaxCommon, 'get_attributes_from_cinder_config',
return_value={'SerialNumber': tpd.PowerMaxData.array})
@mock.patch.object(
common.PowerMaxCommon, '_get_attributes_from_config')
def test_gather_info_rep_enabled_duplicate_serial_numbers(
self, mck_get_cnf, mck_get_c_cnf, mck_set, mck_model, mck_ucode):
is_enabled = self.common.replication_enabled
targets = self.common.replication_targets
self.common.replication_enabled = True
self.common.replication_targets = [self.data.array]
self.assertRaises(
exception.InvalidConfigurationValue, self.common._gather_info)
self.common.replication_enabled = is_enabled
self.common.replication_targets = targets
@mock.patch.object(common.PowerMaxCommon,
'_gather_info')
def test_get_attributes_from_config_short_host_template(
self, mock_gather):
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='config_group', interval='10',
retries='10', replication_device=None,
powermax_short_host_name_template='shortHostName')
driver = fc.PowerMaxFCDriver(configuration=configuration)
driver.common._get_attributes_from_config()
self.assertEqual(
'shortHostName', driver.common.powermax_short_host_name_template)
@mock.patch.object(common.PowerMaxCommon,
'_gather_info')
def test_get_attributes_from_config_no_short_host_template(
self, mock_gather):
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='config_group', interval='10',
retries='10', replication_device=None)
driver = fc.PowerMaxFCDriver(configuration=configuration)
driver.common._get_attributes_from_config()
self.assertIsNone(driver.common.powermax_short_host_name_template)
@mock.patch.object(common.PowerMaxCommon,
'_gather_info')
def test_get_attributes_from_config_port_group_template(
self, mock_gather):
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='config_group', interval='10',
retries='10', replication_device=None,
powermax_port_group_name_template='portGroupName')
driver = fc.PowerMaxFCDriver(configuration=configuration)
driver.common._get_attributes_from_config()
self.assertEqual(
'portGroupName', driver.common.powermax_port_group_name_template)
@mock.patch.object(common.PowerMaxCommon,
'_gather_info')
def test_get_attributes_from_config_no_port_group_template(
self, mock_gather):
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='config_group', interval='10',
retries='10', replication_device=None)
driver = fc.PowerMaxFCDriver(configuration=configuration)
driver.common._get_attributes_from_config()
self.assertIsNone(driver.common.powermax_port_group_name_template)
def test_get_slo_workload_combinations_powermax(self):
self.common.next_gen = True
self.common.array_model = 'PowerMax_2000'
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 24)
def test_get_slo_workload_combinations_afa_powermax(self):
self.common.next_gen = True
self.common.array_model = 'VMAX250F'
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 28)
def test_get_slo_workload_combinations_afa_hypermax(self):
self.common.next_gen = False
self.common.array_model = 'VMAX250F'
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 16)
def test_get_slo_workload_combinations_hybrid(self):
self.common.next_gen = False
self.common.array_model = 'VMAX100K'
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 44)
def test_get_slo_workload_combinations_failed(self):
self.common.array_model = 'xxxxxx'
array_info = {}
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._get_slo_workload_combinations, array_info)
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata',
return_value={'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'})
def test_create_volume(self, mck_meta):
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'metadata': {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}})
volume = deepcopy(self.data.test_volume)
volume.metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
model_update = self.common.create_volume(volume)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value=tpd.PowerMaxData.volume_metadata)
def test_create_volume_qos(self, mck_meta):
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'metadata': self.data.volume_metadata})
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs['qos'] = {
'total_iops_sec': '4000', 'DistributionType': 'Always'}
with mock.patch.object(self.utils, 'get_volumetype_extra_specs',
return_value=extra_specs):
model_update = self.common.create_volume(self.data.test_volume)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
def test_create_volume_from_snapshot(self, mck_meta, mck_cleanup_snaps):
ref_model_update = ({'provider_location': six.text_type(
deepcopy(self.data.provider_location_snapshot))})
model_update = self.common.create_volume_from_snapshot(
self.data.test_clone_volume, self.data.test_snapshot)
self.assertEqual(
ast.literal_eval(ref_model_update['provider_location']),
ast.literal_eval(model_update['provider_location']))
# Test from legacy snapshot
ref_model_update = (
{'provider_location': six.text_type(
deepcopy(self.data.provider_location_clone))})
model_update = self.common.create_volume_from_snapshot(
self.data.test_clone_volume, self.data.test_legacy_snapshot)
self.assertEqual(
ast.literal_eval(ref_model_update['provider_location']),
ast.literal_eval(model_update['provider_location']))
@mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates',
return_value=(tpd.PowerMaxData.replication_update,
tpd.PowerMaxData.rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group')
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
return_value=tpd.PowerMaxData.volume_create_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
return_value=(True, tpd.PowerMaxData.rep_extra_specs5,
tpd.PowerMaxData.rep_info_dict, True))
def test_create_replication_enabled_volume_first_volume(
self, mck_prep, mck_create, mck_protect, mck_updates):
array = self.data.array
volume = self.data.test_volume
volume_name = volume.name
volume_size = volume.size
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs5 = self.data.rep_extra_specs5
storagegroup_name = self.data.storagegroup_name_f
rep_info_dict = self.data.rep_info_dict
rep_vol = deepcopy(self.data.volume_create_info_dict)
rep_vol.update({'device_uuid': volume_name,
'storage_group': storagegroup_name,
'size': volume_size})
vol, update, info = self.common._create_replication_enabled_volume(
array, volume, volume_name, volume_size, rep_extra_specs,
storagegroup_name, rep_extra_specs['rep_mode'])
mck_prep.assert_called_once_with(self.data.rep_extra_specs)
mck_create.assert_called_once_with(
array, volume_name, storagegroup_name, volume_size,
rep_extra_specs, rep_info_dict)
mck_protect.assert_called_once_with(
rep_extra_specs, rep_extra_specs5, rep_vol)
rep_vol.update({'remote_device_id': self.data.device_id2})
mck_updates.assert_called_once_with(
rep_extra_specs, rep_extra_specs5, rep_vol)
self.assertEqual(self.data.volume_create_info_dict, vol)
self.assertEqual(self.data.replication_update, update)
self.assertEqual(self.data.rep_info_dict, info)
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates',
return_value=(tpd.PowerMaxData.replication_update,
tpd.PowerMaxData.rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group')
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
return_value=tpd.PowerMaxData.volume_create_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
side_effect=((False, '', '', True),
('', tpd.PowerMaxData.rep_extra_specs5,
tpd.PowerMaxData.rep_info_dict, '')))
def test_create_replication_enabled_volume_not_first_volume(
self, mck_prepare, mck_create, mck_protect, mck_updates,
mck_valid):
array = self.data.array
volume = self.data.test_volume
volume_name = volume.name
volume_size = volume.size
rep_extra_specs = self.data.rep_extra_specs
rep_extra_specs5 = self.data.rep_extra_specs5
storagegroup_name = self.data.storagegroup_name_f
rep_info_dict = self.data.rep_info_dict
rep_vol = deepcopy(self.data.volume_create_info_dict)
rep_vol.update({'device_uuid': volume_name,
'storage_group': storagegroup_name,
'size': volume_size})
vol, update, info = self.common._create_replication_enabled_volume(
array, volume, volume_name, volume_size, rep_extra_specs,
storagegroup_name, rep_extra_specs['rep_mode'])
self.assertEqual(2, mck_prepare.call_count)
mck_create.assert_called_once_with(
array, volume_name, storagegroup_name, volume_size,
rep_extra_specs, rep_info_dict)
mck_protect.assert_not_called()
mck_valid.assert_called_once_with(array, rep_extra_specs)
rep_vol.update({'remote_device_id': self.data.device_id2})
mck_updates.assert_called_once_with(
rep_extra_specs, rep_extra_specs5, rep_vol)
self.assertEqual(self.data.volume_create_info_dict, vol)
self.assertEqual(self.data.replication_update, update)
self.assertEqual(self.data.rep_info_dict, info)
@mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates',
return_value=(tpd.PowerMaxData.replication_update,
tpd.PowerMaxData.rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, 'get_and_set_remote_device_uuid',
return_value=tpd.PowerMaxData.device_id2)
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@mock.patch.object(
common.PowerMaxCommon, 'configure_volume_replication',
return_value=(None, None, None, tpd.PowerMaxData.rep_extra_specs_mgmt,
True))
@mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group')
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
return_value=tpd.PowerMaxData.volume_create_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
return_value=(True, {}, {}, False))
def test_create_replication_enabled_volume_not_first_rdfg_volume(
self, mck_prepare, mck_create, mck_protect, mck_configure,
mck_resume, mck_get_set, mck_updates):
array = self.data.array
volume = self.data.test_volume
volume_name = volume.name
volume_size = volume.size
rep_extra_specs = self.data.rep_extra_specs
storagegroup_name = self.data.storagegroup_name_f
self.common._create_replication_enabled_volume(
array, volume, volume_name, volume_size, rep_extra_specs,
storagegroup_name, rep_extra_specs['rep_mode'])
mck_prepare.assert_called_once()
mck_protect.assert_not_called()
mck_configure.assert_called_once()
mck_resume.assert_called_once()
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
def test_cloned_volume(self, mck_meta, mck_cleanup_snaps):
array = self.data.array
test_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
extra_specs = self.common._initial_setup(test_volume)
ref_model_update = ({'provider_location': six.text_type(
self.data.provider_location_clone)})
model_update = self.common.create_cloned_volume(
self.data.test_clone_volume, self.data.test_volume)
self.assertEqual(
ast.literal_eval(ref_model_update['provider_location']),
ast.literal_eval(model_update['provider_location']))
mck_cleanup_snaps.assert_called_once_with(
array, source_device_id, extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
def test_delete_volume(self, mck_get_snaps):
with mock.patch.object(self.common, '_delete_volume') as mock_delete:
self.common.delete_volume(self.data.test_volume)
mock_delete.assert_called_once_with(self.data.test_volume)
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp')
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap_info',
return_value=tpd.PowerMaxData.volume_snap_vx)
def test_delete_volume_fail_if_active_snapshots(
self, mck_get_snaps, mck_cleanup, mck_delete):
array = self.data.array
test_volume = self.data.test_volume
device_id = self.data.device_id
extra_specs = self.common._initial_setup(test_volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.common._delete_volume, test_volume)
mck_cleanup.assert_called_once_with(array, device_id, extra_specs)
mck_delete.assert_not_called()
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp')
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(
rest.PowerMaxRest, 'find_snap_vx_sessions',
return_value=('', tpd.PowerMaxData.snap_tgt_session_cm_enabled))
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
def test_delete_volume_fail_if_snapvx_target(
self, mck_get_snaps, mck_tgt_snap, mck_cleanup, mck_delete):
array = self.data.array
test_volume = self.data.test_volume
device_id = self.data.device_id
extra_specs = self.common._initial_setup(test_volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.common._delete_volume, test_volume)
mck_cleanup.assert_called_once_with(array, device_id, extra_specs)
mck_delete.assert_not_called()
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(
common.PowerMaxCommon, 'get_snapshot_metadata',
return_value={'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2'})
def test_create_snapshot(self, mck_meta, mck_cleanup_snaps):
ref_model_update = (
{'provider_location': six.text_type(self.data.snap_location),
'metadata': {'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}})
snapshot = deepcopy(self.data.test_snapshot_manage)
snapshot.metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
model_update = self.common.create_snapshot(
snapshot, self.data.test_volume)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(
common.PowerMaxCommon, '_parse_snap_info',
return_value=(tpd.PowerMaxData.device_id,
tpd.PowerMaxData.snap_location['snap_name'],
[tpd.PowerMaxData.snap_id]))
def test_delete_snapshot(self, mock_parse):
snap_name = self.data.snap_location['snap_name']
sourcedevice_id = self.data.snap_location['source_id']
with mock.patch.object(
self.provision, 'delete_volume_snap') as mock_delete_snap:
self.common.delete_snapshot(
self.data.test_snapshot, self.data.test_volume)
mock_delete_snap.assert_called_once_with(
self.data.array, snap_name, [sourcedevice_id],
self.data.snap_id, restored=False)
def test_delete_snapshot_not_found(self):
with mock.patch.object(self.common, '_parse_snap_info',
return_value=(None, 'Something', None)):
with mock.patch.object(
self.provision, 'delete_volume_snap') as mock_delete_snap:
self.common.delete_snapshot(self.data.test_snapshot,
self.data.test_volume)
mock_delete_snap.assert_not_called()
def test_delete_legacy_snap(self):
with mock.patch.object(self.common, '_delete_volume') as mock_del:
self.common.delete_snapshot(self.data.test_legacy_snapshot,
self.data.test_legacy_vol)
mock_del.assert_called_once_with(self.data.test_legacy_snapshot)
@mock.patch.object(masking.PowerMaxMasking,
'return_volume_to_fast_managed_group')
@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
def test_remove_members(self, mock_rm, mock_return):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
self.common._remove_members(
array, volume, device_id, extra_specs, self.data.connector, False)
mock_rm.assert_called_once_with(
array, volume, device_id, volume_name,
extra_specs, True, self.data.connector, async_grp=None,
host_template=None)
@mock.patch.object(masking.PowerMaxMasking,
'return_volume_to_fast_managed_group')
@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
def test_remove_members_multiattach_case(self, mock_rm, mock_return):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
self.common._remove_members(
array, volume, device_id, extra_specs, self.data.connector, True)
mock_rm.assert_called_once_with(
array, volume, device_id, volume_name,
extra_specs, False, self.data.connector, async_grp=None,
host_template=None)
mock_return.assert_called_once()
def test_unmap_lun(self):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
connector = self.data.connector
with mock.patch.object(self.common, '_remove_members') as mock_remove:
self.common._unmap_lun(volume, connector)
mock_remove.assert_called_once_with(
array, volume, device_id, extra_specs,
connector, False, async_grp=None, host_template=None)
def test_unmap_lun_force(self):
volume = self.data.test_volume
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
connector = deepcopy(self.data.connector)
del connector['host']
with mock.patch.object(
self.common.utils, 'get_host_short_name') as mock_host:
self.common._unmap_lun(volume, connector)
mock_host.assert_not_called()
@mock.patch.object(common.PowerMaxCommon, '_remove_members')
def test_unmap_lun_attachments(self, mock_rm):
volume1 = deepcopy(self.data.test_volume)
volume1.volume_attachment.objects = [self.data.test_volume_attachment]
connector = self.data.connector
self.common._unmap_lun(volume1, connector)
mock_rm.assert_called_once()
mock_rm.reset_mock()
volume2 = deepcopy(volume1)
volume2.volume_attachment.objects.append(
self.data.test_volume_attachment)
self.common._unmap_lun(volume2, connector)
mock_rm.assert_not_called()
def test_unmap_lun_qos(self):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs['qos'] = {
'total_iops_sec': '4000', 'DistributionType': 'Always'}
connector = self.data.connector
with mock.patch.object(self.common, '_remove_members') as mock_remove:
with mock.patch.object(self.utils, 'get_volumetype_extra_specs',
return_value=extra_specs):
self.common._unmap_lun(volume, connector)
mock_remove.assert_called_once_with(
array, volume, device_id, extra_specs,
connector, False, async_grp=None, host_template=None)
def test_unmap_lun_not_mapped(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(self.common, 'find_host_lun_id',
return_value=({}, False)):
with mock.patch.object(
self.common, '_remove_members') as mock_remove:
self.common._unmap_lun(volume, connector)
mock_remove.assert_not_called()
def test_unmap_lun_connector_is_none(self):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs['storagetype:portgroupname'] = (
self.data.port_group_name_f)
with mock.patch.object(self.common, '_remove_members') as mock_remove:
self.common._unmap_lun(volume, None)
mock_remove.assert_called_once_with(
array, volume, device_id, extra_specs, None,
False, async_grp=None, host_template=None)
@mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info')
@mock.patch.object(common.PowerMaxCommon, '_remove_members')
def test_unmap_lun_multiattach_prints_metadata(self, mck_remove, mck_info):
volume = deepcopy(self.data.test_volume)
connector = deepcopy(self.data.connector)
volume.volume_attachment.objects = [
deepcopy(self.data.test_volume_attachment),
deepcopy(self.data.test_volume_attachment)]
self.common._unmap_lun(volume, connector)
self.assertEqual(0, mck_remove.call_count)
self.assertEqual(1, mck_info.call_count)
@mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload')
@mock.patch.object(common.PowerMaxCommon, '_remove_members')
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=(tpd.PowerMaxData.iscsi_device_info,
False))
@mock.patch.object(
common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
def test_unmap_lun_replication_force_flag(
self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo):
volume = deepcopy(self.data.test_volume)
connector = deepcopy(self.data.connector)
device_info = self.data.provider_location['device_id']
volume.volume_attachment.objects = [
deepcopy(self.data.test_volume_attachment)]
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
array = extra_specs[utils.ARRAY]
extra_specs[utils.FORCE_VOL_EDIT] = True
self.common._unmap_lun(volume, connector)
mck_rem.assert_called_once_with(array, volume, device_info,
extra_specs, connector, False,
async_grp=None, host_template=None)
@mock.patch.object(utils.PowerMaxUtils, 'is_metro_device',
return_value=True)
@mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload')
@mock.patch.object(common.PowerMaxCommon, '_remove_members')
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=(tpd.PowerMaxData.iscsi_device_info,
False))
@mock.patch.object(
common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
def test_unmap_lun_replication_metro(
self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo, mck_metro):
volume = deepcopy(self.data.test_volume)
connector = deepcopy(self.data.connector)
volume.volume_attachment.objects = [
deepcopy(self.data.test_volume_attachment)]
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
extra_specs[utils.FORCE_VOL_EDIT] = True
self.common._unmap_lun(volume, connector)
self.assertEqual(2, mck_rem.call_count)
@mock.patch.object(utils.PowerMaxUtils, 'is_metro_device',
return_value=True)
@mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload')
@mock.patch.object(common.PowerMaxCommon, '_remove_members')
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=(tpd.PowerMaxData.iscsi_device_info,
False))
@mock.patch.object(
common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
def test_unmap_lun_replication_metro_promotion(
self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo, mck_metro):
volume = deepcopy(self.data.test_volume)
connector = deepcopy(self.data.connector)
volume.volume_attachment.objects = [
deepcopy(self.data.test_volume_attachment)]
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
extra_specs[utils.FORCE_VOL_EDIT] = True
self.common.promotion = True
self.common._unmap_lun(volume, connector)
self.common.promotion = False
self.assertEqual(1, mck_rem.call_count)
@mock.patch.object(common.PowerMaxCommon, '_unmap_lun')
@mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info')
def test_unmap_lun_promotion_non_replicated_volume(
self, mck_unmap, mck_info):
volume = deepcopy(self.data.test_volume)
connector = deepcopy(self.data.connector)
self.common._unmap_lun_promotion(volume, connector)
self.assertEqual(0, mck_unmap.call_count)
self.assertEqual(0, mck_info.call_count)
@mock.patch.object(common.PowerMaxCommon, '_unmap_lun')
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro)
def test_unmap_lun_promotion_replicated_metro_volume(
self, mck_setup, mck_unmap):
volume = deepcopy(self.data.test_rep_volume)
connector = deepcopy(self.data.connector)
self.common._unmap_lun_promotion(volume, connector)
mck_setup.assert_called_once_with(volume)
mck_unmap.assert_called_once_with(volume, connector)
@mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info')
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
def test_unmap_lun_promotion_replicated_non_metro_volume(
self, mck_setup, mck_capture):
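        # Non-metro replicated volumes only have their detach info captured
        # during promotion; no unmap call is made.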
volume = deepcopy(self.data.test_rep_volume)
connector = deepcopy(self.data.connector)
extra_specs = self.data.rep_extra_specs_rep_config
device_id = self.data.device_id
promotion_key = [utils.PMAX_FAILOVER_START_ARRAY_PROMOTION]
self.common._unmap_lun_promotion(volume, connector)
mck_setup.assert_called_once_with(volume)
mck_capture.assert_called_once_with(
volume, extra_specs, device_id, promotion_key, promotion_key)
def test_initialize_connection_already_mapped(self):
volume = self.data.test_volume
connector = self.data.connector
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][
'host_lun_address'])
ref_dict = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
device_info_dict = self.common.initialize_connection(volume, connector)
self.assertEqual(ref_dict, device_info_dict)
def test_initialize_connection_setup_init_conn(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(
self.common, '_initial_setup',
side_effect=self.common._initial_setup) as mck_setup:
self.common.initialize_connection(volume, connector)
mck_setup.assert_called_once_with(volume, init_conn=True)
def test_initialize_connection_already_mapped_next_gen(self):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
volume = self.data.test_volume
connector = self.data.connector
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][
'host_lun_address'])
ref_dict = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
device_info_dict = self.common.initialize_connection(volume,
connector)
self.assertEqual(ref_dict, device_info_dict)
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=({}, False))
@mock.patch.object(
common.PowerMaxCommon, '_attach_volume',
return_value=({}, tpd.PowerMaxData.port_group_name_f))
def test_initialize_connection_not_mapped(self, mock_attach, mock_id):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
masking_view_dict[utils.IS_MULTIATTACH] = False
device_info_dict = self.common.initialize_connection(
volume, connector)
self.assertEqual({}, device_info_dict)
mock_attach.assert_called_once_with(
volume, connector, extra_specs, masking_view_dict)
@mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array',
return_value=True)
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=({}, False))
@mock.patch.object(
common.PowerMaxCommon, '_attach_volume',
return_value=({}, tpd.PowerMaxData.port_group_name_f))
def test_initialize_connection_not_mapped_next_gen(self, mock_attach,
mock_id, mck_gen):
volume = self.data.test_volume
connector = self.data.connector
device_info_dict = self.common.initialize_connection(
volume, connector)
self.assertEqual({}, device_info_dict)
@mock.patch.object(
masking.PowerMaxMasking, 'pre_multiattach',
return_value=tpd.PowerMaxData.masking_view_dict_multiattach)
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=({}, True))
@mock.patch.object(
common.PowerMaxCommon, '_attach_volume',
return_value=({}, tpd.PowerMaxData.port_group_name_f))
def test_initialize_connection_multiattach_case(
self, mock_attach, mock_id, mock_pre):
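        # The volume is already attached to another host, so pre_multiattach
        # must run before the volume is attached again.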
volume = self.data.test_volume
connector = self.data.connector
self.common.initialize_connection(volume, connector)
mock_attach.assert_called_once()
mock_pre.assert_called_once()
def test_attach_volume_success(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][
'host_lun_address'])
ref_dict = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
with mock.patch.object(self.masking, 'setup_masking_view',
return_value={
utils.PORTGROUPNAME:
self.data.port_group_name_f}):
device_info_dict, pg = self.common._attach_volume(
volume, connector, extra_specs, masking_view_dict)
self.assertEqual(ref_dict, device_info_dict)
@mock.patch.object(masking.PowerMaxMasking,
'check_if_rollback_action_for_masking_required')
@mock.patch.object(masking.PowerMaxMasking, 'setup_masking_view',
return_value={})
@mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id',
return_value=({}, False))
def test_attach_volume_failed(self, mock_lun, mock_setup, mock_rollback):
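        # An empty masking view dict from setup_masking_view simulates a
        # failed attach; the rollback check must still be performed.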
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertRaises(exception.VolumeBackendAPIException,
self.common._attach_volume, volume,
connector, extra_specs, masking_view_dict)
device_id = self.data.device_id
(mock_rollback.assert_called_once_with(
self.data.array, volume, device_id, {}))
def test_terminate_connection(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(self.common, '_unmap_lun') as mock_unmap:
self.common.terminate_connection(volume, connector)
mock_unmap.assert_called_once_with(
volume, connector)
def test_terminate_connection_promotion(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(
self.common, '_unmap_lun_promotion') as mock_unmap:
self.common.promotion = True
self.common.terminate_connection(volume, connector)
mock_unmap.assert_called_once_with(
volume, connector)
self.common.promotion = False
@mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
@mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
def test_extend_vol_no_rep_success(self, mck_val_chk, mck_extend):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common.extend_volume(volume, new_size)
mck_extend.assert_called_once_with(
array, device_id, new_size, ref_extra_specs, None)
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
@mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check',
return_value=[True] * 4)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=('1', None))
@mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
@mock.patch.object(common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.ex_specs_rep_config)
def test_extend_vol_rep_success_next_gen(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_extend,
mck_validate):
self.common.next_gen = True
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size
ref_extra_specs = deepcopy(self.data.ex_specs_rep_config)
ref_extra_specs['array'] = self.data.array
self.common.extend_volume(volume, new_size)
mck_extend.assert_called_once_with(
array, device_id, new_size, ref_extra_specs, '1')
mck_ode.assert_called_once_with(
array, ref_extra_specs[utils.REP_CONFIG], True)
mck_validate.assert_called_once_with(array, ref_extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
@mock.patch.object(common.PowerMaxCommon, '_extend_legacy_replicated_vol')
@mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check',
return_value=[True, True, False, False])
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=('1', None))
@mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
@mock.patch.object(common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.ex_specs_rep_config)
def test_extend_vol_rep_success_next_gen_legacy_r2(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_leg_extend,
mck_extend, mck_validate):
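        # The ODE check reports the R2 side cannot be extended online, so the
        # legacy replicated extend path is used instead of a plain extend.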
self.common.next_gen = True
self.common.rep_config = self.data.rep_config
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size
ref_extra_specs = deepcopy(self.data.ex_specs_rep_config)
ref_extra_specs['array'] = self.data.array
self.common.extend_volume(volume, new_size)
mck_leg_extend.assert_called_once_with(
array, volume, device_id, volume.name, new_size,
ref_extra_specs, '1')
mck_ode.assert_called_once_with(
array, ref_extra_specs[utils.REP_CONFIG], True)
mck_extend.assert_not_called()
mck_validate.assert_called_once_with(array, ref_extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
@mock.patch.object(common.PowerMaxCommon, '_extend_legacy_replicated_vol')
@mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check',
return_value=[False, False, False, False])
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=('1', None))
@mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
@mock.patch.object(common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.ex_specs_rep_config)
def test_extend_vol_rep_success_legacy(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_leg_extend,
mck_extend, mck_validate):
self.common.rep_config = self.data.rep_config
self.common.next_gen = False
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size
ref_extra_specs = deepcopy(self.data.ex_specs_rep_config)
ref_extra_specs['array'] = self.data.array
self.common.extend_volume(volume, new_size)
mck_leg_extend.assert_called_once_with(
array, volume, device_id, volume.name, new_size,
ref_extra_specs, '1')
mck_ode.assert_called_once_with(
array, ref_extra_specs[utils.REP_CONFIG], True)
mck_extend.assert_not_called()
mck_validate.assert_called_once_with(array, ref_extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check',
return_value=[False, False, False, False])
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=('1', None))
@mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.ex_specs_rep_config_no_extend)
def test_extend_vol_rep_success_legacy_allow_extend_false(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_validate):
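        # Extending a legacy replicated volume must be rejected when the rep
        # config does not allow extend.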
self.common.rep_config = self.data.rep_config
self.common.next_gen = False
volume = self.data.test_volume
new_size = self.data.test_volume.size
self.assertRaises(exception.VolumeBackendAPIException,
self.common.extend_volume, volume, new_size)
def test_update_volume_stats(self):
data = self.common.update_volume_stats()
self.assertEqual('CommonTests', data['volume_backend_name'])
def test_update_volume_stats_no_wlp(self):
with mock.patch.object(self.common, '_update_srp_stats',
return_value=('123s#SRP_1#None#None',
100, 90, 90, 10)):
data = self.common.update_volume_stats()
self.assertEqual('CommonTests', data['volume_backend_name'])
def test_update_srp_stats_with_wl(self):
with mock.patch.object(self.rest, 'get_srp_by_name',
return_value=self.data.srp_details):
location_info, __, __, __, __ = self.common._update_srp_stats(
self.data.array_info_wl)
self.assertEqual(location_info, '000197800123#SRP_1#Diamond#OLTP')
def test_update_srp_stats_no_wl(self):
with mock.patch.object(self.rest, 'get_srp_by_name',
return_value=self.data.srp_details):
location_info, __, __, __, __ = self.common._update_srp_stats(
self.data.array_info_no_wl)
self.assertEqual(location_info, '000197800123#SRP_1#Diamond')
def test_find_device_on_array_success(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
ref_device_id = self.data.device_id
founddevice_id = self.common._find_device_on_array(volume, extra_specs)
self.assertEqual(ref_device_id, founddevice_id)
def test_find_device_on_array_provider_location_not_string(self):
volume = fake_volume.fake_volume_obj(
context='cxt', provider_location=None)
extra_specs = self.data.extra_specs
founddevice_id = self.common._find_device_on_array(
volume, extra_specs)
self.assertIsNone(founddevice_id)
def test_find_legacy_device_on_array(self):
volume = self.data.test_legacy_vol
extra_specs = self.data.extra_specs
ref_device_id = self.data.device_id
founddevice_id = self.common._find_device_on_array(volume, extra_specs)
self.assertEqual(ref_device_id, founddevice_id)
def test_find_host_lun_id_attached(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
host = 'HostX'
host_lun = (
self.data.maskingview[0]['maskingViewConnection'][0][
'host_lun_address'])
ref_masked = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
maskedvols, __ = self.common.find_host_lun_id(volume, host,
extra_specs)
self.assertEqual(ref_masked, maskedvols)
def test_find_host_lun_id_not_attached(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
host = 'HostX'
with mock.patch.object(self.rest, 'find_mv_connections_for_vol',
return_value=None):
maskedvols, __ = self.common.find_host_lun_id(
volume, host, extra_specs)
self.assertEqual({}, maskedvols)
@mock.patch.object(
common.PowerMaxCommon, '_get_masking_views_from_volume',
return_value=([tpd.PowerMaxData.masking_view_name_f],
[tpd.PowerMaxData.masking_view_name_f,
tpd.PowerMaxData.masking_view_name_Y_f]))
def test_find_host_lun_id_multiattach(self, mock_mask):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
__, is_multiattach = self.common.find_host_lun_id(
volume, 'HostX', extra_specs)
self.assertTrue(is_multiattach)
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
def test_find_host_lun_id_rep_extra_specs(self, mock_vol, mock_tgt):
self.common.find_host_lun_id(
self.data.test_volume, 'HostX',
self.data.extra_specs, self.data.rep_extra_specs)
mock_tgt.assert_called_once()
@mock.patch.object(rest.PowerMaxRest, 'find_mv_connections_for_vol',
return_value='1')
@mock.patch.object(common.PowerMaxCommon, '_get_masking_views_from_volume',
side_effect=[([], ['OS-HostX-I-PG-MV']),
(['OS-HostX-I-PG-MV'],
['OS-HostX-I-PG-MV'])])
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
def test_find_host_lun_id_backward_compatible(
self, mock_vol, mock_mvs, mock_mv_conns):
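        # The first lookup using the short host name template finds no
        # masking view, so the search falls back to the legacy host name.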
expected_dict = {'hostlunid': '1', 'maskingview': 'OS-HostX-I-PG-MV',
'array': '000197800123', 'device_id': '00001'}
self.common.powermax_short_host_name_template = (
'shortHostName[:7]finance')
masked_vols, is_multiattach = self.common.find_host_lun_id(
self.data.test_volume, 'HostX',
self.data.extra_specs)
self.assertEqual(expected_dict, masked_vols)
self.assertFalse(is_multiattach)
mock_mv_conns.assert_called_once()
def test_get_masking_views_from_volume(self):
array = self.data.array
device_id = self.data.device_id
host = 'HostX'
ref_mv_list = [self.data.masking_view_name_f]
maskingview_list, __ = self.common.get_masking_views_from_volume(
array, self.data.test_volume, device_id, host)
self.assertEqual(ref_mv_list, maskingview_list)
# is metro
with mock.patch.object(self.utils, 'is_metro_device',
return_value=True):
__, is_metro = self.common.get_masking_views_from_volume(
array, self.data.test_volume, device_id, host)
self.assertTrue(is_metro)
def test_get_masking_views_from_volume_wrong_host(self):
array = self.data.array
device_id = self.data.device_id
host = 'DifferentHost'
maskingview_list, __ = self.common.get_masking_views_from_volume(
array, self.data.test_volume, device_id, host)
self.assertEqual([], maskingview_list)
def test_find_host_lun_id_no_host_check(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][
'host_lun_address'])
ref_masked = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
maskedvols, __ = self.common.find_host_lun_id(
volume, None, extra_specs)
self.assertEqual(ref_masked, maskedvols)
def test_initial_setup_success(self):
volume = self.data.test_volume
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs = self.common._initial_setup(volume)
self.assertEqual(ref_extra_specs, extra_specs)
def test_initial_setup_failed(self):
volume = self.data.test_volume
with mock.patch.object(
self.common, 'get_attributes_from_cinder_config',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._initial_setup, volume)
def test_initial_setup_success_specs_init_conn_call(self):
volume = self.data.test_volume
array_info = self.common.get_attributes_from_cinder_config()
extra_specs, __ = self.common._set_config_file_and_get_extra_specs(
volume)
with mock.patch.object(
self.common, '_set_vmax_extra_specs',
side_effect=self.common._set_vmax_extra_specs) as mck_specs:
self.common._initial_setup(volume, init_conn=True)
mck_specs.assert_called_once_with(
extra_specs, array_info, True)
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
def test_populate_masking_dict(self, mock_tgt):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.WORKLOAD] = self.data.workload
ref_mv_dict = self.data.masking_view_dict
self.common.next_gen = False
self.common.powermax_port_group_name_template = 'portGroupName'
extra_specs.pop(utils.IS_RE, None)
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
# Metro volume, pass in rep_extra_specs and retrieve target device
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common._populate_masking_dict(
volume, connector, extra_specs, rep_extra_specs)
mock_tgt.assert_called_once()
# device_id is None
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._populate_masking_dict,
volume, connector, extra_specs)
def test_populate_masking_dict_no_slo(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = {'slo': None, 'workload': None, 'srp': self.data.srp,
'array': self.data.array,
utils.PORTGROUPNAME: self.data.port_group_name_f}
ref_mv_dict = self.data.masking_view_dict_no_slo
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
def test_populate_masking_dict_compr_disabled(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.DISABLECOMPRESSION] = "true"
ref_mv_dict = self.data.masking_view_dict_compression_disabled
extra_specs[utils.WORKLOAD] = self.data.workload
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
def test_populate_masking_dict_next_gen(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common.next_gen = True
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual('NONE', masking_view_dict[utils.WORKLOAD])
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_create_cloned_volume(self, mck_cleanup_snaps):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
ref_response = (self.data.provider_location_clone, dict(), dict())
clone_dict, rep_update, rep_info_dict = (
self.common._create_cloned_volume(
volume, source_volume, extra_specs))
self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_create_cloned_volume_is_snapshot(self, mck_cleanup_snaps):
volume = self.data.test_snapshot
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
ref_response = (self.data.snap_location, dict(), dict())
clone_dict, rep_update, rep_info_dict = (
self.common._create_cloned_volume(
volume, source_volume, extra_specs, True, False))
self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_create_cloned_volume_from_snapshot(self, mck_cleanup_snaps):
volume = self.data.test_clone_volume
source_volume = self.data.test_snapshot
extra_specs = self.data.extra_specs
ref_response = (self.data.provider_location_snapshot, dict(), dict())
clone_dict, rep_update, rep_info_dict = (
self.common._create_cloned_volume(
volume, source_volume, extra_specs, False, True))
self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict))
def test_create_cloned_volume_not_licenced(self):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'is_snapvx_licensed',
return_value=False):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_cloned_volume,
volume, source_volume, extra_specs)
@mock.patch.object(common.PowerMaxCommon,
'_find_device_on_array')
def test_create_cloned_volume_not_licenced_2(self, mock_device):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'is_snapvx_licensed',
return_value=False):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_cloned_volume,
volume, source_volume, extra_specs,
False, False)
mock_device.assert_not_called()
@mock.patch.object(common.PowerMaxCommon,
'_find_device_on_array',
return_value=None)
@mock.patch.object(common.PowerMaxCommon,
'_cleanup_device_snapvx')
def test_create_cloned_volume_source_not_found(
self, mock_check, mock_device):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'is_snapvx_licensed',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_cloned_volume,
volume, source_volume, extra_specs,
False, False)
mock_check.assert_not_called()
def test_parse_snap_info_found(self):
ref_device_id = self.data.device_id
ref_snap_name = self.data.snap_location['snap_name']
sourcedevice_id, foundsnap_name, __ = self.common._parse_snap_info(
self.data.array, self.data.test_snapshot)
self.assertEqual(ref_device_id, sourcedevice_id)
self.assertEqual(ref_snap_name, foundsnap_name)
def test_parse_snap_info_not_found(self):
ref_snap_name = None
with mock.patch.object(self.rest, 'get_volume_snap',
return_value=None):
__, foundsnap_name, __ = self.common._parse_snap_info(
self.data.array, self.data.test_snapshot)
            self.assertEqual(ref_snap_name, foundsnap_name)
def test_parse_snap_info_exception(self):
with mock.patch.object(
self.rest, 'get_volume_snaps',
side_effect=exception.VolumeBackendAPIException):
__, foundsnap_name, __ = self.common._parse_snap_info(
self.data.array, self.data.test_snapshot)
self.assertIsNone(foundsnap_name)
def test_parse_snap_info_provider_location_not_string(self):
snapshot = fake_snapshot.fake_snapshot_obj(
context='ctxt', provider_loaction={'not': 'string'})
sourcedevice_id, foundsnap_name, __ = self.common._parse_snap_info(
self.data.array, snapshot)
self.assertIsNone(foundsnap_name)
def test_create_snapshot_success(self):
array = self.data.array
snapshot = self.data.test_snapshot
source_device_id = self.data.device_id
extra_specs = self.data.extra_specs
ref_dict = {'snap_name': self.data.test_snapshot_snap_name,
'source_id': self.data.device_id}
snap_dict = self.common._create_snapshot(
array, snapshot, source_device_id, extra_specs)
self.assertEqual(ref_dict, snap_dict)
def test_create_snapshot_exception(self):
array = self.data.array
snapshot = self.data.test_snapshot
source_device_id = self.data.device_id
extra_specs = self.data.extra_specs
with mock.patch.object(
self.provision, 'create_volume_snapvx',
side_effect=exception.VolumeBackendAPIException):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_snapshot,
array, snapshot, source_device_id, extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
@mock.patch.object(masking.PowerMaxMasking,
'remove_vol_from_storage_group')
def test_delete_volume_from_srp(self, mock_rm, mock_get_snaps):
array = self.data.array
device_id = self.data.device_id
volume_name = self.data.test_volume.name
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
volume = self.data.test_volume
with mock.patch.object(self.common, '_cleanup_device_snapvx'):
with mock.patch.object(
self.common, '_delete_from_srp') as mock_delete:
self.common._delete_volume(volume)
mock_delete.assert_called_once_with(
array, device_id, volume_name, ref_extra_specs)
def test_delete_volume_not_found(self):
volume = self.data.test_volume
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
with mock.patch.object(
self.common, '_delete_from_srp') as mock_delete:
self.common._delete_volume(volume)
mock_delete.assert_not_called()
def test_create_volume_success(self):
volume = self.data.test_volume
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
ref_response = (self.data.provider_location, dict(), dict())
with mock.patch.object(self.rest, 'get_volume',
return_value=self.data.volume_details[0]):
volume_dict, rep_update, rep_info_dict = (
self.common._create_volume(
volume, volume_name, volume_size, extra_specs))
self.assertEqual(ref_response,
(volume_dict, rep_update, rep_info_dict))
@mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id',
return_value=tpd.PowerMaxData.device_id2)
@mock.patch.object(
common.PowerMaxCommon, '_create_non_replicated_volume',
return_value=deepcopy(tpd.PowerMaxData.provider_location))
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
def test_create_volume_update_returning_device_id(
self, mck_get, mck_create, mck_find):
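        # The device id found on the array after creation differs from the
        # one returned by the create call, so the provider location should
        # report the device id found on the array.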
volume = self.data.test_volume
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
ref_response = (self.data.provider_location2, dict(), dict())
volume_dict, rep_update, rep_info_dict = (
self.common._create_volume(
volume, volume_name, volume_size, extra_specs))
self.assertEqual(ref_response,
(volume_dict, rep_update, rep_info_dict))
def test_create_volume_success_next_gen(self):
volume = self.data.test_volume
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
self.common.next_gen = True
with mock.patch.object(
self.utils, 'is_compression_disabled', return_value=True):
with mock.patch.object(
self.rest, 'get_array_model_info',
return_value=('PowerMax 2000', True)):
with mock.patch.object(
self.masking,
'get_or_create_default_storage_group') as mock_get:
self.common._create_volume(
volume, volume_name, volume_size, extra_specs)
mock_get.assert_called_once_with(
extra_specs['array'], extra_specs[utils.SRP],
extra_specs[utils.SLO], 'NONE', extra_specs, True,
False, None)
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg',
side_effect=exception.VolumeBackendAPIException(''))
@mock.patch.object(common.PowerMaxCommon,
'_cleanup_non_rdf_volume_create_post_failure')
@mock.patch.object(rest.PowerMaxRest, 'delete_storage_group')
def test_create_volume_failed(self, mck_del, mck_cleanup, mck_create):
volume = self.data.test_volume
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
dev1 = self.data.device_id
dev2 = self.data.device_id2
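        # path 1: a new device was created before the failure and must be
        # cleaned up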
with mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
side_effect=[[dev1], [dev1, dev2]]):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_volume,
volume, volume_name, volume_size,
extra_specs)
mck_cleanup.assert_called_once_with(
volume, volume_name, extra_specs, [dev2])
# path 2: no new volumes created
with mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
side_effect=[[], []]):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_volume,
volume, volume_name, volume_size,
extra_specs)
mck_del.assert_called_once()
@mock.patch.object(common.PowerMaxCommon, 'cleanup_rdf_device_pair')
@mock.patch.object(
rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', [
{utils.RDF_GROUP_NO: tpd.PowerMaxData.rdf_group_no_1}]))
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@mock.patch.object(utils.PowerMaxUtils, 'get_default_storage_group_name',
return_value=tpd.PowerMaxData.storagegroup_name_f)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
return_value=('', tpd.PowerMaxData.rep_extra_specs,
'', '',))
def test_cleanup_rdf_volume_create_post_failure_sync(
self, mck_prep, mck_sg, mck_resume, mck_sess, mck_clean):
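        # Sync mode: replication is resumed on the default storage group and
        # the orphaned RDF device pair is cleaned up.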
array = self.data.array
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
extra_specs['rep_mode'] = utils.REP_SYNC
devices = [self.data.device_id]
self.common._cleanup_rdf_volume_create_post_failure(
volume, volume_name, extra_specs, devices)
mck_prep.assert_called_once_with(extra_specs)
mck_sg.assert_called_once_with(
extra_specs['srp'], extra_specs['slo'], extra_specs['workload'],
False, True, extra_specs['rep_mode'])
mck_resume.assert_called_once_with(
array, self.data.storagegroup_name_f, self.data.rdf_group_no_1,
self.data.rep_extra_specs)
mck_sess.assert_called_once_with(array, self.data.device_id)
mck_clean.assert_called_once_with(
array, self.data.rdf_group_no_1, self.data.device_id, extra_specs)
@mock.patch.object(common.PowerMaxCommon, 'cleanup_rdf_device_pair')
@mock.patch.object(
rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', [
{utils.RDF_GROUP_NO: tpd.PowerMaxData.rdf_group_no_1}]))
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.storagegroup_name_f)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
return_value=('', tpd.PowerMaxData.rep_extra_specs,
'', '',))
def test_cleanup_rdf_volume_create_post_failure_non_sync(
self, mck_prep, mck_mgmt, mck_resume, mck_sess, mck_clean):
array = self.data.array
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
extra_specs['rep_mode'] = utils.REP_ASYNC
devices = [self.data.device_id]
self.common._cleanup_rdf_volume_create_post_failure(
volume, volume_name, extra_specs, devices)
mck_prep.assert_called_once_with(extra_specs)
mck_mgmt.assert_called_once_with(extra_specs[utils.REP_CONFIG])
mck_resume.assert_called_once_with(
array, self.data.storagegroup_name_f, self.data.rdf_group_no_1,
self.data.rep_extra_specs)
mck_sess.assert_called_once_with(array, self.data.device_id)
mck_clean.assert_called_once_with(
array, self.data.rdf_group_no_1, self.data.device_id, extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp')
@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=('', '', False))
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.storagegroup_name_f)
@mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details',
return_value=('', tpd.PowerMaxData.rep_extra_specs,
'', '',))
def test_cleanup_rdf_volume_create_post_failure_pre_rdf_establish(
self, mck_prep, mck_mgmt, mck_resume, mck_sess, mck_rem, mck_del):
array = self.data.array
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
extra_specs['rep_mode'] = utils.REP_ASYNC
devices = [self.data.device_id]
self.common._cleanup_rdf_volume_create_post_failure(
volume, volume_name, extra_specs, devices)
mck_prep.assert_called_once_with(extra_specs)
mck_mgmt.assert_called_once_with(extra_specs[utils.REP_CONFIG])
mck_resume.assert_called_once_with(
array, self.data.storagegroup_name_f, self.data.rdf_group_no_1,
self.data.rep_extra_specs)
mck_sess.assert_called_once_with(array, self.data.device_id)
mck_rem.assert_called_once_with(array, volume, self.data.device_id,
volume_name, extra_specs, False)
mck_del.assert_called_once_with(array, self.data.device_id,
volume_name, extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp')
@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
def test_cleanup_non_rdf_volume_create_post_failure(
self, mck_remove, mck_del):
array = self.data.array
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
devices = [self.data.device_id]
self.common._cleanup_non_rdf_volume_create_post_failure(
volume, volume_name, extra_specs, devices)
mck_remove.assert_called_once_with(
array, volume, self.data.device_id, volume_name, extra_specs,
False)
mck_del.assert_called_once_with(
array, self.data.device_id, volume_name, extra_specs)
def test_create_volume_incorrect_slo(self):
volume = self.data.test_volume
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
extra_specs = {'slo': 'Diamondz',
'workload': 'DSSSS',
'srp': self.data.srp,
'array': self.data.array}
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_volume,
volume, volume_name, volume_size, extra_specs)
def test_set_vmax_extra_specs(self):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_no_srp_name(self):
srp_record = self.common.get_attributes_from_cinder_config()
with mock.patch.object(self.rest, 'get_slo_list',
return_value=[]):
extra_specs = self.common._set_vmax_extra_specs({}, srp_record)
self.assertIsNone(extra_specs['slo'])
def test_set_vmax_extra_specs_compr_disabled(self):
with mock.patch.object(self.rest, 'is_compression_capable',
return_value=True):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs_compr_disabled, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
ref_extra_specs[utils.DISABLECOMPRESSION] = "true"
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_compr_disabled_not_compr_capable(self):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs_compr_disabled, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_portgroup_as_spec(self):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
{utils.PORTGROUPNAME: 'extra_spec_pg'}, srp_record)
self.assertEqual('extra_spec_pg', extra_specs[utils.PORTGROUPNAME])
def test_set_vmax_extra_specs_no_portgroup_set(self):
srp_record = {
'srpName': 'SRP_1', 'RestServerIp': '1.1.1.1',
'RestPassword': 'smc', 'SSLCert': None, 'RestServerPort': 8443,
'SSLVerify': False, 'RestUserName': 'smc',
'SerialNumber': '000197800123'}
self.assertRaises(exception.VolumeBackendAPIException,
self.common._set_vmax_extra_specs,
{}, srp_record)
def test_set_vmax_extra_specs_next_gen(self):
srp_record = self.common.get_attributes_from_cinder_config()
self.common.next_gen = True
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.assertEqual('NONE', extra_specs[utils.WORKLOAD])
def test_set_vmax_extra_specs_tags_not_set(self):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
self.assertNotIn('storagetype:storagegrouptags', extra_specs)
def test_set_vmax_extra_specs_tags_set_correctly(self):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs_tags, srp_record)
self.assertEqual(
self.data.vol_type_extra_specs_tags[utils.STORAGE_GROUP_TAGS],
extra_specs[utils.STORAGE_GROUP_TAGS])
def test_set_vmax_extra_specs_tags_set_incorrectly(self):
srp_record = self.common.get_attributes_from_cinder_config()
self.assertRaises(exception.VolumeBackendAPIException,
self.common._set_vmax_extra_specs,
self.data.vol_type_extra_specs_tags_bad, srp_record)
def test_set_vmax_extra_specs_pg_specs_init_conn(self):
pool_record = self.common.get_attributes_from_cinder_config()
with mock.patch.object(
self.common, '_select_port_group_for_extra_specs',
side_effect=(
self.common._select_port_group_for_extra_specs)) as mck_s:
self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, pool_record, init_conn=True)
mck_s.assert_called_once_with(
self.data.vol_type_extra_specs, pool_record, True)
def test_raise_exception_if_array_not_configured(self):
self.driver.configuration.powermax_array = None
self.assertRaises(exception.InvalidConfigurationValue,
self.common.get_attributes_from_cinder_config)
def test_raise_exception_if_srp_not_configured(self):
self.driver.configuration.powermax_srp = None
self.assertRaises(exception.InvalidConfigurationValue,
self.common.get_attributes_from_cinder_config)
def test_delete_volume_from_srp_success(self):
array = self.data.array
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(
self.provision, 'delete_volume_from_srp') as mock_del:
self.common._delete_from_srp(array, device_id, volume_name,
extra_specs)
mock_del.assert_called_once_with(array, device_id, volume_name)
def test_delete_volume_from_srp_failed(self):
self.mock_object(time, 'sleep')
array = self.data.array
device_id = self.data.failed_resource
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(
self.masking,
'add_volume_to_default_storage_group') as mock_add:
self.assertRaises(exception.VolumeBackendAPIException,
self.common._delete_from_srp, array,
device_id, volume_name, extra_specs)
mock_add.assert_not_called()
@mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over',
side_effect=[True, False])
@mock.patch.object(common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=tpd.PowerMaxData.rep_extra_specs)
def test_get_target_wwns_from_masking_view(self, mock_rep_specs, mock_fo):
ref_wwns = [self.data.wwpn1]
for x in range(0, 2):
target_wwns = self.common._get_target_wwns_from_masking_view(
self.data.device_id, self.data.connector['host'],
self.data.extra_specs)
self.assertEqual(ref_wwns, target_wwns)
def test_get_target_wwns_from_masking_view_no_mv(self):
with mock.patch.object(self.common, '_get_masking_views_from_volume',
return_value=([], None)):
target_wwns = self.common._get_target_wwns_from_masking_view(
self.data.device_id, self.data.connector['host'],
self.data.extra_specs)
self.assertEqual([], target_wwns)
@mock.patch.object(common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=tpd.PowerMaxData.rep_extra_specs)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=('1', tpd.PowerMaxData.remote_array))
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
@mock.patch.object(
common.PowerMaxCommon, '_get_target_wwns_from_masking_view',
return_value=[tpd.PowerMaxData.wwnn1])
def test_get_target_wwns(
self, mck_wwns, mock_tgt, mock_rdf_grp, mock_specs):
__, metro_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
self.assertEqual([], metro_wwns)
# Is metro volume
with mock.patch.object(common.PowerMaxCommon, '_initial_setup',
return_value=self.data.ex_specs_rep_config):
__, metro_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
self.assertEqual([self.data.wwnn1], metro_wwns)
@mock.patch.object(common.PowerMaxCommon,
'_get_target_wwns_from_masking_view')
@mock.patch.object(utils.PowerMaxUtils, 'get_host_name_label',
                       return_value='my_short_h94485')
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
return_value=False)
def test_get_target_wwns_host_override(
self, mock_rep_check, mock_label, mock_mv):
host_record = {'host': 'my_short_host_name'}
connector = deepcopy(self.data.connector)
connector.update(host_record)
extra_specs = {'pool_name': 'Diamond+DSS+SRP_1+000197800123',
'srp': 'SRP_1', 'array': '000197800123',
'storagetype:portgroupname': 'OS-fibre-PG',
'interval': 1, 'retries': 1, 'slo': 'Diamond',
'workload': 'DSS'}
host_template = 'shortHostName[:10]uuid[:5]'
self.common.powermax_short_host_name_template = host_template
self.common.get_target_wwns_from_masking_view(
self.data.test_volume, connector)
mock_label.assert_called_once_with(
connector['host'], host_template)
mock_mv.assert_called_once_with(
self.data.device_id, 'my_short_h94485', extra_specs)
def test_get_port_group_from_masking_view(self):
array = self.data.array
maskingview_name = self.data.masking_view_name_f
with mock.patch.object(self.rest,
'get_element_from_masking_view') as mock_get:
self.common.get_port_group_from_masking_view(
array, maskingview_name)
mock_get.assert_called_once_with(
array, maskingview_name, portgroup=True)
def test_get_initiator_group_from_masking_view(self):
array = self.data.array
maskingview_name = self.data.masking_view_name_f
with mock.patch.object(
self.rest, 'get_element_from_masking_view') as mock_get:
self.common.get_initiator_group_from_masking_view(
array, maskingview_name)
mock_get.assert_called_once_with(
array, maskingview_name, host=True)
def test_get_common_masking_views(self):
array = self.data.array
portgroup_name = self.data.port_group_name_f
initiator_group_name = self.data.initiatorgroup_name_f
with mock.patch.object(
self.rest, 'get_common_masking_views') as mock_get:
self.common.get_common_masking_views(
array, portgroup_name, initiator_group_name)
mock_get.assert_called_once_with(
array, portgroup_name, initiator_group_name)
def test_get_iscsi_ip_iqn_port(self):
phys_port = '%(dir)s:%(port)s' % {'dir': self.data.iscsi_dir,
'port': self.data.iscsi_port}
ref_ip_iqn = [{'iqn': self.data.initiator,
'ip': self.data.ip,
'physical_port': phys_port}]
director = self.data.portgroup[1]['symmetrixPortKey'][0]['directorId']
port = self.data.portgroup[1]['symmetrixPortKey'][0]['portId']
dirport = "%s:%s" % (director, port)
ip_iqn_list = self.common._get_iscsi_ip_iqn_port(self.data.array,
dirport)
self.assertEqual(ref_ip_iqn, ip_iqn_list)
def test_find_ip_and_iqns(self):
ref_ip_iqn = [{'iqn': self.data.initiator,
'ip': self.data.ip,
'physical_port': self.data.iscsi_dir_port}]
ip_iqn_list = self.common._find_ip_and_iqns(
self.data.array, self.data.port_group_name_i)
self.assertEqual(ref_ip_iqn, ip_iqn_list)
@mock.patch.object(rest.PowerMaxRest, 'get_portgroup',
return_value=None)
def test_find_ip_and_iqns_no_port_group(self, mock_port):
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._find_ip_and_iqns, self.data.array,
self.data.port_group_name_i)
def test_create_replica_snap_name(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
ref_response = (self.data.provider_location_snapshot, dict(), dict())
clone_dict, rep_update, rep_info_dict = self.common._create_replica(
array, clone_volume, source_device_id,
self.data.extra_specs, snap_name)
self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict))
@mock.patch.object(
rest.PowerMaxRest, 'get_slo_list', return_value=['Diamond'])
@mock.patch.object(
common.PowerMaxCommon, '_create_volume',
return_value=(tpd.PowerMaxData.rep_info_dict,
tpd.PowerMaxData.replication_update,
tpd.PowerMaxData.rep_info_dict))
@mock.patch.object(rest.PowerMaxRest, 'rdf_resume_with_retries')
@mock.patch.object(rest.PowerMaxRest, 'srdf_suspend_replication')
@mock.patch.object(rest.PowerMaxRest, 'wait_for_rdf_pair_sync')
def test_create_replica_rep_enabled(
self, mck_wait, mck_susp, mck_res, mck_create, mck_slo):
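        # Cloning a replicated source: wait for the RDF pair to sync, suspend
        # replication while the clone is created, then resume with retries.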
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
__, rep_extra_specs, __, __ = self.common.prepare_replication_details(
extra_specs)
rdfg = extra_specs['rdf_group_no']
self.common._create_replica(
array, clone_volume, source_device_id, rep_extra_specs, snap_name)
mck_wait.assert_called_once_with(
array, rdfg, source_device_id, rep_extra_specs)
mck_susp.assert_called_once_with(
array, rep_extra_specs['sg_name'], rdfg, rep_extra_specs)
mck_res.assert_called_once_with(array, rep_extra_specs)
def test_create_replica_no_snap_name(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = "temp-" + source_device_id + "-snapshot_for_clone"
ref_response = (self.data.provider_location_clone, dict(), dict())
with mock.patch.object(
self.utils, 'get_temp_snap_name',
return_value=snap_name) as mock_get_snap:
clone_dict, rep_update, rep_info_dict = (
self.common._create_replica(
array, clone_volume, source_device_id,
self.data.extra_specs))
self.assertEqual(ref_response,
(clone_dict, rep_update, rep_info_dict))
mock_get_snap.assert_called_once_with(source_device_id)
def test_create_replica_failed_cleanup_target(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
device_id = self.data.device_id
snap_name = self.data.failed_resource
clone_name = 'OS-' + clone_volume.id
extra_specs = self.data.extra_specs
with mock.patch.object(
self.common, '_cleanup_target') as mock_cleanup:
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_replica, array, clone_volume, device_id,
self.data.extra_specs, snap_name)
mock_cleanup.assert_called_once_with(
array, device_id, device_id, clone_name, snap_name,
extra_specs, target_volume=clone_volume)
def test_create_replica_failed_no_target(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = self.data.failed_resource
with mock.patch.object(self.common, '_create_volume',
return_value=({'device_id': None}, {}, {})):
with mock.patch.object(
self.common, '_cleanup_target') as mock_cleanup:
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_replica, array, clone_volume,
source_device_id, self.data.extra_specs, snap_name)
mock_cleanup.assert_not_called()
@mock.patch.object(
utils.PowerMaxUtils,
'compare_cylinders',
side_effect=exception.VolumeBackendAPIException)
def test_create_replica_cylinder_mismatch(self, mock_cyl):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
clone_name = 'OS-' + clone_volume.id
with mock.patch.object(
self.common, '_cleanup_target') as mock_cleanup:
self.assertRaises( # noqa: H202
Exception, self.common._create_replica, array,
clone_volume, source_device_id,
self.data.extra_specs, snap_name) # noqa: ignore=H202
mock_cleanup.assert_called_once_with(
array, source_device_id, source_device_id,
clone_name, snap_name, self.data.extra_specs,
target_volume=clone_volume)
@mock.patch.object(rest.PowerMaxRest, 'get_snap_id',
return_value=tpd.PowerMaxData.snap_id)
@mock.patch.object(
masking.PowerMaxMasking,
'remove_and_reset_members')
def test_cleanup_target_sync_present(self, mock_remove, mock_snaps):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.failed_resource
clone_name = clone_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'get_sync_session',
return_value='session'):
with mock.patch.object(
self.provision, 'unlink_snapvx_tgt_volume') as mock_break:
self.common._cleanup_target(
array, target_device_id, source_device_id,
clone_name, snap_name, extra_specs)
mock_break.assert_called_with(
array, target_device_id, source_device_id,
snap_name, extra_specs, self.data.snap_id)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps',
return_value=[{'snap_name': 'snap_name',
'snap_id': tpd.PowerMaxData.snap_id}])
@mock.patch.object(masking.PowerMaxMasking, 'remove_volume_from_sg')
def test_cleanup_target_no_sync(self, mock_remove, mock_snaps):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.failed_resource
clone_name = clone_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'get_sync_session',
return_value=None):
with mock.patch.object(
self.common, '_delete_from_srp') as mock_delete:
self.common._cleanup_target(
array, target_device_id, source_device_id,
clone_name, snap_name, extra_specs)
mock_delete.assert_called_once_with(
array, target_device_id, clone_name,
extra_specs)
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata',
return_value={'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'})
def test_manage_existing_success(self, mck_meta):
external_ref = {u'source-name': u'00002'}
provider_location = {'device_id': u'00002', 'array': u'000197800123'}
ref_update = {'provider_location': six.text_type(provider_location),
'metadata': {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}}
volume = deepcopy(self.data.test_volume)
volume.metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
with mock.patch.object(
self.common, '_check_lun_valid_for_cinder_management',
return_value=('vol1', 'test_sg')):
model_update = self.common.manage_existing(volume, external_ref)
self.assertEqual(ref_update, model_update)
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata', return_value='')
@mock.patch.object(
common.PowerMaxCommon, '_check_lun_valid_for_cinder_management',
return_value=('vol1', 'test_sg'))
def test_manage_existing_no_fall_through(self, mock_check, mock_get):
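        # A source-name that is already a device id is used directly; the
        # uuid lookup must not run.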
external_ref = {u'source-name': self.data.device_id}
volume = deepcopy(self.data.test_volume)
with mock.patch.object(
self.common, '_manage_volume_with_uuid',
return_value=(
self.data.array, self.data.device_id2)) as mock_uuid:
self.common.manage_existing(volume, external_ref)
mock_uuid.assert_not_called()
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata', return_value='')
@mock.patch.object(
common.PowerMaxCommon, '_check_lun_valid_for_cinder_management',
return_value=('vol1', 'test_sg'))
def test_manage_existing_fall_through(self, mock_check, mock_get):
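        # A source-name holding a volume uuid falls through to the uuid
        # based device lookup.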
external_ref = {u'source-name': self.data.volume_id}
volume = deepcopy(self.data.test_volume)
with mock.patch.object(
self.common, '_manage_volume_with_uuid',
return_value=(
self.data.array, self.data.device_id2)) as mock_uuid:
self.common.manage_existing(volume, external_ref)
mock_uuid.assert_called()
@mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id',
return_value=tpd.PowerMaxData.device_id2)
def test_manage_volume_with_uuid_success(self, mock_dev):
external_ref = {u'source-name': self.data.volume_id}
volume = deepcopy(self.data.test_volume)
array, device_id = self.common._manage_volume_with_uuid(
external_ref, volume)
self.assertEqual(array, self.data.array)
self.assertEqual(device_id, self.data.device_id2)
@mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id',
return_value=tpd.PowerMaxData.device_id2)
def test_manage_volume_with_prefix_and_uuid_success(self, mock_dev):
source_name = 'OS-' + self.data.volume_id
external_ref = {u'source-name': source_name}
volume = deepcopy(self.data.test_volume)
array, device_id = self.common._manage_volume_with_uuid(
external_ref, volume)
self.assertEqual(array, self.data.array)
self.assertEqual(device_id, self.data.device_id2)
def test_manage_volume_with_uuid_exception(self):
external_ref = {u'source-name': u'non_compliant_string'}
volume = deepcopy(self.data.test_volume)
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._manage_volume_with_uuid,
external_ref, volume)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_list',
return_value=[tpd.PowerMaxData.device_id3])
@mock.patch.object(
rest.PowerMaxRest, 'get_masking_views_from_storage_group',
return_value=None)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(False, False, None))
def test_check_lun_valid_for_cinder_management(
self, mock_rep, mock_mv, mock_list):
external_ref = {u'source-name': u'00003'}
vol, source_sg = self.common._check_lun_valid_for_cinder_management(
self.data.array, self.data.device_id3,
self.data.test_volume.id, external_ref)
self.assertEqual(vol, '123')
self.assertIsNone(source_sg)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_list',
return_value=[tpd.PowerMaxData.device_id4])
@mock.patch.object(
rest.PowerMaxRest, 'get_masking_views_from_storage_group',
return_value=None)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(False, False, None))
def test_check_lun_valid_for_cinder_management_multiple_sg_exception(
self, mock_rep, mock_mv, mock_list):
external_ref = {u'source-name': u'00004'}
self.assertRaises(
exception.ManageExistingInvalidReference,
self.common._check_lun_valid_for_cinder_management,
self.data.array, self.data.device_id4,
self.data.test_volume.id, external_ref)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_list',
return_value=[tpd.PowerMaxData.device_id3])
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
side_effect=[None,
tpd.PowerMaxData.volume_details[2],
tpd.PowerMaxData.volume_details[2],
tpd.PowerMaxData.volume_details[1]])
@mock.patch.object(
rest.PowerMaxRest, 'get_masking_views_from_storage_group',
side_effect=[tpd.PowerMaxData.sg_details[1]['maskingview'],
None])
@mock.patch.object(
rest.PowerMaxRest, 'get_storage_groups_from_volume',
return_value=([tpd.PowerMaxData.defaultstoragegroup_name]))
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
side_effect=[(True, False, []), (False, False, None)])
def test_check_lun_valid_for_cinder_management_exception(
self, mock_rep, mock_sg, mock_mvs, mock_get_vol, mock_list):
external_ref = {u'source-name': u'00003'}
for x in range(0, 3):
self.assertRaises(
exception.ManageExistingInvalidReference,
self.common._check_lun_valid_for_cinder_management,
self.data.array, self.data.device_id3,
self.data.test_volume.id, external_ref)
self.assertRaises(exception.ManageExistingAlreadyManaged,
self.common._check_lun_valid_for_cinder_management,
self.data.array, self.data.device_id3,
self.data.test_volume.id, external_ref)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_list',
return_value=[tpd.PowerMaxData.device_id])
@mock.patch.object(
rest.PowerMaxRest, 'get_masking_views_from_storage_group',
return_value=None)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(False, False, None))
def test_check_lun_valid_for_cinder_management_non_FBA(
self, mock_rep, mock_mv, mock_list):
external_ref = {u'source-name': u'00004'}
self.assertRaises(
exception.ManageExistingVolumeTypeMismatch,
self.common._check_lun_valid_for_cinder_management,
self.data.array, self.data.device_id4,
self.data.test_volume.id, external_ref)
def test_manage_existing_get_size(self):
external_ref = {u'source-name': u'00001'}
size = self.common.manage_existing_get_size(
self.data.test_volume, external_ref)
self.assertEqual(2, size)
def test_manage_existing_get_size_uuid(self):
external_ref = {u'source-name': self.data.volume_id}
size = self.common.manage_existing_get_size(
self.data.test_volume, external_ref)
self.assertEqual(2, size)
def test_manage_existing_get_size_prefix_and_uuid(self):
source_name = 'volume-' + self.data.volume_id
external_ref = {u'source-name': source_name}
size = self.common.manage_existing_get_size(
self.data.test_volume, external_ref)
self.assertEqual(2, size)
def test_manage_existing_get_size_invalid_input(self):
external_ref = {u'source-name': u'invalid_input'}
self.assertRaises(exception.VolumeBackendAPIException,
self.common.manage_existing_get_size,
self.data.test_volume, external_ref)
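    # Only whole-GiB volumes can be brought under management; a fractional
    # size reported by the array is treated as an invalid reference.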
def test_manage_existing_get_size_exception(self):
external_ref = {u'source-name': u'00001'}
with mock.patch.object(self.rest, 'get_size_of_device_on_array',
return_value=3.5):
self.assertRaises(exception.ManageExistingInvalidReference,
self.common.manage_existing_get_size,
self.data.test_volume, external_ref)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(False, False, False))
@mock.patch.object(common.PowerMaxCommon,
'_remove_vol_and_cleanup_replication')
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_unmanage_success(self, mck_cleanup_snaps, mock_rm, mck_sess):
volume = self.data.test_volume
with mock.patch.object(self.rest, 'rename_volume') as mock_rename:
self.common.unmanage(volume)
mock_rename.assert_called_once_with(
self.data.array, self.data.device_id,
self.data.test_volume.id)
        # Test for success when creating the storage group fails
with mock.patch.object(self.rest, 'rename_volume') as mock_rename:
with mock.patch.object(
self.provision, 'create_storage_group',
side_effect=exception.VolumeBackendAPIException):
self.common.unmanage(volume)
mock_rename.assert_called_once_with(
self.data.array, self.data.device_id,
self.data.test_volume.id)
def test_unmanage_device_not_found(self):
volume = self.data.test_volume
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
with mock.patch.object(self.rest, 'rename_volume') as mock_rename:
self.common.unmanage(volume)
mock_rename.assert_not_called()
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(True, True, False))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_unmanage_temp_snapshot_links(self, mck_cleanup_snaps, mck_sess):
volume = self.data.test_volume
self.assertRaises(exception.VolumeIsBusy, self.common.unmanage,
volume)
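    # retype() delegates to _slo_workload_migration once the device is found
    # on the array; missing devices and unsupported retypes return False, and
    # attached volumes follow the same storage-assisted path.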
@mock.patch.object(common.PowerMaxCommon, '_slo_workload_migration')
def test_retype(self, mock_migrate):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
volume = self.data.test_volume
new_type = {'extra_specs': {}}
host = {'host': self.data.new_host}
self.common.retype(volume, new_type, host)
mock_migrate.assert_called_once_with(
device_id, volume, host, volume_name, new_type, extra_specs)
with mock.patch.object(
self.common, '_find_device_on_array', return_value=None):
self.assertFalse(self.common.retype(volume, new_type, host))
def test_retype_attached_vol(self):
host = {'host': self.data.new_host}
new_type = {'extra_specs': {}}
with mock.patch.object(
self.common, '_find_device_on_array', return_value=True):
with mock.patch.object(self.common,
'_slo_workload_migration') as mock_retype:
self.common.retype(self.data.test_attached_volume,
new_type, host)
mock_retype.assert_called_once()
@mock.patch.object(utils.PowerMaxUtils, 'is_retype_supported',
return_value=False)
def test_retype_not_supported(self, mck_retype):
volume = self.data.test_volume
new_type = {'extra_specs': self.data.rep_extra_specs}
host = self.data.new_host
self.assertFalse(self.common.retype(volume, new_type, host))
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
@mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload',
return_value=(True, True))
@mock.patch.object(common.PowerMaxCommon, '_slo_workload_migration')
def test_retype_promotion_extra_spec_update(
self, mck_migrate, mck_slo, mck_setup):
device_id = self.data.device_id
volume_name = self.data.test_rep_volume.name
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
rep_config = extra_specs[utils.REP_CONFIG]
rep_extra_specs = self.common._get_replication_extra_specs(
extra_specs, rep_config)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
volume = self.data.test_rep_volume
new_type = {'extra_specs': {}}
host = {'host': self.data.new_host}
self.common.promotion = True
self.common.retype(volume, new_type, host)
self.common.promotion = False
mck_migrate.assert_called_once_with(
device_id, volume, host, volume_name, new_type, rep_extra_specs)
def test_slo_workload_migration_valid(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
new_type = {'extra_specs': self.data.vol_type_extra_specs}
volume = self.data.test_volume
host = {'host': self.data.new_host}
with mock.patch.object(self.common, '_migrate_volume') as mock_migrate:
self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type, extra_specs)
mock_migrate.assert_called_once_with(
extra_specs[utils.ARRAY], volume, device_id,
extra_specs[utils.SRP], 'Silver',
'OLTP', volume_name, new_type, extra_specs)
def test_slo_workload_migration_not_valid(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
volume = self.data.test_volume
new_type = {'extra_specs': self.data.vol_type_extra_specs}
host = {'host': self.data.new_host}
with mock.patch.object(
self.common, '_is_valid_for_storage_assisted_migration',
return_value=(False, 'Silver', 'OLTP')):
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type, extra_specs)
self.assertFalse(migrate_status)
def test_slo_workload_migration_same_hosts(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.fake_host}
new_type = {'extra_specs': {'slo': 'Bronze'}}
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type, extra_specs)
self.assertFalse(migrate_status)
@mock.patch.object(rest.PowerMaxRest, 'is_compression_capable',
return_value=True)
def test_slo_workload_migration_same_host_change_compression(
self, mock_cap):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.fake_host}
new_type = {'extra_specs': {utils.DISABLECOMPRESSION: "true"}}
with mock.patch.object(
self.common, '_is_valid_for_storage_assisted_migration',
return_value=(True, self.data.slo, self.data.workload)):
with mock.patch.object(
self.common, '_migrate_volume') as mock_migrate:
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type,
extra_specs)
self.assertTrue(bool(migrate_status))
mock_migrate.assert_called_once_with(
extra_specs[utils.ARRAY], volume, device_id,
extra_specs[utils.SRP], self.data.slo,
self.data.workload, volume_name, new_type, extra_specs)
@mock.patch.object(
common.PowerMaxCommon, 'get_volume_metadata', return_value='')
@mock.patch.object(
common.PowerMaxCommon, '_retype_volume',
return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name))
def test_migrate_volume_success_no_rep(self, mck_retype, mck_get):
array_id = self.data.array
volume = self.data.test_volume
device_id = self.data.device_id
srp = self.data.srp
target_slo = self.data.slo_silver
target_workload = self.data.workload
volume_name = volume.name
new_type = {'extra_specs': {}}
extra_specs = self.data.extra_specs
target_extra_specs = {
utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo,
utils.WORKLOAD: target_workload,
utils.INTERVAL: extra_specs[utils.INTERVAL],
utils.RETRIES: extra_specs[utils.RETRIES],
utils.DISABLECOMPRESSION: False}
success, model_update = self.common._migrate_volume(
array_id, volume, device_id, srp, target_slo, target_workload,
volume_name, new_type, extra_specs)
mck_retype.assert_called_once_with(
array_id, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs)
self.assertTrue(success)
@mock.patch.object(utils.PowerMaxUtils, 'get_rep_config',
return_value=tpd.PowerMaxData.rep_config_metro)
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
side_effect=[False, True])
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(rest.PowerMaxRest, 'get_slo_list', return_value=[])
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=[{'snapshotName': 'name',
'linkedDevices': 'details'}])
def test_migrate_to_metro_exception_on_linked_snapshot_source(
self, mck_get, mck_slo, mck_validate, mck_rep, mck_config):
array_id = self.data.array
volume = self.data.test_volume
device_id = self.data.device_id
srp = self.data.srp
target_slo = self.data.slo_silver
target_workload = self.data.workload
volume_name = volume.name
target_extra_specs = self.data.rep_extra_specs_rep_config_metro
new_type = {'extra_specs': target_extra_specs}
extra_specs = self.data.extra_specs
self.assertRaises(
exception.VolumeBackendAPIException, self.common._migrate_volume,
array_id, volume, device_id, srp, target_slo, target_workload,
volume_name, new_type, extra_specs)
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(utils.PowerMaxUtils, 'get_rep_config',
return_value=tpd.PowerMaxData.rep_config_metro)
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
side_effect=[False, True])
@mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status')
@mock.patch.object(rest.PowerMaxRest, 'get_slo_list', return_value=[])
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=[{'snapshotName': 'name'}])
@mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions',
return_value=('', {'source_vol_id': 'source_vol_id',
'snap_name': 'snap_name'}))
def test_migrate_to_metro_exception_on_snapshot_target(
self, mck_find, mck_snap, mck_slo, mck_validate, mck_rep,
mck_config, mck_cleanup):
array_id = self.data.array
volume = self.data.test_volume
device_id = self.data.device_id
srp = self.data.srp
target_slo = self.data.slo_silver
target_workload = self.data.workload
volume_name = volume.name
target_extra_specs = self.data.rep_extra_specs_rep_config_metro
new_type = {'extra_specs': target_extra_specs}
extra_specs = self.data.extra_specs
self.assertRaises(
exception.VolumeBackendAPIException, self.common._migrate_volume,
array_id, volume, device_id, srp, target_slo, target_workload,
volume_name, new_type, extra_specs)
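    # If a retype to a replicated type fails part-way through,
    # _cleanup_on_migrate_failure unwinds whatever completed: the remote
    # retype and the newly created RDF pair are reverted, the volume is
    # retyped back to its source group and, where the original pair had been
    # broken, replication is reconfigured, re-protected and resumed.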
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state',
return_value=['activebias'])
@mock.patch.object(common.PowerMaxCommon,
'_post_retype_srdf_protect_storage_group',
return_value=(True, True, True))
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_element_name',
return_value=tpd.PowerMaxData.volume_id)
@mock.patch.object(
common.PowerMaxCommon, 'configure_volume_replication',
return_value=('first_vol_in_rdf_group', True, True,
tpd.PowerMaxData.rep_extra_specs_mgmt, False))
@mock.patch.object(common.PowerMaxCommon, '_retype_volume')
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@mock.patch.object(
common.PowerMaxCommon, 'break_rdf_device_pair_session',
return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True))
@mock.patch.object(common.PowerMaxCommon, '_retype_remote_volume')
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
return_value=True)
def test_cleanup_on_migrate_failure(
self, mck_rep_enabled, mck_retype_remote, mck_break, mck_resume,
mck_retype, mck_configure, mck_get_vname, mck_protect, mck_states):
rdf_pair_broken = True
rdf_pair_created = True
vol_retyped = True
remote_retyped = True
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
target_extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt)
volume = self.data.test_volume
volume_name = self.data.volume_id
device_id = self.data.device_id
source_sg = self.data.storagegroup_name_f
array = self.data.array
srp = extra_specs[utils.SRP]
slo = extra_specs[utils.SLO]
workload = extra_specs[utils.WORKLOAD]
rep_mode = utils.REP_ASYNC
extra_specs[utils.REP_MODE] = rep_mode
self.common._cleanup_on_migrate_failure(
rdf_pair_broken, rdf_pair_created, vol_retyped,
remote_retyped, extra_specs, target_extra_specs, volume,
volume_name, device_id, source_sg)
mck_rep_enabled.assert_called_once_with(extra_specs)
mck_retype_remote.assert_called_once_with(
array, volume, device_id, volume_name,
rep_mode, True, extra_specs)
mck_break.assert_called_once_with(
array, device_id, volume_name, extra_specs, volume)
mck_resume.assert_called_once_with(
array, rep_extra_specs['mgmt_sg_name'],
rep_extra_specs['rdf_group_no'], rep_extra_specs)
mck_retype.assert_called_once_with(
array, srp, device_id, volume, volume_name,
target_extra_specs, slo, workload, extra_specs)
mck_configure.assert_called_once_with(
array, volume, device_id, extra_specs)
mck_get_vname.assert_called_once_with(volume.id)
mck_protect.assert_called_once_with(
array, source_sg, device_id, volume_name,
rep_extra_specs, volume)
@mock.patch.object(
masking.PowerMaxMasking, 'return_volume_to_volume_group')
@mock.patch.object(
masking.PowerMaxMasking, 'move_volume_between_storage_groups')
@mock.patch.object(masking.PowerMaxMasking, 'add_child_sg_to_parent_sg')
@mock.patch.object(rest.PowerMaxRest, 'create_storage_group')
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group_list',
return_value=['sg'])
def test_cleanup_on_retype_volume_failure_moved_sg(
self, mck_get_sgs, mck_create_sg, mck_add_child, mck_move,
mck_return):
created_child_sg = False
add_sg_to_parent = False
got_default_sg = False
moved_between_sgs = True
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
array = extra_specs[utils.ARRAY]
source_sg = self.data.storagegroup_name_f
parent_sg = self.data.parent_sg_f
target_sg_name = self.data.storagegroup_name_i
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
self.common._cleanup_on_retype_volume_failure(
created_child_sg, add_sg_to_parent, got_default_sg,
moved_between_sgs, array, source_sg, parent_sg, target_sg_name,
extra_specs, device_id, volume, volume_name)
mck_get_sgs.assert_called_once_with(array)
mck_create_sg.assert_called_once_with(
array, source_sg, extra_specs['srp'], extra_specs['slo'],
extra_specs['workload'], extra_specs, False)
mck_add_child.assert_called_once_with(
array, source_sg, parent_sg, extra_specs)
mck_move.assert_called_once_with(
array, device_id, target_sg_name, source_sg, extra_specs,
force=True, parent_sg=parent_sg)
mck_return.assert_called_once_with(
array, volume, device_id, volume_name, extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'delete_storage_group')
@mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group',
return_value=[])
def test_cleanup_on_retype_volume_failure_got_default(
self, mck_get_vols, mck_del_sg):
created_child_sg = False
add_sg_to_parent = False
got_default_sg = True
moved_between_sgs = False
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
array = extra_specs[utils.ARRAY]
source_sg = self.data.storagegroup_name_f
parent_sg = self.data.parent_sg_f
target_sg_name = self.data.storagegroup_name_i
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
self.common._cleanup_on_retype_volume_failure(
created_child_sg, add_sg_to_parent, got_default_sg,
moved_between_sgs, array, source_sg, parent_sg, target_sg_name,
extra_specs, device_id, volume, volume_name)
mck_get_vols.assert_called_once_with(array, target_sg_name)
mck_del_sg.assert_called_once_with(array, target_sg_name)
@mock.patch.object(rest.PowerMaxRest, 'delete_storage_group')
@mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg')
def test_cleanup_on_retype_volume_failure_created_child(
self, mck_remove_child_sg, mck_del_sg):
created_child_sg = True
add_sg_to_parent = True
got_default_sg = False
moved_between_sgs = False
extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
array = extra_specs[utils.ARRAY]
source_sg = self.data.storagegroup_name_f
parent_sg = self.data.parent_sg_f
target_sg_name = self.data.storagegroup_name_i
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
self.common._cleanup_on_retype_volume_failure(
created_child_sg, add_sg_to_parent, got_default_sg,
moved_between_sgs, array, source_sg, parent_sg, target_sg_name,
extra_specs, device_id, volume, volume_name)
mck_remove_child_sg.assert_called_once_with(
array, target_sg_name, parent_sg, extra_specs)
mck_del_sg.assert_called_once_with(array, target_sg_name)
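    # The _is_valid_for_storage_assisted_migration tests check parsing of the
    # target host string (array, SRP, SLO and workload) and reject moves that
    # would change nothing or that are not permitted during failover
    # promotion.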
def test_is_valid_for_storage_assisted_migration_true(self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (True, 'Silver', 'OLTP')
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
        # No current storage groups found
with mock.patch.object(self.rest, 'get_storage_groups_from_volume',
return_value=None):
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array, self.data.srp,
volume_name, False, False, self.data.slo, self.data.workload,
False)
self.assertEqual(ref_return, return_val)
host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'}
ref_return = (True, 'Silver', 'NONE')
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_false(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
# IndexError
host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123+dummy+data'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
# Wrong array
host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host2, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
# Wrong srp
host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host3, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
        # Already in the correct storage group
with mock.patch.object(
self.common.provision,
'get_slo_workload_settings_from_storage_group',
return_value='Diamond+DSS') as mock_settings:
host4 = {'host': self.data.fake_host}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host4, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
mock_settings.assert_called_once()
def test_is_valid_for_storage_assisted_migration_next_gen(self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (True, 'Silver', 'NONE')
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_change_comp(
self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
self.common.promotion = True
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, True, False, self.data.slo_silver,
self.data.workload, False)
self.common.promotion = False
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_change_slo(
self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
self.common.promotion = True
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.common.promotion = False
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_change_workload(
self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
self.common.promotion = True
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo_silver,
'fail_workload', False)
self.common.promotion = False
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_target_not_rep(
self):
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
self.common.promotion = True
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo_silver,
'OLTP', True)
self.common.promotion = False
self.assertEqual(ref_return, return_val)
@mock.patch.object(
rest.PowerMaxRest, 'get_storage_groups_from_volume',
return_value=tpd.PowerMaxData.default_sg_re_managed_list)
def test_is_valid_for_storage_assisted_migration_rep_with_mgmt_group(
self, mock_sg_list):
device_id = self.data.device_id
host = {'host': self.data.fake_host}
volume_name = self.data.test_volume.name
ref_return = (True, 'Diamond', 'NONE')
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
self.assertEqual(ref_return, return_val)
def test_find_volume_group(self):
group = self.data.test_group_1
array = self.data.array
volume_group = self.common._find_volume_group(array, group)
ref_group = self.data.sg_details_rep[0]
self.assertEqual(ref_group, volume_group)
def test_get_volume_device_ids(self):
array = self.data.array
volumes = [self.data.test_volume]
ref_device_ids = [self.data.device_id]
device_ids = self.common._get_volume_device_ids(volumes, array)
self.assertEqual(ref_device_ids, device_ids)
@mock.patch.object(common.PowerMaxCommon, '_find_device_on_array',
return_value=tpd.PowerMaxData.device_id)
def test_get_volume_device_ids_remote_volumes(self, mck_find):
array = self.data.array
volumes = [self.data.test_rep_volume]
ref_device_ids = [self.data.device_id]
replication_details = ast.literal_eval(
self.data.test_rep_volume.replication_driver_data)
remote_array = replication_details.get(utils.ARRAY)
specs = {utils.ARRAY: remote_array}
device_ids = self.common._get_volume_device_ids(volumes, array, True)
self.assertEqual(ref_device_ids, device_ids)
mck_find.assert_called_once_with(
self.data.test_rep_volume, specs, True)
def test_get_members_of_volume_group(self):
array = self.data.array
group_name = self.data.storagegroup_name_source
ref_volumes = [self.data.device_id, self.data.device_id2]
member_device_ids = self.common._get_members_of_volume_group(
array, group_name)
self.assertEqual(ref_volumes, member_device_ids)
def test_get_members_of_volume_group_empty(self):
array = self.data.array
group_name = self.data.storagegroup_name_source
with mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
return_value=None):
            member_device_ids = self.common._get_members_of_volume_group(
                array, group_name)
self.assertIsNone(member_device_ids)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_replica(self, mock_check):
source_group = self.data.test_group_1
snap_name = self.data.group_snapshot_name
with mock.patch.object(
self.common,
'_create_group_replica') as mock_create_replica:
self.common._create_group_replica(
source_group, snap_name)
mock_create_replica.assert_called_once_with(
source_group, snap_name)
def test_create_group_replica_exception(self):
source_group = self.data.test_group_failed
snap_name = self.data.group_snapshot_name
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_group_replica,
source_group,
snap_name)
def test_create_group_snapshot(self):
context = None
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common.create_group_snapshot(
context, group_snapshot, snapshots))
self.assertEqual(ref_model_update, model_update)
def test_create_group_snapshot_exception(self):
context = None
group_snapshot = self.data.test_group_snapshot_failed
snapshots = []
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.create_group_snapshot,
context,
group_snapshot,
snapshots)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_create_group(self, mock_type, mock_cg_type):
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
model_update = self.common.create_group(None, self.data.test_group_1)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_group',
side_effect=exception.CinderException)
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_create_group_exception(self, mock_type, mock_create):
context = None
group = self.data.test_group_failed
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.create_group,
context, group)
def test_delete_group_snapshot(self):
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
context = None
ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common.delete_group_snapshot(context,
group_snapshot, snapshots))
self.assertEqual(ref_model_update, model_update)
def test_delete_group_snapshot_success(self):
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common._delete_group_snapshot(group_snapshot,
snapshots))
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
return_value=None)
def test_delete_group_snapshot_not_on_array(self, mock_gvg):
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
ref_model_update = (
{'status': fields.GroupSnapshotStatus.DELETED})
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common._delete_group_snapshot(group_snapshot,
snapshots))
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group(self, mock_cg_type, mock_type_check):
group = self.data.test_group_1
add_vols = [self.data.test_volume]
remove_vols = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
model_update, __, __ = self.common.update_group(
group, add_vols, remove_vols)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
return_value=None)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_not_found(self, mock_check, mock_grp):
self.assertRaises(exception.GroupNotFound, self.common.update_group,
self.data.test_group_1, [], [])
@mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
side_effect=exception.VolumeBackendAPIException)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_exception(self, mock_check, mock_grp):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.update_group,
self.data.test_group_1, [], [])
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_remove_volumes(self, mock_cg_type, mock_type_check):
group = self.data.test_group_1
add_vols = []
remove_vols = [self.data.test_volume_group_member]
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(
rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=False) as mock_exists:
model_update, __, __ = self.common.update_group(
group, add_vols, remove_vols)
mock_exists.assert_called_once()
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_failover_failure(
self, mock_cg_type, mock_type_check):
group = self.data.test_group_1
add_vols = []
remove_vols = [self.data.test_volume_group_member]
self.common.failover = True
self.assertRaises(
exception.VolumeBackendAPIException, self.common.update_group,
group, add_vols, remove_vols)
self.common.failover = False
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(common.PowerMaxCommon, '_update_group_promotion')
def test_update_group_during_promotion(
self, mck_update, mock_cg_type, mock_type_check):
group = self.data.test_group_1
add_vols = []
remove_vols = [self.data.test_volume_group_member]
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
self.common.promotion = True
model_update, __, __ = self.common.update_group(
group, add_vols, remove_vols)
self.common.promotion = False
mck_update.assert_called_once_with(group, add_vols, remove_vols)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=True)
@mock.patch.object(
common.PowerMaxCommon, '_get_replication_extra_specs',
return_value=tpd.PowerMaxData.rep_extra_specs_rep_config)
@mock.patch.object(
common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.ex_specs_rep_config)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(
masking.PowerMaxMasking, 'remove_volumes_from_storage_group')
def test_update_group_promotion(
self, mck_rem, mock_cg_type, mock_type_check, mck_setup, mck_rep,
mck_in_sg):
group = self.data.test_rep_group
add_vols = []
remove_vols = [self.data.test_volume_group_member]
remote_array = self.data.remote_array
device_id = [self.data.device_id]
group_name = self.data.storagegroup_name_source
interval_retries_dict = {utils.INTERVAL: 1,
utils.RETRIES: 1,
utils.FORCE_VOL_EDIT: True}
self.common._update_group_promotion(group, add_vols, remove_vols)
mck_rem.assert_called_once_with(
remote_array, device_id, group_name, interval_retries_dict)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_promotion_non_replicated(
self, mock_cg_type, mock_type_check):
group = self.data.test_group_failed
add_vols = []
remove_vols = [self.data.test_volume_group_member]
self.assertRaises(exception.VolumeBackendAPIException,
self.common._update_group_promotion,
group, add_vols, remove_vols)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_promotion_add_volumes(
self, mock_cg_type, mock_type_check):
group = self.data.test_rep_group
add_vols = [self.data.test_volume_group_member]
remove_vols = []
self.assertRaises(exception.VolumeBackendAPIException,
self.common._update_group_promotion,
group, add_vols, remove_vols)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_delete_group(self, mock_check, mck_snaps):
group = self.data.test_group_1
volumes = [self.data.test_volume]
context = None
ref_model_update = {'status': fields.GroupStatus.DELETED}
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True), mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
return_value=[]):
model_update, __ = self.common.delete_group(
context, group, volumes)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_delete_group_success(self, mock_check, mck_get_snaps):
group = self.data.test_group_1
volumes = []
ref_model_update = {'status': fields.GroupStatus.DELETED}
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True), mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
return_value=[]):
model_update, __ = self.common._delete_group(group, volumes)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members')
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(common.PowerMaxCommon, '_get_members_of_volume_group',
return_value=[tpd.PowerMaxData.device_id])
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=[])
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
def test_delete_group_snapshot_and_volume_cleanup(
self, mock_check, mck_get_snaps, mock_members, mock_cleanup,
mock_remove):
group = self.data.test_group_1
volumes = [fake_volume.fake_volume_obj(
context='cxt', provider_location=None)]
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True), mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
return_value=[]):
self.common._delete_group(group, volumes)
mock_cleanup.assert_called_once()
mock_remove.assert_called_once()
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
def test_delete_group_already_deleted(self, mck_get_snaps):
group = self.data.test_group_failed
ref_model_update = {'status': fields.GroupStatus.DELETED}
volumes = []
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, __ = self.common._delete_group(group, volumes)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_failed(
self, mock_check, mock_type_check, mck_get_snaps):
group = self.data.test_group_1
volumes = []
ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING}
with mock.patch.object(
self.rest, 'delete_storage_group',
side_effect=exception.VolumeBackendAPIException):
model_update, __ = self.common._delete_group(
group, volumes)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=list())
@mock.patch.object(volume_utils, 'is_group_a_type', return_value=False)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group',
return_value=[
tpd.PowerMaxData.test_volume_group_member])
@mock.patch.object(common.PowerMaxCommon, '_get_members_of_volume_group',
return_value=[tpd.PowerMaxData.device_id])
@mock.patch.object(common.PowerMaxCommon, '_find_device_on_array',
return_value=tpd.PowerMaxData.device_id)
@mock.patch.object(masking.PowerMaxMasking,
'remove_volumes_from_storage_group')
def test_delete_group_cleanup_snapvx(
self, mock_rem, mock_find, mock_mems, mock_vols, mock_chk1,
mock_chk2, mck_get_snaps):
group = self.data.test_group_1
volumes = [self.data.test_volume_group_member]
with mock.patch.object(
self.common, '_cleanup_device_snapvx') as mock_cleanup_snapvx:
self.common._delete_group(group, volumes)
mock_cleanup_snapvx.assert_called_once()
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=[{'snapshotName': 'name'}])
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_delete_group_with_volumes_exception_on_remaining_snapshots(
self, mck_cleanup, mck_get):
group = self.data.test_group_1
volumes = [self.data.test_volume_group_member]
self.assertRaises(exception.VolumeBackendAPIException,
self.common._delete_group, group, volumes)
@mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions',
return_value=('', {'source_vol_id': 'id',
'snap_name': 'name'}))
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
return_value=None)
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_delete_group_with_volumes_exception_on_target_links(
self, mck_cleanup, mck_get, mck_find):
group = self.data.test_group_1
volumes = [self.data.test_volume_group_member]
self.assertRaises(exception.VolumeBackendAPIException,
self.common._delete_group, group, volumes)
@mock.patch.object(rest.PowerMaxRest, 'delete_storage_group')
@mock.patch.object(common.PowerMaxCommon, '_failover_replication',
return_value=(True, None))
@mock.patch.object(masking.PowerMaxMasking, 'add_volumes_to_storage_group')
@mock.patch.object(common.PowerMaxCommon, '_get_volume_device_ids',
return_value=[tpd.PowerMaxData.device_id])
@mock.patch.object(provision.PowerMaxProvision, 'create_volume_group')
@mock.patch.object(common.PowerMaxCommon, '_initial_setup',
return_value=tpd.PowerMaxData.ex_specs_rep_config_sync)
def test_update_volume_list_from_sync_vol_list(
self, mck_setup, mck_grp, mck_ids, mck_add, mck_fover, mck_del):
vol_list = [self.data.test_rep_volume]
vol_ids = [self.data.device_id]
remote_array = self.data.remote_array
temp_group = 'OS-23_24_007-temp-rdf-sg'
extra_specs = self.data.ex_specs_rep_config_sync
self.common._update_volume_list_from_sync_vol_list(vol_list, None)
mck_grp.assert_called_once_with(remote_array, temp_group, extra_specs)
mck_ids.assert_called_once_with(
vol_list, remote_array, remote_volumes=True)
mck_add.assert_called_once_with(
remote_array, vol_ids, temp_group, extra_specs)
mck_fover.assert_called_once_with(
vol_list, None, temp_group, secondary_backend_id=None, host=True)
mck_del.assert_called_once_with(remote_array, temp_group)
@mock.patch.object(
common.PowerMaxCommon, '_remove_vol_and_cleanup_replication')
@mock.patch.object(
masking.PowerMaxMasking, 'remove_volumes_from_storage_group')
def test_rollback_create_group_from_src(
self, mock_rm, mock_clean):
rollback_dict = {
'target_group_name': self.data.target_group_name,
'snap_name': 'snap1', 'source_group_name': 'src_grp',
'volumes': (self.data.device_id, self.data.extra_specs,
self.data.test_volume),
'device_ids': [self.data.device_id],
'interval_retries_dict': self.data.extra_specs}
for x in range(0, 2):
self.common._rollback_create_group_from_src(
self.data.array, rollback_dict)
self.assertEqual(2, mock_rm.call_count)
def test_get_snap_src_dev_list(self):
src_dev_ids = self.common._get_snap_src_dev_list(
self.data.array, [self.data.test_snapshot])
ref_dev_ids = [self.data.device_id]
self.assertEqual(ref_dev_ids, src_dev_ids)
def test_get_clone_vol_info(self):
ref_dev_id = self.data.device_id
source_vols = [self.data.test_volume,
self.data.test_attached_volume]
src_snapshots = [self.data.test_snapshot]
src_dev_id1, extra_specs1, vol_size1, tgt_vol_name1 = (
self.common._get_clone_vol_info(
self.data.test_clone_volume, source_vols, []))
src_dev_id2, extra_specs2, vol_size2, tgt_vol_name2 = (
self.common._get_clone_vol_info(
self.data.test_clone_volume, [], src_snapshots))
self.assertEqual(ref_dev_id, src_dev_id1)
self.assertEqual(ref_dev_id, src_dev_id2)
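    # Illustrative sketch only: the FakeConfiguration kwargs used in the
    # config tests below roughly correspond to a cinder.conf backend section
    # such as the following (the section name is arbitrary):
    #
    #   [powermax_backend]
    #   san_ip = 1.1.1.1
    #   san_login = smc
    #   san_password = smc
    #   san_api_port = 8443
    #   powermax_array = 000197800123
    #   powermax_srp = SRP_1
    #   powermax_port_groups = [OS-fibre-PG]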
def test_get_attributes_from_cinder_config_new_and_old(self):
kwargs_expected = (
{'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': [self.data.port_group_name_i]})
old_conf = tpfo.FakeConfiguration(None, 'CommonTests', 1, 1)
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
self.common.configuration = old_conf
kwargs = self.common.get_attributes_from_cinder_config()
self.assertIsNone(kwargs)
def test_get_attributes_from_cinder_config_with_port(self):
kwargs_expected = (
{'RestServerIp': '1.1.1.1', 'RestServerPort': 3448,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': [self.data.port_group_name_i]})
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=3448,
powermax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
def test_get_attributes_from_cinder_config_no_port(self):
kwargs_expected = (
{'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': [self.data.port_group_name_i]})
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc',
powermax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
def test_get_ssl_attributes_from_cinder_config(self):
conf = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc',
powermax_port_groups=[self.data.port_group_name_i],
driver_ssl_cert_verify=True,
driver_ssl_cert_path='/path/to/cert')
self.common.configuration = conf
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual('/path/to/cert', conf_returned['SSLVerify'])
conf.driver_ssl_cert_verify = True
conf.driver_ssl_cert_path = None
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertTrue(conf_returned['SSLVerify'])
conf.driver_ssl_cert_verify = False
conf.driver_ssl_cert_path = None
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertFalse(conf_returned['SSLVerify'])
@mock.patch.object(rest.PowerMaxRest, 'get_size_of_device_on_array',
return_value=2.0)
def test_manage_snapshot_get_size_success(self, mock_get_size):
size = self.common.manage_existing_snapshot_get_size(
self.data.test_snapshot)
self.assertEqual(2, size)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps',
return_value=[{'snap_name': 'snap_name',
'snap_id': tpd.PowerMaxData.snap_id}])
@mock.patch.object(
common.PowerMaxCommon, 'get_snapshot_metadata',
return_value={'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2'})
def test_manage_snapshot_success(self, mck_meta, mock_snap):
snapshot = deepcopy(self.data.test_snapshot_manage)
snapshot.metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
existing_ref = {u'source-name': u'test_snap'}
updates_response = self.common.manage_existing_snapshot(
snapshot, existing_ref)
prov_loc = {'source_id': self.data.device_id,
'snap_name': 'OS-%s' % existing_ref['source-name']}
updates = {'display_name': 'my_snap',
'provider_location': six.text_type(prov_loc),
'metadata': {'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}}
self.assertEqual(updates_response, updates)
def test_manage_snapshot_fail_already_managed(self):
snapshot = self.data.test_snapshot_manage
existing_ref = {u'source-name': u'OS-test_snap'}
self.assertRaises(exception.VolumeBackendAPIException,
self.common.manage_existing_snapshot,
snapshot, existing_ref)
@mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over',
return_value=True)
def test_manage_snapshot_fail_vol_failed_over(self, mock_failed):
snapshot = self.data.test_snapshot_manage
existing_ref = {u'source-name': u'test_snap'}
self.assertRaises(exception.VolumeBackendAPIException,
self.common.manage_existing_snapshot,
snapshot, existing_ref)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
return_value=False)
def test_manage_snapshot_fail_vol_not_snap_src(self, mock_snap):
snapshot = self.data.test_snapshot_manage
existing_ref = {u'source-name': u'test_snap'}
self.assertRaises(exception.VolumeBackendAPIException,
self.common.manage_existing_snapshot,
snapshot, existing_ref)
@mock.patch.object(utils.PowerMaxUtils, 'modify_snapshot_prefix',
side_effect=exception.VolumeBackendAPIException)
def test_manage_snapshot_fail_add_prefix(self, mock_mod):
snapshot = self.data.test_snapshot_manage
existing_ref = {u'source-name': u'test_snap'}
self.assertRaises(exception.VolumeBackendAPIException,
self.common.manage_existing_snapshot,
snapshot, existing_ref)
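    # Snapshot references may be supplied either as the exact snap name on
    # the array or as a Cinder snapshot UUID (optionally prefixed);
    # _get_snap_id_with_uuid resolves the UUID form to the matching backend
    # snap id and snap name.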
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps',
return_value=[{'snap_name': 'snap_name',
'snap_id': tpd.PowerMaxData.snap_id}])
def test_get_snap_id_with_uuid_success(self, mock_get_snaps):
snap_uuid = '_snapshot-' + fake.SNAPSHOT_ID
snap_id, snap_name = self.common._get_snap_id_with_uuid(
self.data.array, self.data.device_id, snap_uuid)
self.assertEqual(self.data.snap_id, snap_id)
self.assertEqual('253b28496ec7aab', snap_name)
snap_uuid = fake.SNAPSHOT_ID
snap_id, snap_name = self.common._get_snap_id_with_uuid(
self.data.array, self.data.device_id, snap_uuid)
self.assertEqual(self.data.snap_id, snap_id)
self.assertEqual('253b28496ec7aab', snap_name)
@mock.patch.object(
common.PowerMaxCommon, 'get_snapshot_metadata',
return_value={'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2'})
@mock.patch.object(
rest.PowerMaxRest, 'get_volume_snaps',
return_value=[{'snap_name': tpd.PowerMaxData.test_snapshot_snap_name,
'snap_id': tpd.PowerMaxData.snap_id}])
def test_manage_existing_snapshot_no_fall_through(
self, mock_get_snaps, mock_meta):
external_ref = {u'source-name': u'test_snap'}
snapshot = deepcopy(self.data.test_snapshot)
with mock.patch.object(
self.common, '_get_snap_id_with_uuid',
return_value=(
self.data.snap_id,
self.data.test_snapshot_snap_name)) as mock_uuid:
self.common.manage_existing_snapshot(snapshot, external_ref)
mock_uuid.assert_not_called()
@mock.patch.object(
common.PowerMaxCommon, 'get_snapshot_metadata',
return_value={'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2'})
def test_manage_existing_snapshot_fall_through(self, mock_meta):
external_ref = {u'source-name': fake.SNAPSHOT_ID}
snapshot = deepcopy(self.data.test_snapshot)
with mock.patch.object(
self.common, '_get_snap_id_with_uuid',
return_value=(
self.data.snap_id,
self.data.test_snapshot_snap_name)) as mock_uuid:
self.common.manage_existing_snapshot(snapshot, external_ref)
mock_uuid.assert_called()
@mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap')
    def test_unmanage_snapshot_success(self, mock_mod):
self.common.unmanage_snapshot(self.data.test_snapshot_manage)
mock_mod.assert_called_once()
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap')
def test_unmanage_snapshot_no_snapvx_cleanup(self, mock_mod, mock_cleanup):
self.common.unmanage_snapshot(self.data.test_snapshot_manage)
mock_mod.assert_called_once()
mock_cleanup.assert_not_called()
@mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over',
return_value=True)
def test_unmanage_snapshot_fail_failover(self, mock_failed):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.unmanage_snapshot,
self.data.test_snapshot_manage)
@mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap',
side_effect=exception.VolumeBackendAPIException)
def test_unmanage_snapshot_fail_rename(self, mock_snap):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.unmanage_snapshot,
self.data.test_snapshot_manage)
@mock.patch.object(
common.PowerMaxCommon, '_parse_snap_info', return_value=(
tpd.PowerMaxData.device_id,
tpd.PowerMaxData.snap_location['snap_name'],
[tpd.PowerMaxData.snap_id]))
@mock.patch.object(provision.PowerMaxProvision, 'delete_volume_snap')
@mock.patch.object(provision.PowerMaxProvision, 'is_restore_complete',
return_value=True)
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
@mock.patch.object(provision.PowerMaxProvision, 'revert_volume_snapshot')
def test_revert_to_snapshot(self, mock_revert, mock_clone,
mock_complete, mock_delete, mock_parse):
volume = self.data.test_volume
snapshot = self.data.test_snapshot
array = self.data.array
device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
snap_id = self.data.snap_id
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs['storagetype:portgroupname'] = (
self.data.port_group_name_f)
self.common.revert_to_snapshot(volume, snapshot)
mock_revert.assert_called_once_with(
array, device_id, snap_name, snap_id, extra_specs)
mock_clone.assert_called_once_with(array, device_id, extra_specs)
mock_complete.assert_called_once_with(array, device_id,
snap_name, snap_id, extra_specs)
mock_delete.assert_called_once_with(array, snap_name, device_id,
self.data.snap_id, restored=True)
@mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
return_value=True)
def test_revert_to_snapshot_replicated(self, mock_rep):
volume = self.data.test_volume
snapshot = self.data.test_snapshot
self.assertRaises(exception.VolumeDriverException,
self.common.revert_to_snapshot, volume, snapshot)
def test_get_initiator_check_flag(self):
self.common.configuration.initiator_check = False
initiator_check = self.common._get_initiator_check_flag()
self.assertFalse(initiator_check)
def test_get_initiator_check_flag_true(self):
self.common.configuration.initiator_check = True
initiator_check = self.common._get_initiator_check_flag()
self.assertTrue(initiator_check)
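    # get_manageable_volumes/get_manageable_snapshots page through the
    # array's private volume list, honouring the marker, limit, offset and
    # sort parameters, and report only devices that are safe to manage
    # (thin FBA devices in these fixtures).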
def test_get_manageable_volumes_success(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_single):
vols_lists = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [
{'reference': {'source-id': '00001'}, 'safe_to_manage': True,
'size': 1.0, 'reason_not_safe': None, 'cinder_id': None,
'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}]
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_volumes_filters_set(self):
marker, limit, offset = '00002', 2, 1
sort_keys, sort_dirs = 'size', 'desc'
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi):
vols_lists = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [
{'reference': {'source-id': '00003'}, 'safe_to_manage': True,
'size': 300, 'reason_not_safe': None, 'cinder_id': None,
'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}},
{'reference': {'source-id': '00004'}, 'safe_to_manage': True,
'size': 400, 'reason_not_safe': None, 'cinder_id': None,
'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}]
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_volumes_fail_no_vols(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=[]):
expected_response = []
vol_list = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vol_list, expected_response)
def test_get_manageable_volumes_fail_no_valid_vols(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi_invalid):
expected_response = []
vol_list = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vol_list, expected_response)
def test_get_manageable_snapshots_success(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_single):
snap_list = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [{
'reference': {'source-name': 'testSnap1'},
'safe_to_manage': True, 'size': 1,
'reason_not_safe': None, 'cinder_id': None,
'extra_info': {
'generation': 0, 'secured': False, 'timeToLive': 'N/A',
'timestamp': mock.ANY, 'snap_id': self.data.snap_id},
'source_reference': {'source-id': '00001'}}]
self.assertEqual(expected_response, snap_list)
def test_get_manageable_snapshots_filters_set(self):
marker, limit, offset = 'testSnap2', 2, 1
sort_keys, sort_dirs = 'size', 'desc'
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi):
vols_lists = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [
{'reference': {'source-name': 'testSnap3'},
'safe_to_manage': True, 'size': 300, 'reason_not_safe': None,
'cinder_id': None, 'extra_info': {
'snap_id': self.data.snap_id, 'secured': False,
'timeToLive': 'N/A', 'timestamp': mock.ANY,
'generation': 0},
'source_reference': {'source-id': '00003'}},
{'reference': {'source-name': 'testSnap4'},
'safe_to_manage': True, 'size': 400, 'reason_not_safe': None,
'cinder_id': None, 'extra_info': {
'snap_id': self.data.snap_id, 'secured': False,
'timeToLive': 'N/A', 'timestamp': mock.ANY,
'generation': 0},
'source_reference': {'source-id': '00004'}}]
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_snapshots_fail_no_snaps(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(self.rest, 'get_private_volume_list',
return_value=[]):
expected_response = []
vols_lists = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_snapshots_fail_no_valid_snaps(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi_invalid):
expected_response = []
vols_lists = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vols_lists, expected_response)
def test_get_slo_workload_combo_from_cinder_conf(self):
self.common.configuration.powermax_service_level = 'Diamond'
self.common.configuration.vmax_workload = 'DSS'
response1 = self.common.get_attributes_from_cinder_config()
self.assertEqual('Diamond', response1['ServiceLevel'])
self.assertEqual('DSS', response1['Workload'])
self.common.configuration.powermax_service_level = 'Diamond'
self.common.configuration.vmax_workload = None
response2 = self.common.get_attributes_from_cinder_config()
self.assertEqual(self.common.configuration.powermax_service_level,
response2['ServiceLevel'])
self.assertIsNone(response2['Workload'])
expected_response = {
'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': '000197800123', 'srpName': 'SRP_1',
'PortGroup': ['OS-fibre-PG']}
self.common.configuration.powermax_service_level = None
self.common.configuration.vmax_workload = 'DSS'
response3 = self.common.get_attributes_from_cinder_config()
self.assertEqual(expected_response, response3)
self.common.configuration.powermax_service_level = None
self.common.configuration.vmax_workload = None
response4 = self.common.get_attributes_from_cinder_config()
self.assertEqual(expected_response, response4)
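    # 'u4p' below refers to Unisphere for PowerMax, the REST endpoint the
    # driver communicates with.  The failover tests configure a list of
    # standby Unisphere targets that the REST client can switch to when the
    # primary instance becomes unreachable, along with backoff/retry settings.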
def test_get_u4p_failover_info(self):
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='test',
san_password='test', san_api_port=8443,
driver_ssl_cert_verify='/path/to/cert',
u4p_failover_target=(self.data.u4p_failover_config[
'u4p_failover_targets']), u4p_failover_backoff_factor='2',
u4p_failover_retries='3', u4p_failover_timeout='10',
u4p_primary='10.10.10.10', powermax_array=self.data.array,
powermax_srp=self.data.srp)
self.common.configuration = configuration
self.common._get_u4p_failover_info()
self.assertTrue(self.rest.u4p_failover_enabled)
self.assertIsNotNone(self.rest.u4p_failover_targets)
@mock.patch.object(rest.PowerMaxRest, 'set_u4p_failover_config')
def test_get_u4p_failover_info_failover_config(self, mck_set_fo):
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='test',
san_password='test', san_api_port=8443,
driver_ssl_cert_verify='/path/to/cert',
u4p_failover_target=(self.data.u4p_failover_config[
'u4p_failover_targets']), u4p_failover_backoff_factor='2',
u4p_failover_retries='3', u4p_failover_timeout='10',
u4p_primary='10.10.10.10', powermax_array=self.data.array,
powermax_srp=self.data.srp)
expected_u4p_failover_config = {
'u4p_failover_targets': [
{'RestServerIp': '10.10.10.11', 'RestServerPort': '8443',
'RestUserName': 'test', 'RestPassword': 'test',
'SSLVerify': 'True', 'SerialNumber': '000197800123'},
{'RestServerIp': '10.10.10.12', 'RestServerPort': '8443',
'RestUserName': 'test', 'RestPassword': 'test',
'SSLVerify': True, 'SerialNumber': '000197800123'},
{'RestServerIp': '10.10.10.11', 'RestServerPort': '8443',
'RestUserName': 'test', 'RestPassword': 'test',
'SSLVerify': 'False', 'SerialNumber': '000197800123'}],
'u4p_failover_backoff_factor': '2', 'u4p_failover_retries': '3',
'u4p_failover_timeout': '10', 'u4p_failover_autofailback': None,
'u4p_primary': {
'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'test', 'RestPassword': 'test',
'SerialNumber': '000197800123', 'srpName': 'SRP_1',
'PortGroup': None, 'SSLVerify': True}}
self.common.configuration = configuration
self.common._get_u4p_failover_info()
self.assertIsNotNone(self.rest.u4p_failover_targets)
mck_set_fo.assert_called_once_with(expected_u4p_failover_config)
def test_update_vol_stats_retest_u4p(self):
self.rest.u4p_in_failover = True
self.rest.u4p_failover_autofailback = True
with mock.patch.object(
self.common, 'retest_primary_u4p') as mock_retest:
self.common.update_volume_stats()
mock_retest.assert_called_once()
self.rest.u4p_in_failover = True
self.rest.u4p_failover_autofailback = False
with mock.patch.object(
self.common, 'retest_primary_u4p') as mock_retest:
self.common.update_volume_stats()
mock_retest.assert_not_called()
@mock.patch.object(rest.PowerMaxRest, 'request', return_value=[200, None])
@mock.patch.object(
common.PowerMaxCommon, 'get_attributes_from_cinder_config',
return_value=tpd.PowerMaxData.u4p_failover_target[0])
def test_retest_primary_u4p(self, mock_primary_u4p, mock_request):
self.common.retest_primary_u4p()
self.assertFalse(self.rest.u4p_in_failover)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, False, None))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_extend_vol_validation_checks_success(self, mck_cleanup, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size + 1
extra_specs = self.data.extra_specs
self.common._extend_vol_validation_checks(
array, device_id, volume.name, extra_specs, volume.size, new_size)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, False, None))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_extend_vol_val_check_no_device(self, mck_cleanup, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = None
new_size = self.data.test_volume.size + 1
extra_specs = self.data.extra_specs
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
array, device_id, volume.name, extra_specs, volume.size, new_size)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, True, None))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_extend_vol_val_check_snap_src(self, mck_cleanup, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
self.common.next_gen = False
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
array, device_id, volume.name, extra_specs, volume.size, new_size)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, False, None))
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
def test_extend_vol_val_check_wrong_size(self, mck_cleanup, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = volume.size - 1
extra_specs = self.data.extra_specs
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
array, device_id, volume.name, extra_specs, volume.size, new_size)
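    # ODE (online device expansion) is the ability to extend a volume without
    # tearing down its RDF pairing.  The capability checks below gate it on
    # the microcode (ucode) level of the local and remote arrays: 5978.x is
    # "next gen", and the Elm SR (5978.221) referenced in the docstrings is
    # the level at which Metro ODE becomes available on the target.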
def test_array_ode_capabilities_check_non_next_gen_local(self):
"""Rep enabled, neither array next gen, returns F,F,F,F"""
array = self.data.powermax_model_details['symmetrixId']
self.common.next_gen = False
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
self.assertFalse(r1_ode)
self.assertFalse(r1_ode_metro)
self.assertFalse(r2_ode)
self.assertFalse(r2_ode_metro)
@mock.patch.object(rest.PowerMaxRest, 'get_array_detail',
return_value={'ucode': '5977.1.1'})
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(10, tpd.PowerMaxData.remote_array))
def test_array_ode_capabilities_check_next_gen_non_rep_pre_elm(
self, mock_rdf, mock_det):
"""Rep disabled, local array next gen, pre elm, returns T,F,F,F"""
array = self.data.powermax_model_details['symmetrixId']
self.common.ucode_level = '5978.1.1'
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, False)
self.assertTrue(r1_ode)
self.assertFalse(r1_ode_metro)
self.assertFalse(r2_ode)
self.assertFalse(r2_ode_metro)
@mock.patch.object(rest.PowerMaxRest, 'get_array_detail',
return_value={'ucode': '5977.1.1'})
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(10, tpd.PowerMaxData.remote_array))
def test_array_ode_capabilities_check_next_gen_remote_rep(
self, mock_rdf, mock_det):
"""Rep enabled, remote not next gen, returns T,T,F,F"""
array = self.data.powermax_model_details['symmetrixId']
self.common.ucode_level = self.data.powermax_model_details['ucode']
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
self.assertTrue(r1_ode)
self.assertTrue(r1_ode_metro)
self.assertFalse(r2_ode)
self.assertFalse(r2_ode_metro)
@mock.patch.object(rest.PowerMaxRest, 'get_array_detail',
return_value={'ucode': '5978.1.1'})
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(10, tpd.PowerMaxData.remote_array))
def test_array_ode_capabilities_check_next_gen_pre_elm_rep(
self, mock_rdf, mock_det):
"""Rep enabled, both array next gen, tgt<5978.221, returns T,T,T,F"""
array = self.data.powermax_model_details['symmetrixId']
self.common.ucode_level = self.data.powermax_model_details['ucode']
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
self.assertTrue(r1_ode)
self.assertTrue(r1_ode_metro)
self.assertTrue(r2_ode)
self.assertFalse(r2_ode_metro)
@mock.patch.object(rest.PowerMaxRest, 'get_array_detail',
return_value=tpd.PowerMaxData.ucode_5978_foxtail)
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(10, tpd.PowerMaxData.remote_array))
def test_array_ode_capabilities_check_next_gen_post_elm_rep(
self, mock_rdf, mock_det):
"""Rep enabled, both array next gen, tgt>5978.221 returns T,T,T,T"""
array = self.data.powermax_model_details['symmetrixId']
self.common.ucode_level = self.data.powermax_model_details['ucode']
self.common.next_gen = True
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
self.assertTrue(r1_ode)
self.assertTrue(r1_ode_metro)
self.assertTrue(r2_ode)
self.assertTrue(r2_ode_metro)
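    # Where ODE is unavailable the driver falls back to the legacy path
    # mocked in the next two tests: break the RDF device pair, extend the
    # device, re-configure replication and finally resume it, which is why
    # each of those calls is patched and asserted on individually.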
@mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication')
@mock.patch.object(common.PowerMaxCommon, '_protect_storage_group')
@mock.patch.object(
common.PowerMaxCommon, 'configure_volume_replication',
return_value=('first_vol_in_rdf_group', None, None,
tpd.PowerMaxData.rep_extra_specs_mgmt, True))
@mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
@mock.patch.object(common.PowerMaxCommon, 'break_rdf_device_pair_session')
def test_extend_legacy_replicated_vol(
self, mck_break, mck_extend, mck_configure, mck_protect, mck_res):
volume = self.data.test_volume_group_member
array = self.data.array
device_id = self.data.device_id
new_size = volume.size + 1
extra_specs = self.data.extra_specs
rdf_group_no = self.data.rdf_group_no_1
self.common._extend_legacy_replicated_vol(
array, volume, device_id, volume.name, new_size, extra_specs,
rdf_group_no)
mck_protect.assert_called_once()
mck_res.assert_called_once()
@mock.patch.object(
common.PowerMaxCommon, 'break_rdf_device_pair_session',
side_effect=exception.VolumeBackendAPIException)
def test_extend_legacy_replicated_vol_fail(self, mck_resume):
volume = self.data.test_volume_group_member
array = self.data.array
device_id = self.data.device_id
new_size = volume.size + 1
extra_specs = self.data.extra_specs
rdf_group_no = self.data.rdf_group_no_1
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_legacy_replicated_vol,
            array, volume, device_id, volume.name, new_size, extra_specs,
            rdf_group_no)
def test_get_unisphere_port(self):
# Test user set port ID
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=1234,
powermax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
port = self.common._get_unisphere_port()
self.assertEqual(1234, port)
# Test no set port ID, use default port
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc',
powermax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
ref_port = utils.DEFAULT_PORT
port = self.common._get_unisphere_port()
self.assertEqual(ref_port, port)
@mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions',
return_value=(None, tpd.PowerMaxData.snap_tgt_session))
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(True, False, False))
def test_get_target_source_device(self, mck_rep, mck_find):
array = self.data.array
tgt_device = self.data.device_id2
src_device = self.common._get_target_source_device(array, tgt_device)
self.assertEqual(src_device, self.data.device_id)
@mock.patch.object(rest.PowerMaxRest, '_get_private_volume',
return_value=tpd.PowerMaxData.priv_vol_response_rep)
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=(tpd.PowerMaxData.array_model, None))
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_group',
return_value=(tpd.PowerMaxData.rdf_group_details))
def test_get_volume_metadata_rep(self, mck_rdf, mck_model, mck_priv):
ref_metadata = {
'DeviceID': self.data.device_id,
'DeviceLabel': self.data.device_label, 'ArrayID': self.data.array,
'ArrayModel': self.data.array_model, 'ServiceLevel': 'None',
'Workload': 'None', 'Emulation': 'FBA', 'Configuration': 'TDEV',
'CompressionDisabled': 'True', 'ReplicationEnabled': 'True',
'R2-DeviceID': self.data.device_id2,
'R2-ArrayID': self.data.remote_array,
'R2-ArrayModel': self.data.array_model,
'ReplicationMode': 'Synchronized',
'RDFG-Label': self.data.rdf_group_name_1,
'R1-RDFG': 1, 'R2-RDFG': 1}
array = self.data.array
device_id = self.data.device_id
act_metadata = self.common.get_volume_metadata(array, device_id)
self.assertEqual(ref_metadata, act_metadata)
@mock.patch.object(rest.PowerMaxRest, '_get_private_volume',
return_value=tpd.PowerMaxData.
priv_vol_response_metro_active_rep)
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=(tpd.PowerMaxData.array_model, None))
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_group',
return_value=(tpd.PowerMaxData.rdf_group_details))
def test_get_volume_metadata_metro_active_rep(self, mck_rdf,
mck_model, mck_priv):
ref_metadata = {
'DeviceID': self.data.device_id,
'DeviceLabel': self.data.device_label, 'ArrayID': self.data.array,
'ArrayModel': self.data.array_model, 'ServiceLevel': 'None',
'Workload': 'None', 'Emulation': 'FBA', 'Configuration': 'TDEV',
'CompressionDisabled': 'True', 'ReplicationEnabled': 'True',
'R2-DeviceID': self.data.device_id2,
'R2-ArrayID': self.data.remote_array,
'R2-ArrayModel': self.data.array_model,
'ReplicationMode': 'Metro',
'RDFG-Label': self.data.rdf_group_name_1,
'R1-RDFG': 1, 'R2-RDFG': 1}
array = self.data.array
device_id = self.data.device_id
act_metadata = self.common.get_volume_metadata(array, device_id)
self.assertEqual(ref_metadata, act_metadata)
@mock.patch.object(rest.PowerMaxRest, '_get_private_volume',
return_value=tpd.PowerMaxData.priv_vol_response_no_rep)
@mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
return_value=(tpd.PowerMaxData.array_model, None))
def test_get_volume_metadata_no_rep(self, mck_model, mck_priv):
ref_metadata = {
'DeviceID': self.data.device_id,
'DeviceLabel': self.data.device_label, 'ArrayID': self.data.array,
'ArrayModel': self.data.array_model, 'ServiceLevel': 'None',
'Workload': 'None', 'Emulation': 'FBA', 'Configuration': 'TDEV',
'CompressionDisabled': 'True', 'ReplicationEnabled': 'False'}
array = self.data.array
device_id = self.data.device_id
act_metadata = self.common.get_volume_metadata(array, device_id)
self.assertEqual(ref_metadata, act_metadata)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap_info',
return_value=tpd.PowerMaxData.priv_snap_response)
def test_get_snapshot_metadata(self, mck_snap):
array = self.data.array
device_id = self.data.device_id
device_label = self.data.managed_snap_id
snap_name = self.data.test_snapshot_snap_name
ref_metadata = {'SnapshotLabel': snap_name,
'SourceDeviceID': device_id,
'SourceDeviceLabel': device_label,
'SnapIdList': six.text_type(self.data.snap_id),
'is_snap_id': True}
act_metadata = self.common.get_snapshot_metadata(
array, device_id, snap_name)
self.assertEqual(ref_metadata, act_metadata)
@mock.patch.object(
rest.PowerMaxRest, 'get_volume_snap_info',
return_value=(tpd.PowerMaxData.priv_snap_response_no_label))
def test_get_snapshot_metadata_no_label(self, mck_snap):
array = self.data.array
device_id = self.data.device_id
snap_name = self.data.test_snapshot_snap_name
ref_metadata = {'SnapshotLabel': snap_name,
'SourceDeviceID': device_id,
'SnapIdList': six.text_type(self.data.snap_id),
'is_snap_id': True}
act_metadata = self.common.get_snapshot_metadata(
array, device_id, snap_name)
self.assertEqual(ref_metadata, act_metadata)
def test_update_metadata(self):
model_update = {'provider_location': six.text_type(
self.data.provider_location)}
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'metadata': {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}})
existing_metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
object_metadata = {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'}
model_update = self.common.update_metadata(
model_update, existing_metadata, object_metadata)
self.assertEqual(ref_model_update, model_update)
def test_update_metadata_no_model(self):
model_update = None
ref_model_update = (
{'metadata': {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}})
existing_metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
object_metadata = {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'}
model_update = self.common.update_metadata(
model_update, existing_metadata, object_metadata)
self.assertEqual(ref_model_update, model_update)
def test_update_metadata_no_existing_metadata(self):
model_update = {'provider_location': six.text_type(
self.data.provider_location)}
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location),
'metadata': {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'}})
existing_metadata = None
object_metadata = {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'}
model_update = self.common.update_metadata(
model_update, existing_metadata, object_metadata)
self.assertEqual(ref_model_update, model_update)
def test_update_metadata_model_list_exception(self):
model_update = [{'provider_location': six.text_type(
self.data.provider_location)}]
existing_metadata = None
object_metadata = {'device-meta-key-1': 'device-meta-value-1',
'device-meta-key-2': 'device-meta-value-2'}
self.assertRaises(
exception.VolumeBackendAPIException,
self.common.update_metadata, model_update, existing_metadata,
object_metadata)
def test_remove_stale_data(self):
ret_model_update = self.common.remove_stale_data(
self.data.replication_model)
self.assertEqual(self.data.non_replication_model, ret_model_update)
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
return_value=tpd.PowerMaxData.add_volume_sg_info_dict)
def test_get_tags_of_storage_group_none(self, mock_sg):
self.assertIsNone(self.common.get_tags_of_storage_group(
self.data.array, self.data.defaultstoragegroup_name))
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
return_value=tpd.PowerMaxData.storage_group_with_tags)
def test_get_tags_of_storage_group_exists(self, mock_sg):
tag_list = self.common.get_tags_of_storage_group(
self.data.array, self.data.defaultstoragegroup_name)
self.assertEqual(tpd.PowerMaxData.sg_tags, tag_list)
@mock.patch.object(rest.PowerMaxRest, 'get_storage_group',
side_effect=exception.APIException)
def test_get_tags_of_storage_group_exception(self, mock_sg):
self.assertIsNone(self.common.get_tags_of_storage_group(
self.data.array, self.data.storagegroup_name_f))
@mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags')
@mock.patch.object(rest.PowerMaxRest, 'get_array_tags',
return_value=[])
def test_check_and_add_tags_to_storage_array(
self, mock_get_tags, mock_add_tags):
array_tag_list = ['OpenStack']
self.common._check_and_add_tags_to_storage_array(
self.data.array, array_tag_list, self.data.extra_specs)
mock_add_tags.assert_called_with(
self.data.array, array_tag_list, self.data.extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags')
@mock.patch.object(rest.PowerMaxRest, 'get_array_tags',
return_value=[])
def test_check_and_add_tags_to_storage_array_add_2_tags(
self, mock_get_tags, mock_add_tags):
array_tag_list = ['OpenStack', 'Production']
self.common._check_and_add_tags_to_storage_array(
self.data.array, array_tag_list, self.data.extra_specs)
mock_add_tags.assert_called_with(
self.data.array, array_tag_list, self.data.extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags')
@mock.patch.object(rest.PowerMaxRest, 'get_array_tags',
return_value=['Production'])
def test_check_and_add_tags_to_storage_array_add_1_tags(
self, mock_get_tags, mock_add_tags):
array_tag_list = ['OpenStack', 'Production']
add_tag_list = ['OpenStack']
self.common._check_and_add_tags_to_storage_array(
self.data.array, array_tag_list, self.data.extra_specs)
mock_add_tags.assert_called_with(
self.data.array, add_tag_list, self.data.extra_specs)
@mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags')
@mock.patch.object(rest.PowerMaxRest, 'get_array_tags',
return_value=['openstack'])
def test_check_and_add_tags_to_storage_array_already_tagged(
self, mock_get_tags, mock_add_tags):
array_tag_list = ['OpenStack']
self.common._check_and_add_tags_to_storage_array(
self.data.array, array_tag_list, self.data.extra_specs)
mock_add_tags.assert_not_called()
@mock.patch.object(rest.PowerMaxRest, 'get_array_tags',
return_value=[])
def test_check_and_add_tags_to_storage_array_invalid_tag(
self, mock_get_tags):
array_tag_list = ['Open$tack']
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._check_and_add_tags_to_storage_array,
self.data.array, array_tag_list, self.data.extra_specs)
def test_validate_storage_group_tag_list_good_tag_list(self):
self.common._validate_storage_group_tag_list(
self.data.vol_type_extra_specs_tags)
@mock.patch.object(utils.PowerMaxUtils, 'verify_tag_list')
def test_validate_storage_group_tag_list_no_tag_list(
self, mock_verify):
self.common._validate_storage_group_tag_list(
self.data.extra_specs)
mock_verify.assert_not_called()
def test_set_config_file_and_get_extra_specs(self):
self.common.rep_config = {
'mode': utils.REP_METRO, utils.METROBIAS: True}
with mock.patch.object(self.utils, 'get_volumetype_extra_specs',
return_value=self.data.rep_extra_specs_metro):
extra_specs, __ = self.common._set_config_file_and_get_extra_specs(
self.data.test_volume, None)
self.assertEqual(self.data.rep_extra_specs_metro, extra_specs)
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name')
def test_retype_volume_promotion_get_extra_specs_mgmt_group(self, mck_get):
array = self.data.array
srp = self.data.srp
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
extra_specs = deepcopy(self.data.rep_extra_specs)
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
self.common.promotion = True
self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs)
self.common.promotion = False
mck_get.assert_called_once_with(extra_specs[utils.REP_CONFIG])
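    # Reminder for the heavily stacked decorators used below: mock.patch
    # decorators apply bottom-up, so the patch written closest to the test
    # function supplies the first mock argument.  A minimal sketch with
    # hypothetical names, for illustration only:
    #
    #   @mock.patch.object(Foo, 'second_call')   # -> mck_second (2nd arg)
    #   @mock.patch.object(Foo, 'first_call')    # -> mck_first (1st arg)
    #   def test_something(self, mck_first, mck_second):
    #       ...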
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=True)
@mock.patch.object(masking.PowerMaxMasking,
'return_volume_to_volume_group')
@mock.patch.object(masking.PowerMaxMasking,
'move_volume_between_storage_groups')
@mock.patch.object(
masking.PowerMaxMasking, 'get_or_create_default_storage_group',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
def test_retype_volume_detached(
self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_move_vol,
mck_return_vol, mck_is_vol):
array = self.data.array
srp = self.data.srp
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
extra_specs = deepcopy(self.data.rep_extra_specs)
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
group_name = self.data.rdf_managed_async_grp
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs, remote=True)
mck_get_rdf.assert_called_once_with(self.data.rep_config_async)
mck_get_vol.assert_called_once_with(array, device_id)
mck_get_sg.assert_called_once_with(
array, srp, target_slo, target_workload, extra_specs,
False, True, target_extra_specs['rep_mode'])
mck_move_vol.assert_called_once_with(
array, device_id, self.data.volume_details[0]['storageGroupId'][0],
group_name, extra_specs, force=True, parent_sg=None)
mck_return_vol.assert_called_once_with(
array, volume, device_id, volume_name, extra_specs)
mck_is_vol.assert_called_once_with(array, device_id, group_name)
self.assertTrue(success)
self.assertEqual(group_name, target_sg_name)
@mock.patch.object(
utils.PowerMaxUtils, 'get_port_name_label',
return_value='my_pg')
@mock.patch.object(
utils.PowerMaxUtils, 'get_volume_attached_hostname',
return_value='HostX')
@mock.patch.object(
rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True)
@mock.patch.object(
masking.PowerMaxMasking, 'return_volume_to_volume_group')
@mock.patch.object(
masking.PowerMaxMasking, 'move_volume_between_storage_groups')
@mock.patch.object(
masking.PowerMaxMasking, 'add_child_sg_to_parent_sg')
@mock.patch.object(
provision.PowerMaxProvision, 'create_storage_group')
@mock.patch.object(
rest.PowerMaxRest, 'get_storage_group',
side_effect=[None, tpd.PowerMaxData.volume_info_dict])
@mock.patch.object(
rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
@mock.patch.object(
utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
def test_retype_volume_attached(
self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_create, mck_add,
mck_move_vol, mck_return_vol, mck_is_vol, mck_host, mck_pg):
array = self.data.array
srp = self.data.srp
device_id = self.data.device_id
volume = self.data.test_attached_volume
volume_name = self.data.volume_id
extra_specs = self.data.rep_extra_specs_rep_config
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs)
mck_get_rdf.assert_called_once()
mck_get_vol.assert_called_once()
mck_create.assert_called_once()
mck_add.assert_called_once()
mck_move_vol.assert_called_once()
mck_return_vol.assert_called_once()
mck_is_vol.assert_called_once()
self.assertEqual(2, mck_get_sg.call_count)
self.assertTrue(success)
@mock.patch.object(
utils.PowerMaxUtils, 'get_port_name_label',
return_value='my_pg')
@mock.patch.object(
utils.PowerMaxUtils, 'get_volume_attached_hostname',
return_value='HostX')
@mock.patch.object(
rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True)
@mock.patch.object(
masking.PowerMaxMasking, 'return_volume_to_volume_group')
@mock.patch.object(
masking.PowerMaxMasking, 'move_volume_between_storage_groups')
@mock.patch.object(
masking.PowerMaxMasking, 'add_child_sg_to_parent_sg')
@mock.patch.object(
provision.PowerMaxProvision, 'create_storage_group')
@mock.patch.object(
rest.PowerMaxRest, 'get_storage_group',
side_effect=[None, tpd.PowerMaxData.volume_info_dict])
@mock.patch.object(
rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
@mock.patch.object(
utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
def test_retype_volume_attached_metro(
self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_create, mck_add,
mck_move_vol, mck_return_vol, mck_is_vol, mck_host, mck_pg):
array = self.data.array
srp = self.data.srp
device_id = self.data.device_id
volume = self.data.test_attached_volume
volume_name = self.data.volume_id
extra_specs = self.data.rep_extra_specs_rep_config_metro
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs, remote=True,
metro_attach=True)
mck_get_rdf.assert_called_once()
mck_get_vol.assert_called_once()
mck_create.assert_called_once()
mck_add.assert_called_once()
mck_move_vol.assert_called_once()
mck_return_vol.assert_called_once()
mck_is_vol.assert_called_once()
self.assertEqual(2, mck_get_sg.call_count)
self.assertTrue(success)
@mock.patch.object(
utils.PowerMaxUtils, 'get_volume_attached_hostname', return_value=None)
@mock.patch.object(
rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
@mock.patch.object(
utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
def test_retype_volume_attached_no_host_fail(
self, mck_get_rdf, mck_get_vol, mck_get_host):
array = self.data.array
srp = self.data.srp
device_id = self.data.device_id
volume = self.data.test_attached_volume
volume_name = self.data.volume_id
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs)
mck_get_rdf.assert_called_once()
mck_get_vol.assert_called_once()
self.assertFalse(success)
self.assertIsNone(target_sg_name)
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=False)
@mock.patch.object(masking.PowerMaxMasking,
'return_volume_to_volume_group')
@mock.patch.object(masking.PowerMaxMasking,
'move_volume_between_storage_groups')
@mock.patch.object(
masking.PowerMaxMasking, 'get_or_create_default_storage_group',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
@mock.patch.object(rest.PowerMaxRest, 'get_volume',
return_value=tpd.PowerMaxData.volume_details[0])
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name',
return_value=tpd.PowerMaxData.rdf_managed_async_grp)
def test_retype_volume_detached_vol_not_in_sg_fail(
self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_move_vol,
mck_return_vol, mck_is_vol):
array = self.data.array
srp = self.data.srp
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.volume_id
extra_specs = deepcopy(self.data.rep_extra_specs)
target_slo = self.data.slo_silver
target_workload = self.data.workload
target_extra_specs = deepcopy(self.data.rep_extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
success, target_sg_name = self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs, remote=True)
self.assertFalse(success)
self.assertIsNone(target_sg_name)
@mock.patch.object(
rest.PowerMaxRest, 'rename_volume')
@mock.patch.object(
rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
def test_get_and_set_remote_device_uuid(self, mck_get_pair, mck_rename):
extra_specs = self.data.rep_extra_specs
rep_extra_specs = self.data.rep_extra_specs_mgmt
volume_dict = {'device_id': self.data.device_id,
'device_uuid': self.data.volume_id}
remote_vol = self.common.get_and_set_remote_device_uuid(
extra_specs, rep_extra_specs, volume_dict)
self.assertEqual(remote_vol, self.data.device_id2)
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils',
return_value=(None, {'interval': 1, 'retries': 1}))
def test_get_volume_group_info(self, mock_group_utils):
self.common.interval = 1
self.common.retries = 1
with mock.patch.object(
tpfo.FakeConfiguration, 'safe_get') as mock_array:
self.common._get_volume_group_info(
self.data.test_group_1)
mock_group_utils.assert_called_once_with(
self.data.test_group_1, self.common.interval,
self.common.retries)
mock_array.assert_called_once()
def test_get_performance_config(self):
ref_cinder_conf = tpfo.FakeConfiguration(
None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_f],
load_balance=True, load_balance_real_time=True,
load_data_format='avg', load_look_back=60,
load_look_back_real_time=10, port_group_load_metric='PercentBusy',
port_load_metric='PercentBusy')
ref_perf_conf = self.data.performance_config
volume_utils.get_max_over_subscription_ratio = mock.Mock()
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=ref_cinder_conf)
self.assertEqual(ref_perf_conf, driver.common.performance.config)
def test_select_port_group_for_extra_specs_volume_type(self):
"""Test _select_port_group_for_extra_specs PG in volume-type."""
extra_specs = {utils.PORTGROUPNAME: self.data.port_group_name_i}
pool_record = {}
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record)
self.assertEqual(self.data.port_group_name_i, port_group)
def test_select_port_group_for_extra_specs_cinder_conf_single(self):
"""Test _select_port_group_for_extra_specs single PG in cinder conf."""
extra_specs = {}
pool_record = {utils.PORT_GROUP: [self.data.port_group_name_i]}
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record)
self.assertEqual(self.data.port_group_name_i, port_group)
def test_select_port_group_for_extra_specs_cinder_conf_multi(self):
"""Test _select_port_group_for_extra_specs multi PG in cinder conf.
Random selection is used, no performance configuration supplied.
"""
extra_specs = {}
pool_record = {utils.PORT_GROUP: self.data.perf_port_groups}
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record)
self.assertIn(port_group, self.data.perf_port_groups)
def test_select_port_group_for_extra_specs_load_balanced(self):
"""Test _select_port_group_for_extra_specs multi PG in cinder conf.
Load balanced selection is used, performance configuration supplied.
"""
extra_specs = {utils.ARRAY: self.data.array}
pool_record = {utils.PORT_GROUP: self.data.perf_port_groups}
self.common.performance.config = self.data.performance_config
with mock.patch.object(
self.common.performance, 'process_port_group_load',
side_effect=(
self.common.performance.process_port_group_load)) as (
mck_process):
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record, init_conn=True)
mck_process.assert_called_once_with(
self.data.array, self.data.perf_port_groups)
self.assertIn(port_group, self.data.perf_port_groups)
def test_select_port_group_for_extra_specs_exception(self):
"""Test _select_port_group_for_extra_specs exception."""
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._select_port_group_for_extra_specs, {}, {})
@mock.patch.object(
common.PowerMaxCommon, '_add_new_volume_to_volume_group',
return_value='my_group')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_add_to_group(self, mock_cond, mock_group):
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
rep_driver_data = dict()
group_name = self.common._add_to_group(
source_volume, self.data, source_volume.name,
self.data.test_group_1.fields.get('id'), self.data.test_group_1,
extra_specs, rep_driver_data)
self.assertEqual('my_group', group_name)
mock_group.assert_called_once()
@mock.patch.object(
common.PowerMaxCommon, '_add_new_volume_to_volume_group',
return_value='my_group')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_add_to_group_no_group_obj(self, mock_cond, mock_group):
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
rep_driver_data = dict()
group_name = self.common._add_to_group(
source_volume, self.data, source_volume.name,
self.data.test_group_1.fields.get('id'), None, extra_specs,
rep_driver_data)
self.assertIsNone(group_name)
mock_group.assert_not_called()
@mock.patch.object(
common.PowerMaxCommon, '_unlink_and_delete_temporary_snapshots')
@mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions',
return_value=(None, 'tgt_session'))
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(True, False, False))
def test_cleanup_device_snapvx(self, mck_is_rep, mck_find, mck_unlink):
array = self.data.array
device_id = self.data.device_id
extra_specs = self.data.extra_specs
self.common._cleanup_device_snapvx(array, device_id, extra_specs)
mck_unlink.assert_called_once_with('tgt_session', array, extra_specs)
@mock.patch.object(
common.PowerMaxCommon, '_unlink_and_delete_temporary_snapshots')
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(False, False, False))
def test_cleanup_device_snapvx_no_sessions(self, mck_is_rep, mck_unlink):
array = self.data.array
device_id = self.data.device_id
extra_specs = self.data.extra_specs
self.common._cleanup_device_snapvx(array, device_id, extra_specs)
mck_unlink.assert_not_called()
@mock.patch.object(common.PowerMaxCommon, '_delete_temp_snapshot')
@mock.patch.object(common.PowerMaxCommon, '_unlink_snapshot',
return_value=True)
def test_unlink_and_delete_temporary_snapshots_session_unlinked(
self, mck_unlink, mck_delete):
session = self.data.snap_tgt_session
array = self.data.array
extra_specs = self.data.extra_specs
self.common._unlink_and_delete_temporary_snapshots(
session, array, extra_specs)
mck_unlink.assert_called_once_with(session, array, extra_specs)
mck_delete.assert_called_once_with(session, array)
@mock.patch.object(common.PowerMaxCommon, '_delete_temp_snapshot')
@mock.patch.object(common.PowerMaxCommon, '_unlink_snapshot',
return_value=False)
def test_unlink_and_delete_temporary_snapshots_session_not_unlinked(
self, mck_unlink, mck_delete):
session = self.data.snap_tgt_session
array = self.data.array
extra_specs = self.data.extra_specs
self.common._unlink_and_delete_temporary_snapshots(
session, array, extra_specs)
mck_unlink.assert_called_once_with(session, array, extra_specs)
mck_delete.assert_not_called()
@mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume')
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
side_effect=[tpd.PowerMaxData.priv_snap_response.get(
'snapshotSrcs')[0], None])
def test_unlink_temp_snapshot(self, mck_get, mck_unlink):
array = self.data.array
extra_specs = self.data.extra_specs
session = self.data.snap_tgt_session
source = session.get('source_vol_id')
target = session.get('target_vol_id')
snap_name = session.get('snap_name')
snap_id = session.get('snapid')
loop = False
is_unlinked = self.common._unlink_snapshot(session, array, extra_specs)
mck_unlink.assert_called_once_with(
array, target, source, snap_name, extra_specs, snap_id, loop)
self.assertTrue(is_unlinked)
@mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume')
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
return_value=tpd.PowerMaxData.priv_snap_response.get(
'snapshotSrcs')[0])
def test_unlink_temp_snapshot_not_unlinked(self, mck_get, mck_unlink):
array = self.data.array
extra_specs = self.data.extra_specs
session = self.data.snap_tgt_session
source = session.get('source_vol_id')
target = session.get('target_vol_id')
snap_name = session.get('snap_name')
snap_id = session.get('snapid')
loop = False
is_unlinked = self.common._unlink_snapshot(session, array, extra_specs)
mck_unlink.assert_called_once_with(
array, target, source, snap_name, extra_specs, snap_id, loop)
self.assertFalse(is_unlinked)
@mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap')
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
return_value=dict())
def test_delete_temp_snapshot(self, mck_get, mck_delete):
session = self.data.snap_tgt_session
array = self.data.array
snap_name = session.get('snap_name')
source = session.get('source_vol_id')
snap_id = session.get('snapid')
self.common._delete_temp_snapshot(session, array)
mck_delete.assert_called_once_with(array, snap_name, source, snap_id)
@mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap')
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
return_value={'linkedDevices': 'details'})
def test_delete_temp_snapshot_is_linked(self, mck_get, mck_delete):
session = self.data.snap_tgt_session
array = self.data.array
self.common._delete_temp_snapshot(session, array)
mck_delete.assert_not_called()
def test_get_replication_flags(self):
rf = self.common._get_replication_flags(
self.data.extra_specs, self.data.rep_extra_specs)
self.assertFalse(rf.was_rep_enabled)
self.assertTrue(rf.is_rep_enabled)
self.assertFalse(rf.backend_ids_differ)
self.assertEqual('Synchronous', rf.rep_mode)
self.assertEqual('Diamond', rf.target_extra_specs.get('slo'))
@mock.patch.object(
common.PowerMaxCommon, 'configure_volume_replication',
return_value=('first_vol_in_rdf_group', True,
tpd.PowerMaxData.rep_info_dict,
tpd.PowerMaxData.rep_extra_specs_mgmt, False))
def test_prep_non_rep_to_rep(self, mck_vol_rep):
volume = fake_volume.fake_volume_obj(
context='cxt', provider_location=None)
nrr = self.common._prep_non_rep_to_rep(
self.data.array, self.data.device_id, volume, False,
True, False, self.data.rep_extra_specs_rep_config)
self.assertIsInstance(nrr.model_update, dict)
self.assertFalse(nrr.rdf_pair_created)
self.assertIsInstance(nrr.rep_extra_specs, dict)
self.assertIsInstance(nrr.rep_info_dict, dict)
self.assertFalse(nrr.resume_target_sg)
self.assertEqual('first_vol_in_rdf_group', nrr.rep_status)
@mock.patch.object(
common.PowerMaxCommon, 'break_rdf_device_pair_session',
return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True))
def test_prep_rep_to_non_rep(self, mock_break):
volume = fake_volume.fake_volume_obj(
context='cxt', provider_location=None)
rnr = self.common._prep_rep_to_non_rep(
self.data.array, self.data.device_id, 'my_vol', volume, True,
False, False, self.data.extra_specs)
self.assertIsInstance(rnr.model_update, dict)
self.assertIsInstance(rnr.resume_original_sg_dict, dict)
self.assertTrue(rnr.rdf_pair_broken)
self.assertTrue(rnr.resume_original_sg)
self.assertFalse(rnr.is_partitioned)
| openstack/cinder | cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py | Python | apache-2.0 | 235,073 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mfcc_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import mfcc_ops
from tensorflow.python.platform import test
# TODO(rjryan): We have no open source tests for MFCCs at the moment. Internally
# at Google, this code is tested against a reference implementation that follows
# HTK conventions.
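# For context, mfccs_from_log_mel_spectrograms is the last step of the usual
# feature pipeline.  A rough sketch of that pipeline, assuming the companion
# tf.signal STFT/mel helpers (parameter values here are illustrative only):
#
#   stfts = tf.signal.stft(pcm, frame_length=400, frame_step=160)
#   spectrograms = tf.abs(stfts)
#   mel_weights = tf.signal.linear_to_mel_weight_matrix(num_mel_bins=80)
#   mel_spectrograms = tf.tensordot(spectrograms, mel_weights, 1)
#   log_mel = tf.math.log(mel_spectrograms + 1e-6)
#   mfccs = tf.signal.mfccs_from_log_mel_spectrograms(log_mel)[..., :13]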
class MFCCTest(test.TestCase):
def test_error(self):
# num_mel_bins must be positive.
with self.assertRaises(ValueError):
signal = array_ops.zeros((2, 3, 0))
mfcc_ops.mfccs_from_log_mel_spectrograms(signal)
# signal must be float32
with self.assertRaises(ValueError):
signal = array_ops.zeros((2, 3, 5), dtype=dtypes.float64)
mfcc_ops.mfccs_from_log_mel_spectrograms(signal)
def test_basic(self):
"""A basic test that the op runs on random input."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signal = random_ops.random_normal((2, 3, 5))
mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()
def test_unknown_shape(self):
"""A test that the op runs when shape and rank are unknown."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signal = array_ops.placeholder_with_default(
random_ops.random_normal((2, 3, 5)), tensor_shape.TensorShape(None))
self.assertIsNone(signal.shape.ndims)
mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()
if __name__ == "__main__":
test.main()
| hehongliang/tensorflow | tensorflow/python/kernel_tests/signal/mfcc_ops_test.py | Python | apache-2.0 | 2,543 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle
import pytest
import pyarrow as pa
import pyarrow.types as types
MANY_TYPES = [
pa.null(),
pa.bool_(),
pa.int32(),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.timestamp('us'),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz='Europe/Paris'),
pa.float16(),
pa.float32(),
pa.float64(),
pa.decimal128(19, 4),
pa.string(),
pa.binary(),
pa.binary(10),
pa.list_(pa.int32()),
pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
# XXX Needs array pickling
# pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
]
def test_is_boolean():
assert types.is_boolean(pa.bool_())
assert not types.is_boolean(pa.int8())
def test_is_integer():
signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
for t in signed_ints + unsigned_ints:
assert types.is_integer(t)
for t in signed_ints:
assert types.is_signed_integer(t)
assert not types.is_unsigned_integer(t)
for t in unsigned_ints:
assert types.is_unsigned_integer(t)
assert not types.is_signed_integer(t)
assert not types.is_integer(pa.float32())
assert not types.is_signed_integer(pa.float32())
def test_is_floating():
for t in [pa.float16(), pa.float32(), pa.float64()]:
assert types.is_floating(t)
assert not types.is_floating(pa.int32())
def test_is_null():
assert types.is_null(pa.null())
assert not types.is_null(pa.list_(pa.int32()))
def test_is_decimal():
assert types.is_decimal(pa.decimal128(19, 4))
assert not types.is_decimal(pa.int32())
def test_is_list():
assert types.is_list(pa.list_(pa.int32()))
assert not types.is_list(pa.int32())
def test_is_dictionary():
assert types.is_dictionary(
pa.dictionary(pa.int32(),
pa.array(['a', 'b', 'c'])))
assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
struct_ex = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())])
assert types.is_struct(struct_ex)
assert not types.is_struct(pa.list_(pa.int32()))
assert types.is_nested(struct_ex)
assert types.is_nested(pa.list_(pa.int32()))
assert not types.is_nested(pa.int32())
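# Arrow defines two union layouts: in a sparse union every child array has the
# same length as the union itself, while a dense union carries an extra
# offsets buffer so each child only stores values of its own type.  Both
# modes are exercised here and again in test_union_type further down.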
def test_is_union():
for mode in [pa.lib.UnionMode_SPARSE, pa.lib.UnionMode_DENSE]:
assert types.is_union(pa.union([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())],
mode=mode))
assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
assert types.is_binary(pa.binary())
assert not types.is_binary(pa.string())
assert types.is_string(pa.string())
assert types.is_unicode(pa.string())
assert not types.is_string(pa.binary())
assert types.is_fixed_size_binary(pa.binary(5))
assert not types.is_fixed_size_binary(pa.binary())
def test_is_temporal_date_time_timestamp():
date_types = [pa.date32(), pa.date64()]
time_types = [pa.time32('s'), pa.time64('ns')]
timestamp_types = [pa.timestamp('ms')]
for case in date_types + time_types + timestamp_types:
assert types.is_temporal(case)
for case in date_types:
assert types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
for case in time_types:
assert types.is_time(case)
assert not types.is_date(case)
assert not types.is_timestamp(case)
for case in timestamp_types:
assert types.is_timestamp(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_temporal(pa.int32())
def test_timestamp_type():
# See ARROW-1683
assert isinstance(pa.timestamp('ns'), pa.TimestampType)
def test_union_type():
def check_fields(ty, fields):
assert ty.num_children == len(fields)
assert [ty[i] for i in range(ty.num_children)] == fields
fields = [pa.field('x', pa.list_(pa.int32())),
pa.field('y', pa.binary())]
for mode in ('sparse', pa.lib.UnionMode_SPARSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'sparse'
check_fields(ty, fields)
for mode in ('dense', pa.lib.UnionMode_DENSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'dense'
check_fields(ty, fields)
for mode in ('unknown', 2):
with pytest.raises(ValueError, match='Invalid union mode'):
pa.union(fields, mode=mode)
def test_types_hashable():
in_dict = {}
for i, type_ in enumerate(MANY_TYPES):
assert hash(type_) == hash(type_)
in_dict[type_] = i
assert in_dict[type_] == i
assert len(in_dict) == len(MANY_TYPES)
def test_types_picklable():
for ty in MANY_TYPES:
data = pickle.dumps(ty)
assert pickle.loads(data) == ty
@pytest.mark.parametrize('t,check_func', [
(pa.date32(), types.is_date32),
(pa.date64(), types.is_date64),
(pa.time32('s'), types.is_time32),
(pa.time64('ns'), types.is_time64),
(pa.int8(), types.is_int8),
(pa.int16(), types.is_int16),
(pa.int32(), types.is_int32),
(pa.int64(), types.is_int64),
(pa.uint8(), types.is_uint8),
(pa.uint16(), types.is_uint16),
(pa.uint32(), types.is_uint32),
(pa.uint64(), types.is_uint64),
(pa.float16(), types.is_float16),
(pa.float32(), types.is_float32),
(pa.float64(), types.is_float64)
])
def test_exact_primitive_types(t, check_func):
assert check_func(t)
def test_fixed_size_binary_byte_width():
ty = pa.binary(5)
assert ty.byte_width == 5
def test_decimal_byte_width():
ty = pa.decimal128(19, 4)
assert ty.byte_width == 16
| yufeldman/arrow | python/pyarrow/tests/test_types.py | Python | apache-2.0 | 7,112 |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import datetime
import unittest
from requests import Session
from azure.storage import (
AccessPolicy,
SharedAccessPolicy,
SignedIdentifier,
SignedIdentifiers,
)
from azure.storage.queue import (
QueueService,
QueueSharedAccessPermissions,
)
from azure.common import (
AzureHttpError,
AzureConflictHttpError,
AzureMissingResourceHttpError,
)
from tests.common_recordingtestcase import (
TestMode,
record,
)
from tests.storage_testcase import StorageTestCase
#------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'mytestqueue'
#------------------------------------------------------------------------------
class StorageQueueTest(StorageTestCase):
def setUp(self):
super(StorageQueueTest, self).setUp()
self.qs = self._create_storage_service(QueueService, self.settings)
self.test_queues = []
self.creatable_queues = []
for i in range(10):
self.test_queues.append(self.get_resource_name(TEST_QUEUE_PREFIX + str(i)))
for i in range(4):
self.creatable_queues.append(
self.get_resource_name('mycreatablequeue' + str(i)))
if not self.is_playback():
for queue_name in self.test_queues:
self.qs.create_queue(queue_name)
def tearDown(self):
if not self.is_playback():
for queue_name in self.test_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
for queue_name in self.creatable_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
return super(StorageQueueTest, self).tearDown()
def _get_shared_access_policy(self, permission):
date_format = "%Y-%m-%dT%H:%M:%SZ"
start = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
expiry = start + datetime.timedelta(hours=1)
return SharedAccessPolicy(
AccessPolicy(
start.strftime(date_format),
expiry.strftime(date_format),
permission
)
)
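    # (Editor's note) The access policy above starts one minute in the past
    # and expires an hour later; backdating the start is a common way to
    # tolerate small clock skew between client and service.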
@record
def test_get_service_properties(self):
# This api doesn't apply to local storage
if self.qs.use_local_storage:
return
# Action
properties = self.qs.get_queue_service_properties()
# Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.hour_metrics)
self.assertIsNotNone(properties.hour_metrics.retention_policy)
self.assertIsNotNone(properties.hour_metrics.version)
self.assertIsNotNone(properties.minute_metrics)
self.assertIsNotNone(properties.minute_metrics.retention_policy)
self.assertIsNotNone(properties.minute_metrics.version)
@record
def test_set_service_properties(self):
# This api doesn't apply to local storage
if self.qs.use_local_storage:
return
# Action
queue_properties = self.qs.get_queue_service_properties()
queue_properties.logging.read = True
self.qs.set_queue_service_properties(queue_properties)
properties = self.qs.get_queue_service_properties()
# Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.hour_metrics)
self.assertIsNotNone(properties.hour_metrics.retention_policy)
self.assertIsNotNone(properties.hour_metrics.version)
self.assertIsNotNone(properties.minute_metrics)
self.assertIsNotNone(properties.minute_metrics.retention_policy)
self.assertIsNotNone(properties.minute_metrics.version)
self.assertTrue(properties.logging.read)
@record
def test_create_queue(self):
# Action
self.qs.create_queue(self.creatable_queues[0])
result = self.qs.get_queue_metadata(self.creatable_queues[0])
self.qs.delete_queue(self.creatable_queues[0])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
@record
def test_create_queue_already_exist(self):
# Action
created1 = self.qs.create_queue(self.creatable_queues[0])
created2 = self.qs.create_queue(self.creatable_queues[0])
# Asserts
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_queue_fail_on_exist(self):
# Action
created = self.qs.create_queue(self.creatable_queues[0], None, True)
with self.assertRaises(AzureConflictHttpError):
self.qs.create_queue(self.creatable_queues[0], None, True)
# Asserts
self.assertTrue(created)
@record
def test_create_queue_with_options(self):
# Action
self.qs.create_queue(
self.creatable_queues[1],
x_ms_meta_name_values={'val1': 'test', 'val2': 'blah'})
result = self.qs.get_queue_metadata(self.creatable_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(3, len(result))
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
self.assertEqual('test', result['x-ms-meta-val1'])
self.assertEqual('blah', result['x-ms-meta-val2'])
@record
def test_delete_queue_not_exist(self):
# Action
deleted = self.qs.delete_queue(self.creatable_queues[0])
# Asserts
self.assertFalse(deleted)
@record
def test_delete_queue_fail_not_exist_not_exist(self):
# Action
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.delete_queue(self.creatable_queues[0], True)
# Asserts
@record
def test_delete_queue_fail_not_exist_already_exist(self):
# Action
created = self.qs.create_queue(self.creatable_queues[0])
deleted = self.qs.delete_queue(self.creatable_queues[0], True)
# Asserts
self.assertTrue(created)
self.assertTrue(deleted)
@record
def test_list_queues(self):
# Action
queues = self.qs.list_queues()
for queue in queues:
pass
# Asserts
self.assertIsNotNone(queues)
self.assertEqual('', queues.marker)
self.assertEqual(0, queues.max_results)
self.assertTrue(len(self.test_queues) <= len(queues))
@record
def test_list_queues_with_options(self):
# Action
queues_1 = self.qs.list_queues(prefix=TEST_QUEUE_PREFIX, maxresults=3)
queues_2 = self.qs.list_queues(
prefix=TEST_QUEUE_PREFIX,
marker=queues_1.next_marker,
include='metadata')
# Asserts
self.assertIsNotNone(queues_1)
self.assertEqual(3, len(queues_1))
self.assertEqual(3, queues_1.max_results)
self.assertEqual('', queues_1.marker)
self.assertIsNotNone(queues_1[0])
self.assertIsNone(queues_1[0].metadata)
self.assertNotEqual('', queues_1[0].name)
# Asserts
self.assertIsNotNone(queues_2)
self.assertTrue(len(self.test_queues) - 3 <= len(queues_2))
self.assertEqual(0, queues_2.max_results)
self.assertEqual(queues_1.next_marker, queues_2.marker)
self.assertIsNotNone(queues_2[0])
self.assertIsNotNone(queues_2[0].metadata)
self.assertNotEqual('', queues_2[0].name)
@record
def test_set_queue_metadata(self):
# Action
self.qs.create_queue(self.creatable_queues[2])
self.qs.set_queue_metadata(
self.creatable_queues[2],
x_ms_meta_name_values={'val1': 'test', 'val2': 'blah'})
result = self.qs.get_queue_metadata(self.creatable_queues[2])
self.qs.delete_queue(self.creatable_queues[2])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(3, len(result))
self.assertEqual('0', result['x-ms-approximate-messages-count'])
self.assertEqual('test', result['x-ms-meta-val1'])
self.assertEqual('blah', result['x-ms-meta-val2'])
@record
def test_put_message(self):
# Action. No exception means pass. No asserts needed.
self.qs.put_message(self.test_queues[0], 'message1')
self.qs.put_message(self.test_queues[0], 'message2')
self.qs.put_message(self.test_queues[0], 'message3')
self.qs.put_message(self.test_queues[0], 'message4')
@record
def test_get_messages(self):
# Action
self.qs.put_message(self.test_queues[1], 'message1')
self.qs.put_message(self.test_queues[1], 'message2')
self.qs.put_message(self.test_queues[1], 'message3')
self.qs.put_message(self.test_queues[1], 'message4')
result = self.qs.get_messages(self.test_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_get_messages_with_options(self):
# Action
self.qs.put_message(self.test_queues[2], 'message1')
self.qs.put_message(self.test_queues[2], 'message2')
self.qs.put_message(self.test_queues[2], 'message3')
self.qs.put_message(self.test_queues[2], 'message4')
result = self.qs.get_messages(
self.test_queues[2], numofmessages=4, visibilitytimeout=20)
# Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_peek_messages(self):
# Action
self.qs.put_message(self.test_queues[3], 'message1')
self.qs.put_message(self.test_queues[3], 'message2')
self.qs.put_message(self.test_queues[3], 'message3')
self.qs.put_message(self.test_queues[3], 'message4')
result = self.qs.peek_messages(self.test_queues[3])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
@record
def test_peek_messages_with_options(self):
# Action
self.qs.put_message(self.test_queues[4], 'message1')
self.qs.put_message(self.test_queues[4], 'message2')
self.qs.put_message(self.test_queues[4], 'message3')
self.qs.put_message(self.test_queues[4], 'message4')
result = self.qs.peek_messages(self.test_queues[4], numofmessages=4)
# Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
@record
def test_clear_messages(self):
# Action
self.qs.put_message(self.test_queues[5], 'message1')
self.qs.put_message(self.test_queues[5], 'message2')
self.qs.put_message(self.test_queues[5], 'message3')
self.qs.put_message(self.test_queues[5], 'message4')
self.qs.clear_messages(self.test_queues[5])
result = self.qs.peek_messages(self.test_queues[5])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(0, len(result))
@record
def test_delete_message(self):
# Action
self.qs.put_message(self.test_queues[6], 'message1')
self.qs.put_message(self.test_queues[6], 'message2')
self.qs.put_message(self.test_queues[6], 'message3')
self.qs.put_message(self.test_queues[6], 'message4')
result = self.qs.get_messages(self.test_queues[6])
self.qs.delete_message(
self.test_queues[6], result[0].message_id, result[0].pop_receipt)
result2 = self.qs.get_messages(self.test_queues[6], numofmessages=32)
# Asserts
self.assertIsNotNone(result2)
self.assertEqual(3, len(result2))
@record
def test_update_message(self):
# Action
self.qs.put_message(self.test_queues[7], 'message1')
list_result1 = self.qs.get_messages(self.test_queues[7])
self.qs.update_message(self.test_queues[7],
list_result1[0].message_id,
'new text',
list_result1[0].pop_receipt,
visibilitytimeout=0)
list_result2 = self.qs.get_messages(self.test_queues[7])
# Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('new text', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
def test_sas_read(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.READ),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.peek_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
def test_sas_add(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.ADD),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.put_message(self.test_queues[0], 'addedmessage')
# Assert
result = self.qs.get_messages(self.test_queues[0])
self.assertEqual('addedmessage', result[0].message_text)
def test_sas_update(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.UPDATE),
)
result = self.qs.get_messages(self.test_queues[0])
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
service.update_message(
self.test_queues[0],
result[0].message_id,
'updatedmessage1',
result[0].pop_receipt,
visibilitytimeout=0,
)
# Assert
result = self.qs.get_messages(self.test_queues[0])
self.assertEqual('updatedmessage1', result[0].message_text)
def test_sas_process(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.PROCESS),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.get_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
def test_sas_signed_identifier(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2018-10-12'
si.access_policy.permission = QueueSharedAccessPermissions.READ
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
SharedAccessPolicy(signed_identifier=si.id),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.peek_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
@record
def test_get_queue_acl(self):
# Arrange
# Act
acl = self.qs.get_queue_acl(self.test_queues[0])
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
@record
def test_get_queue_acl_iter(self):
# Arrange
# Act
acl = self.qs.get_queue_acl(self.test_queues[0])
for signed_identifier in acl:
pass
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
self.assertEqual(len(acl), 0)
@record
def test_get_queue_acl_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.get_queue_acl(self.creatable_queues[0])
# Assert
@record
def test_set_queue_acl(self):
# Arrange
# Act
resp = self.qs.set_queue_acl(self.test_queues[0])
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
@record
def test_set_queue_acl_with_empty_signed_identifiers(self):
# Arrange
# Act
identifiers = SignedIdentifiers()
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
@record
def test_set_queue_acl_with_signed_identifiers(self):
# Arrange
# Act
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2011-10-12'
si.access_policy.permission = QueueSharedAccessPermissions.READ
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 1)
self.assertEqual(len(acl), 1)
self.assertEqual(acl.signed_identifiers[0].id, 'testid')
self.assertEqual(acl[0].id, 'testid')
@record
def test_set_queue_acl_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.set_queue_acl(self.creatable_queues[0])
# Assert
@record
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
qc = self.qs.with_filter(my_filter)
qc.put_message(self.test_queues[7], 'message1')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
qc = self.qs.with_filter(filter_a).with_filter(filter_b)
qc.put_message(self.test_queues[7], 'message1')
self.assertEqual(called, ['b', 'a'])
@record
def test_unicode_create_queue_unicode_name(self):
# Action
self.creatable_queues[0] = u'啊齄丂狛狜'
with self.assertRaises(AzureHttpError):
# not supported - queue name must be alphanumeric, lowercase
self.qs.create_queue(self.creatable_queues[0])
# Asserts
@record
def test_unicode_get_messages_unicode_data(self):
# Action
self.qs.put_message(self.test_queues[1], u'message1㚈')
result = self.qs.get_messages(self.test_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual(u'message1㚈', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_unicode_update_message_unicode_data(self):
# Action
self.qs.put_message(self.test_queues[7], 'message1')
list_result1 = self.qs.get_messages(self.test_queues[7])
self.qs.update_message(self.test_queues[7],
list_result1[0].message_id,
u'啊齄丂狛狜',
list_result1[0].pop_receipt,
visibilitytimeout=0)
list_result2 = self.qs.get_messages(self.test_queues[7])
# Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual(u'啊齄丂狛狜', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
phonnz/azure-storage-python
|
tests/test_storage_queue.py
|
Python
|
apache-2.0
| 26,556
|
"""
Support for Eneco Slimmer stekkers (Smart Plugs).
This provides controls for the z-wave smart plugs Toon can control.
"""
import logging
from homeassistant.components.switch import SwitchDevice
import custom_components.toon as toon_main
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup discovered Smart Plugs."""
_toon_main = hass.data[toon_main.TOON_HANDLE]
switch_items = []
for plug in _toon_main.toon.smartplugs:
switch_items.append(EnecoSmartPlug(hass, plug))
add_devices_callback(switch_items)
class EnecoSmartPlug(SwitchDevice):
"""Representation of a Smart Plug."""
def __init__(self, hass, plug):
"""Initialize the Smart Plug."""
self.smartplug = plug
self.toon_data_store = hass.data[toon_main.TOON_HANDLE]
@property
def should_poll(self):
"""No polling needed with subscriptions."""
return True
@property
def unique_id(self):
"""Return the ID of this switch."""
return self.smartplug.device_uuid
@property
def name(self):
"""Return the name of the switch if any."""
return self.smartplug.name
@property
def current_power_w(self):
"""Current power usage in W."""
return self.toon_data_store.get_data('current_power', self.name)
@property
def today_energy_kwh(self):
"""Today total energy usage in kWh."""
return self.toon_data_store.get_data('today_energy', self.name)
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self.toon_data_store.get_data('current_state', self.name)
@property
def available(self):
"""True if switch is available."""
return self.smartplug.can_toggle
def turn_on(self, **kwargs):
"""Turn the switch on."""
return self.smartplug.turn_on()
def turn_off(self):
"""Turn the switch off."""
return self.smartplug.turn_off()
def update(self):
"""Update state."""
self.toon_data_store.update()
|
krocat/ToonHA
|
toon/switch.py
|
Python
|
apache-2.0
| 2,141
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq unbind client --cluster`."""
from aquilon.aqdb.model import Cluster
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.unbind_client_hostname import \
CommandUnbindClientHostname
class CommandUnbindClientCluster(CommandUnbindClientHostname):
required_parameters = ["cluster", "service"]
def get_dbobj(self, session, cluster=None, **_):
return Cluster.get_unique(session, cluster, compel=True)
|
quattor/aquilon
|
lib/aquilon/worker/commands/unbind_client_cluster.py
|
Python
|
apache-2.0
| 1,222
|
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: May 2015
File name: IRIS_DF_Controller.py
Organization: RISC Lab, Utah State University
Notes:
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from std_msgs.msg import Bool
from roscopter.msg import Status
#=====================#
# Gain Matrices #
#=====================#
K = np.matrix([[ 1.8, 0, 0, 1.4, 0, 0, 0],\
[ 0, 1.8, 0, 0, 1.4, 0, 0],\
[ 0, 0, 3, 0, 0, 5, 0],\
[ 0, 0, 0, 0, 0, 0,.5]])
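# (Editor's note) K is 4x7: it maps the error state assembled in
# Basic_Controller(), [x, y, z, xdot, ydot, zdot, psi], onto the four
# commanded channels used below (x/y/z acceleration terms that become tilt
# and thrust, plus yaw rate).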
#========================#
# Globals #
#========================#
nominal_thrust = 0 # thrust necessary to maintain hover given battery level
phi_scale = 3.053261127645355
phi_trim = 0.0#0.058941904209906
theta_scale = 3.815398742249453
theta_trim = 0.0#-0.091216767651723
ctrl_status = False
states = Cortex()
states.Obj = [States()]*1
traj = Trajectories()
traj.Obj = [Trajectory()]*1
euler_max = 45*np.pi/180
max_yaw_rate = .3490659 #in radians/sec
rate = 45 # Hz
image = 0
start_time = 0
#==================#
# Publishers #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
global states
states = S
#=====================#
# Get Trajectory #
#=====================#
def GetTraj(S):
global traj
traj = S
#=========================#
# Get Battery Status #
#=========================#
def GetBatt(S):
global nominal_thrust
B = S.battery_remaining
# coefficients for fourth order fit
# determined 11 May 2015 by Spencer Maughan and Ishmaal Erekson
c0 = 0.491674747062374
c1 = -0.024809293286468
c2 = 0.000662710609466
c3 = -0.000008160593348
c4 = 0.000000033699651
nominal_thrust = c0+c1*B+c2*B**2+c3*B**3+c4*B**4
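#===========================================#
#  Battery Fit Helper (editor's example)    #
#===========================================#
# Illustrative only, not part of the original node: a standalone restatement
# of the quartic battery fit above, handy for checking the coefficients
# offline with the same battery_remaining value that GetBatt() receives.
def battery_to_nominal_thrust(B):
    c0 = 0.491674747062374
    c1 = -0.024809293286468
    c2 = 0.000662710609466
    c3 = -0.000008160593348
    c4 = 0.000000033699651
    return c0 + c1*B + c2*B**2 + c3*B**3 + c4*B**4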
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
global ctrl_status
ctrl_status = S.data
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
global states, euler_max, max_yaw_rate, pub_ctrl,K,traj
Ctrl = Controls()
Ctrl.Obj = [Control()]*1
Ctrl.header.stamp = states.header.stamp
    g = 9.80665 # standard gravity (earth's gravitational acceleration), m/s^2
m = 1.282 # IRIS mass in kg
#===================================#
# Get State Trajectory Errors #
#===================================#
if states.Obj[0].visible:
X = np.asmatrix(np.zeros((7,1)))
X[0] = traj.Obj[0].x-states.Obj[0].x
X[1] = traj.Obj[0].y-states.Obj[0].y
X[2] = traj.Obj[0].z-states.Obj[0].z
X[3] = traj.Obj[0].xdot-states.Obj[0].u
X[4] = traj.Obj[0].ydot-states.Obj[0].v
X[5] = traj.Obj[0].zdot-states.Obj[0].w
X[6] = traj.Obj[0].psi-states.Obj[0].psi*np.pi/180
#============================================#
# Differential Flatness Control Input #
#============================================#
# LQR input
utilde = -K*X
# required input
u_r = np.asmatrix(np.zeros((4,1)))
u = utilde+u_r-np.matrix([[0],[0],[9.81],[0]])
#==================================#
# Rotate to Vehicle 1 Frame #
#==================================#
psi = states.Obj[0].psi*np.pi/180
rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
u[:-1] = Cart*rotZ*u[:-1]
#===================================#
# Normalize given the Thrust #
#===================================#
T = sqrt(u[0:3].T*u[0:3])
u[:-1] = np.divide(u[:-1],-T)
#==================#
# Set Controls #
#==================#
# Controls for Ardrone
# -phi = right... +phi = left
# -theta = back... +theta = forward
# -psi = right... +psi = left
global phi_trim,theta_trim,phi_scale,theta_scale
phi_d = (asin(u[1,-1]))
theta_d = (-asin(u[0,-1]))
ctrl = Control()
ctrl.name = states.Obj[0].name
ctrl.phi = phi_trim + phi_scale*phi_d
ctrl.theta = theta_trim + theta_scale*theta_d
ctrl.psi = -u[3,-1]/max_yaw_rate
global nominal_thrust
T_d = nominal_thrust+(T-g)/g
ctrl.T = T_d
Ctrl.Obj[0] = ctrl
Ctrl.header = states.header
#rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
import sys
rospy.init_node('IRIS_DF_Controller')
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size=1, buff_size=2**24)
sub_traj = rospy.Subscriber('/trajectory' , Trajectories, GetTraj, queue_size=1, buff_size=2**24)
sub_Batt = rospy.Subscriber('/apm/status' , Status, GetBatt)
sub_status = rospy.Subscriber('/controller_status' , Bool, GetStatus)
Basic_Controller()
r.sleep()
|
riscmaster/risc_maap
|
risc_control/src/IRIS_DF_Controller.py
|
Python
|
bsd-2-clause
| 6,110
|
"""
Utilities to simplify the boilerplate for native lowering.
"""
import collections
import contextlib
import inspect
import functools
from enum import Enum
from numba.core import typing, types, utils, cgutils
from numba.core.typing.templates import BaseRegistryLoader
class Registry(object):
"""
A registry of function and attribute implementations.
"""
def __init__(self):
self.functions = []
self.getattrs = []
self.setattrs = []
self.casts = []
self.constants = []
def lower(self, func, *argtys):
"""
Decorate an implementation of *func* for the given argument types.
*func* may be an actual global function object, or any
pseudo-function supported by Numba, such as "getitem".
The decorated implementation has the signature
(context, builder, sig, args).
"""
def decorate(impl):
self.functions.append((impl, func, argtys))
return impl
return decorate
def _decorate_attr(self, impl, ty, attr, impl_list, decorator):
real_impl = decorator(impl, ty, attr)
impl_list.append((real_impl, attr, real_impl.signature))
return impl
def lower_getattr(self, ty, attr):
"""
Decorate an implementation of __getattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, typ, val).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.getattrs,
_decorate_getattr)
return decorate
def lower_getattr_generic(self, ty):
"""
Decorate the fallback implementation of __getattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, typ, val, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_getattr().
"""
return self.lower_getattr(ty, None)
def lower_setattr(self, ty, attr):
"""
Decorate an implementation of __setattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, sig, args).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.setattrs,
_decorate_setattr)
return decorate
def lower_setattr_generic(self, ty):
"""
Decorate the fallback implementation of __setattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, sig, args, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_setattr().
"""
return self.lower_setattr(ty, None)
def lower_cast(self, fromty, toty):
"""
Decorate the implementation of implicit conversion between
*fromty* and *toty*.
The decorated implementation will have the signature
(context, builder, fromty, toty, val).
"""
def decorate(impl):
self.casts.append((impl, (fromty, toty)))
return impl
return decorate
def lower_constant(self, ty):
"""
Decorate the implementation for creating a constant of type *ty*.
The decorated implementation will have the signature
(context, builder, ty, pyval).
"""
def decorate(impl):
self.constants.append((impl, (ty,)))
return impl
return decorate
class RegistryLoader(BaseRegistryLoader):
"""
An incremental loader for a target registry.
"""
registry_items = ('functions', 'getattrs', 'setattrs', 'casts', 'constants')
# Global registry for implementations of builtin operations
# (functions, attributes, type casts)
builtin_registry = Registry()
lower_builtin = builtin_registry.lower
lower_getattr = builtin_registry.lower_getattr
lower_getattr_generic = builtin_registry.lower_getattr_generic
lower_setattr = builtin_registry.lower_setattr
lower_setattr_generic = builtin_registry.lower_setattr_generic
lower_cast = builtin_registry.lower_cast
lower_constant = builtin_registry.lower_constant
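# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# It shows the registration protocol the Registry docstrings above describe,
# using a throwaway Registry so the global builtin_registry stays untouched.
_example_registry = Registry()
@_example_registry.lower_constant(types.boolean)
def _example_constant_bool(context, builder, ty, pyval):
    # A real lowering would emit an LLVM constant; this mirrors the
    # context.get_constant() calls used elsewhere in this module.
    return context.get_constant(ty, pyval)
@_example_registry.lower(len, types.Any)
def _example_len_impl(context, builder, sig, args):
    # Stub body: a real implementation would build IR through *builder* and
    # return an LLVM value matching sig.return_type.
    raise NotImplementedError("illustrative stub")
# ---------------------------------------------------------------------------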
def _decorate_getattr(impl, ty, attr):
real_impl = impl
if attr is not None:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value)
else:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value, attr)
res.signature = (ty,)
res.attr = attr
return res
def _decorate_setattr(impl, ty, attr):
real_impl = impl
if attr is not None:
def res(context, builder, sig, args, attr):
return real_impl(context, builder, sig, args)
else:
def res(context, builder, sig, args, attr):
return real_impl(context, builder, sig, args, attr)
res.signature = (ty, types.Any)
res.attr = attr
return res
def fix_returning_optional(context, builder, sig, status, retval):
# Reconstruct optional return type
if isinstance(sig.return_type, types.Optional):
value_type = sig.return_type.type
optional_none = context.make_optional_none(builder, value_type)
retvalptr = cgutils.alloca_once_value(builder, optional_none)
with builder.if_then(builder.not_(status.is_none)):
optional_value = context.make_optional_value(
builder, value_type, retval,
)
builder.store(optional_value, retvalptr)
retval = builder.load(retvalptr)
return retval
def user_function(fndesc, libs):
"""
A wrapper inserting code calling Numba-compiled *fndesc*.
"""
def imp(context, builder, sig, args):
func = context.declare_function(builder.module, fndesc)
# env=None assumes this is a nopython function
status, retval = context.call_conv.call_function(
builder, func, fndesc.restype, fndesc.argtypes, args)
with cgutils.if_unlikely(builder, status.is_error):
context.call_conv.return_status_propagate(builder, status)
assert sig.return_type == fndesc.restype
# Reconstruct optional return type
retval = fix_returning_optional(context, builder, sig, status, retval)
# If the data representations don't match up
if retval.type != context.get_value_type(sig.return_type):
msg = "function returned {0} but expect {1}"
raise TypeError(msg.format(retval.type, sig.return_type))
return impl_ret_new_ref(context, builder, fndesc.restype, retval)
imp.signature = fndesc.argtypes
imp.libs = tuple(libs)
return imp
def user_generator(gendesc, libs):
"""
A wrapper inserting code calling Numba-compiled *gendesc*.
"""
def imp(context, builder, sig, args):
func = context.declare_function(builder.module, gendesc)
# env=None assumes this is a nopython function
status, retval = context.call_conv.call_function(
builder, func, gendesc.restype, gendesc.argtypes, args)
# Return raw status for caller to process StopIteration
return status, retval
imp.libs = tuple(libs)
return imp
def iterator_impl(iterable_type, iterator_type):
"""
    Decorate a given class as implementing *iterator_type*
(by providing an `iternext()` method).
"""
def wrapper(cls):
# These are unbound methods
iternext = cls.iternext
@iternext_impl(RefType.BORROWED)
def iternext_wrapper(context, builder, sig, args, result):
(value,) = args
iterobj = cls(context, builder, value)
return iternext(iterobj, context, builder, result)
lower_builtin('iternext', iterator_type)(iternext_wrapper)
return cls
return wrapper
class _IternextResult(object):
"""
A result wrapper for iteration, passed by iternext_impl() into the
wrapped function.
"""
__slots__ = ('_context', '_builder', '_pairobj')
def __init__(self, context, builder, pairobj):
self._context = context
self._builder = builder
self._pairobj = pairobj
def set_exhausted(self):
"""
Mark the iterator as exhausted.
"""
self._pairobj.second = self._context.get_constant(types.boolean, False)
def set_valid(self, is_valid=True):
"""
Mark the iterator as valid according to *is_valid* (which must
be either a Python boolean or a LLVM inst).
"""
if is_valid in (False, True):
is_valid = self._context.get_constant(types.boolean, is_valid)
self._pairobj.second = is_valid
def yield_(self, value):
"""
Mark the iterator as yielding the given *value* (a LLVM inst).
"""
self._pairobj.first = value
def is_valid(self):
"""
Return whether the iterator is marked valid.
"""
return self._context.get_argument_value(self._builder,
types.boolean,
self._pairobj.second)
def yielded_value(self):
"""
Return the iterator's yielded value, if any.
"""
return self._pairobj.first
class RefType(Enum):
"""
Enumerate the reference type
"""
"""
A new reference
"""
NEW = 1
"""
A borrowed reference
"""
BORROWED = 2
"""
An untracked reference
"""
UNTRACKED = 3
def iternext_impl(ref_type=None):
"""
Wrap the given iternext() implementation so that it gets passed
an _IternextResult() object easing the returning of the iternext()
result pair.
    ref_type: a numba.targets.imputils.RefType member selecting how the
    returned value is reference-counted (new, borrowed or untracked).
The wrapped function will be called with the following signature:
(context, builder, sig, args, iternext_result)
"""
if ref_type not in [x for x in RefType]:
raise ValueError("ref_type must be an enum member of imputils.RefType")
def outer(func):
def wrapper(context, builder, sig, args):
pair_type = sig.return_type
pairobj = context.make_helper(builder, pair_type)
func(context, builder, sig, args,
_IternextResult(context, builder, pairobj))
if ref_type == RefType.NEW:
impl_ret = impl_ret_new_ref
elif ref_type == RefType.BORROWED:
impl_ret = impl_ret_borrowed
elif ref_type == RefType.UNTRACKED:
impl_ret = impl_ret_untracked
else:
raise ValueError("Unknown ref_type encountered")
return impl_ret(context, builder,
pair_type, pairobj._getvalue())
return wrapper
return outer
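# Editor's note: illustrative sketch only, not part of the original module.
# Shape of an iternext() implementation once wrapped by iternext_impl(); the
# wrapper supplies the _IternextResult argument described in the docstring.
@iternext_impl(RefType.BORROWED)
def _example_iternext(context, builder, sig, args, result):
    # A real implementation would read the iterator state from args[0], then
    # call result.yield_(...) and result.set_valid(...), or mark exhaustion.
    result.set_exhausted()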
def call_getiter(context, builder, iterable_type, val):
"""
Call the `getiter()` implementation for the given *iterable_type*
of value *val*, and return the corresponding LLVM inst.
"""
getiter_sig = typing.signature(iterable_type.iterator_type, iterable_type)
getiter_impl = context.get_function('getiter', getiter_sig)
return getiter_impl(builder, (val,))
def call_iternext(context, builder, iterator_type, val):
"""
Call the `iternext()` implementation for the given *iterator_type*
of value *val*, and return a convenience _IternextResult() object
reflecting the results.
"""
itemty = iterator_type.yield_type
pair_type = types.Pair(itemty, types.boolean)
iternext_sig = typing.signature(pair_type, iterator_type)
iternext_impl = context.get_function('iternext', iternext_sig)
val = iternext_impl(builder, (val,))
pairobj = context.make_helper(builder, pair_type, val)
return _IternextResult(context, builder, pairobj)
def call_len(context, builder, ty, val):
"""
Call len() on the given value. Return None if len() isn't defined on
this type.
"""
try:
len_impl = context.get_function(len, typing.signature(types.intp, ty,))
except NotImplementedError:
return None
else:
return len_impl(builder, (val,))
_ForIterLoop = collections.namedtuple('_ForIterLoop',
('value', 'do_break'))
@contextlib.contextmanager
def for_iter(context, builder, iterable_type, val):
"""
Simulate a for loop on the given iterable. Yields a namedtuple with
the given members:
- `value` is the value being yielded
    - `do_break` is a callable to break out of the loop early
"""
iterator_type = iterable_type.iterator_type
iterval = call_getiter(context, builder, iterable_type, val)
bb_body = builder.append_basic_block('for_iter.body')
bb_end = builder.append_basic_block('for_iter.end')
def do_break():
builder.branch(bb_end)
builder.branch(bb_body)
with builder.goto_block(bb_body):
res = call_iternext(context, builder, iterator_type, iterval)
with builder.if_then(builder.not_(res.is_valid()), likely=False):
builder.branch(bb_end)
yield _ForIterLoop(res.yielded_value(), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
if context.enable_nrt:
context.nrt.decref(builder, iterator_type, iterval)
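# Editor's note: illustrative sketch only, not part of the original module.
# It shows how a lowering helper might consume for_iter() to count the items
# of an arbitrary iterable; calling loop.do_break() would exit the loop early.
def _example_count_items(context, builder, iterable_type, val):
    count = cgutils.alloca_once_value(
        builder, context.get_constant(types.intp, 0))
    one = context.get_constant(types.intp, 1)
    with for_iter(context, builder, iterable_type, val) as loop:
        # loop.value is the yielded item; this sketch only counts it.
        builder.store(builder.add(builder.load(count), one), count)
    return builder.load(count)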
def impl_ret_new_ref(ctx, builder, retty, ret):
"""
The implementation returns a new reference.
"""
return ret
def impl_ret_borrowed(ctx, builder, retty, ret):
"""
The implementation returns a borrowed reference.
    This function automatically increfs the value, so that the implementation
    effectively returns a new reference.
"""
if ctx.enable_nrt:
ctx.nrt.incref(builder, retty, ret)
return ret
def impl_ret_untracked(ctx, builder, retty, ret):
"""
The return type is not a NRT object.
"""
return ret
@contextlib.contextmanager
def force_error_model(context, model_name='numpy'):
"""
Temporarily change the context's error model.
"""
from numba.core import callconv
old_error_model = context.error_model
context.error_model = callconv.create_error_model(model_name, context)
try:
yield
finally:
context.error_model = old_error_model
def numba_typeref_ctor(*args, **kwargs):
"""A stub for use internally by Numba when a call is emitted
on a TypeRef.
"""
raise NotImplementedError("This function should not be executed.")
|
sklam/numba
|
numba/core/imputils.py
|
Python
|
bsd-2-clause
| 14,752
|
import unittest
from ..gwt import GwtIO
from ...FileIO import FileIO as psopen
from .... import examples as pysal_examples
import tempfile
import os
import warnings
class test_GwtIO(unittest.TestCase):
def setUp(self):
self.test_file = test_file = pysal_examples.get_path('juvenile.gwt')
self.obj = GwtIO(test_file, 'r')
def test_close(self):
f = self.obj
f.close()
self.failUnlessRaises(ValueError, f.read)
def test_read(self):
w = self.obj.read()
self.assertEqual(168, w.n)
self.assertEqual(16.678571428571427, w.mean_neighbors)
w.transform = 'B'
self.assertEqual([1.0], w[1].values())
def test_seek(self):
self.test_read()
self.failUnlessRaises(StopIteration, self.obj.read)
self.obj.seek(0)
self.test_read()
# Commented out by CRS, GWT 'w' mode removed until we can find a good solution for retaining distances.
# see issue #153.
# Added back by CRS,
def test_write(self):
w = self.obj.read()
f = tempfile.NamedTemporaryFile(
suffix='.gwt', dir=pysal_examples.get_path(''))
fname = f.name
f.close()
o = psopen(fname, 'w')
#copy the shapefile and ID variable names from the old gwt.
# this is only available after the read() method has been called.
#o.shpName = self.obj.shpName
#o.varName = self.obj.varName
o.write(w)
o.close()
wnew = psopen(fname, 'r').read()
self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
os.remove(fname)
if __name__ == '__main__':
unittest.main()
|
sjsrey/pysal_core
|
pysal_core/io/IOHandlers/tests/test_gwt.py
|
Python
|
bsd-3-clause
| 1,656
|
from __future__ import unicode_literals
import os
import datetime
from django.test import TestCase, Client, override_settings
from django.utils import timezone
from ..models import Show, Episode, Enclosure
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
@override_settings(PODCAST_SINGULAR=False)
class PodcastTestCase(TestCase):
fixtures = [
'podcast_category.json',
]
def setUp(self):
super(PodcastTestCase, self).setUp()
self.client = Client()
# show
show = Show.objects.create(
title='All About Everything',
slug='everything',
description='All About Everything is a show about everything. Each week we dive into any subject known to man and talk about it as much as we can. Look for our podcast in the Podcasts app or in the iTunes Store',
managing_editor='john.doe@example.com',
webmaster='',
ttl=60,
subtitle='A show about everything',
summary='',
author_name='John Doe',
author_email='',
owner_name='John Doe',
owner_email='john.doe@example.com',
copyright='John Doe & Family',
image='podcast/tests/static/everything/AllAboutEverything.jpg',
explicit=False,
block=False,
complete=False,
)
show.categories.add(1, 4, 62, 63, 67)
# episode 1
episode_1 = Episode.objects.create(
show=show,
title='Shake Shake Shake Your Spices',
slug='shake-shake-shake-your-spices',
description='This week we talk about <a href="https://itunes/apple.com/us/book/antique-trader-salt-pepper/id429691295?mt=11">salt and pepper shakers</a>, comparing and contrasting pour rates, construction materials, and overall aesthetics. Come and join the party!',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-08T12:00:00', '%Y-%m-%dT%H:%M:%S')),
summary='A short primer on table spices',
image='podcast/tests/static/everything/AllAboutEverything/Episode1.jpg',
explicit=False,
block=False,
)
# episode 2
episode_2 = Episode.objects.create(
show=show,
title='Socket Wrench Shootout',
slug='socket-wrench-shootout',
description='This week we talk about metric vs. Old English socket wrenches. Which one is better? Do you really need both? Get all of your answers here.',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-09T18:00:00', '%Y-%m-%dT%H:%M:%S')),
summary='Comparing socket wrenches is fun!',
author_name='Jane Doe',
image='podcast/tests/static/everything/AllAboutEverything/Episode2.jpg',
explicit=False,
block=False,
)
# episode 3
episode_3 = Episode.objects.create(
show=show,
title='The Best Chili',
slug='best-chili',
description='This week we talk about the best Chili in the world. Which chili is better?',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-10T09:00:00', '%Y-%m-%dT%H:%M:%S')),
summary='Jane and Eric',
author_name='Jane Doe',
image='podcast/tests/static/everything/AllAboutEverything/Episode3.jpg',
explicit=False,
block=False,
)
# episode 4
episode_4 = Episode.objects.create(
show=show,
title='Red,Whine, & Blue',
slug='red-whine-blue',
description='This week we talk about surviving in a Red state if you are a Blue person. Or vice versa.',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-10T22:15:00', '%Y-%m-%dT%H:%M:%S')),
summary='Red + Blue != Purple',
author_name='Various',
image='podcast/tests/static/everything/AllAboutEverything/Episode4.jpg',
explicit=False,
block=False,
)
# enclosure 1
Enclosure.objects.create(
episode=episode_1,
file='podcast/tests/static/everything/AllAboutEverythingEpisode3.m4a',
type='audio/x-m4a',
cc=False,
)
# enclosure 2
Enclosure.objects.create(
episode=episode_2,
file='podcast/tests/static/everything/AllAboutEverythingEpisode2.mp4',
type='video/mp4',
cc=False,
)
# enclosure 3
Enclosure.objects.create(
episode=episode_3,
file='podcast/tests/static/everything/AllAboutEverythingEpisode2.m4v',
type='video/x-m4v',
cc=True,
)
# enclosure 4
Enclosure.objects.create(
episode=episode_4,
file='podcast/tests/static/everything/AllAboutEverythingEpisode4.mp3',
type='audio/mpeg',
cc=False,
)
def test_show_feed(self):
response = self.client.get(reverse('podcast:show_feed'))
with open(os.path.join(os.path.dirname(__file__), 'feed.xml'), 'r') as file_1:
xml_1 = file_1.read()
xml_2 = response.content.decode('utf-8').replace('http://testserverpodcast', 'http://testserver/podcast')
self.maxDiff = None
self.assertXMLEqual(xml_1, xml_2)
|
richardcornish/django-itunespodcast
|
podcast/tests/__init__.py
|
Python
|
bsd-3-clause
| 5,519
|
# -*- coding:utf-8 -*-
import urlparse
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.http.response import HttpResponseBadRequest
from .base import Mixin
from .. import strings
class SuccessURLAliasViewMixin(Mixin):
def get_success_url(self):
return reverse(self.success_url_alias)
class HttpRefererViewMixin(Mixin):
def get(self, request, referers=None, *args, **kwargs):
from_referer = urlparse.urlsplit(
request.META.get('HTTP_REFERER', '')).path
if referers is not None \
and all(map(lambda r: unicode(r) != from_referer, referers)):
return HttpResponseBadRequest(
strings.HTTP_REFERER_VIEW_MIXIN_FORM_VIEW_BAD_REQUEST \
% from_referer)
return self.base_impl(
HttpRefererViewMixin, self).get(request, args, kwargs)
class KwargsUserFormViewMixin(Mixin):
def get_form_kwargs(self):
kwargs = self.base_impl(KwargsUserFormViewMixin, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
|
hellhovnd/dentexchange
|
dentexchange/apps/libs/mixins/views.py
|
Python
|
bsd-3-clause
| 1,114
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'lfs_downloads.tests._settings'
import django
from django.conf import settings
from django.test.utils import get_runner
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["tests"])
sys.exit(bool(failures))
|
misaelnieto/lfs_downloads
|
runtests.py
|
Python
|
bsd-3-clause
| 412
|
"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import (
CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING, CSP_SCRIPT_SRC,
CSP_FRAME_SRC)
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
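# (Editor's note, illustrative only) dj_database_url.parse() expands a URL
# such as 'mysql://user:pw@db.example.com/marketplace' into a Django-style
# dict with ENGINE/NAME/USER/PASSWORD/HOST/PORT keys, which is why ENGINE and
# OPTIONS are then overridden separately above.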
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.CacheClass',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
# Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTION_ICONS_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.hera': {'level': logging.INFO},
'z.redis': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = (
'https://www.google.com/recaptcha/api/challenge?k=%s' %
RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'marketplace-identity-stage'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = (
"https://builder-addons.allizom.org/repackage/sdk-versions/")
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_identity_stage' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = (
'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi')
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
GEOIP_URL = 'http://geo.marketplace.allizom.org'
API_THROTTLE = False
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + ("https://firefoxos.anosrep.org",)
CSP_FRAME_SRC = CSP_FRAME_SRC + ("https://firefoxos.anosrep.org",)
AES_KEYS = private.AES_KEYS
|
Joergen/olympia
|
sites/identitystage/settings_base.py
|
Python
|
bsd-3-clause
| 5,553
|
"""
Test lldb-mi -var-xxx commands.
"""
from __future__ import print_function
import lldbmi_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class MiVarTestCase(lldbmi_testcase.MiTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_eval(self):
"""Test that 'lldb-mi --interpreter' works for evaluating."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to program return
line = line_number('main.cpp', '// BP_return')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
        # Print non-existent variable
self.runCmd("-var-create var1 * undef")
self.expect(
"\^error,msg=\"error: use of undeclared identifier \'undef\'\\\\n\"")
self.runCmd("-data-evaluate-expression undef")
self.expect(
"\^error,msg=\"error: use of undeclared identifier \'undef\'\\\\n\"")
# Print global "g_MyVar", modify, delete and create again
self.runCmd("-data-evaluate-expression g_MyVar")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-create var2 * g_MyVar")
self.expect(
"\^done,name=\"var2\",numchild=\"0\",value=\"3\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var2")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-show-attributes var2")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children var2")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Ensure -var-list-children also works with quotes
self.runCmd("-var-list-children \"var2\"")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-data-evaluate-expression \"g_MyVar=30\"")
self.expect("\^done,value=\"30\"")
self.runCmd("-var-update --all-values var2")
# self.expect("\^done,changelist=\[\{name=\"var2\",value=\"30\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# FIXME -var-update doesn't work
self.runCmd("-var-delete var2")
self.expect("\^done")
self.runCmd("-var-create var2 * g_MyVar")
self.expect(
"\^done,name=\"var2\",numchild=\"0\",value=\"30\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Print static "s_MyVar", modify, delete and create again
self.runCmd("-data-evaluate-expression s_MyVar")
self.expect("\^done,value=\"30\"")
self.runCmd("-var-create var3 * s_MyVar")
self.expect(
"\^done,name=\"var3\",numchild=\"0\",value=\"30\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var3")
self.expect("\^done,value=\"30\"")
self.runCmd("-var-show-attributes var3")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children var3")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-data-evaluate-expression \"s_MyVar=3\"")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-update --all-values var3")
# self.expect("\^done,changelist=\[\{name=\"var3\",value=\"3\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# FIXME -var-update doesn't work
self.runCmd("-var-delete var3")
self.expect("\^done")
self.runCmd("-var-create var3 * s_MyVar")
self.expect(
"\^done,name=\"var3\",numchild=\"0\",value=\"3\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Print local "b", modify, delete and create again
self.runCmd("-data-evaluate-expression b")
self.expect("\^done,value=\"20\"")
self.runCmd("-var-create var4 * b")
self.expect(
"\^done,name=\"var4\",numchild=\"0\",value=\"20\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var4")
self.expect("\^done,value=\"20\"")
self.runCmd("-var-show-attributes var4")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children var4")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-data-evaluate-expression \"b=2\"")
self.expect("\^done,value=\"2\"")
self.runCmd("-var-update --all-values var4")
# self.expect("\^done,changelist=\[\{name=\"var4\",value=\"2\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# FIXME -var-update doesn't work
self.runCmd("-var-delete var4")
self.expect("\^done")
self.runCmd("-var-create var4 * b")
self.expect(
"\^done,name=\"var4\",numchild=\"0\",value=\"2\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Print temp "a + b"
self.runCmd("-data-evaluate-expression \"a + b\"")
self.expect("\^done,value=\"12\"")
self.runCmd("-var-create var5 * \"a + b\"")
self.expect(
"\^done,name=\"var5\",numchild=\"0\",value=\"12\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var5")
self.expect("\^done,value=\"12\"")
self.runCmd("-var-show-attributes var5")
self.expect("\^done,status=\"editable\"") # FIXME editable or not?
self.runCmd("-var-list-children var5")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Print argument "argv[0]"
self.runCmd("-data-evaluate-expression \"argv[0]\"")
self.expect(
"\^done,value=\"0x[0-9a-f]+ \\\\\\\".*?%s\\\\\\\"\"" %
self.myexe)
self.runCmd("-var-create var6 * \"argv[0]\"")
self.expect(
"\^done,name=\"var6\",numchild=\"1\",value=\"0x[0-9a-f]+ \\\\\\\".*?%s\\\\\\\"\",type=\"const char \*\",thread-id=\"1\",has_more=\"0\"" %
self.myexe)
self.runCmd("-var-evaluate-expression var6")
self.expect(
"\^done,value=\"0x[0-9a-f]+ \\\\\\\".*?%s\\\\\\\"\"" %
self.myexe)
self.runCmd("-var-show-attributes var6")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children --all-values var6")
# FIXME: The name below is not correct. It should be "var.*argv[0]".
# FIXME -var-list-children shows invalid thread-id
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var6\.\*\$[0-9]+\",exp=\"\*\$[0-9]+\",numchild=\"0\",type=\"const char\",thread-id=\"4294967295\",value=\"47 '/'\",has_more=\"0\"\}\],has_more=\"0\"")
# Print an expression with spaces and optional arguments
self.runCmd("-data-evaluate-expression \"a + b\"")
self.expect("\^done,value=\"12\"")
self.runCmd("-var-create var7 * \"a + b\" --thread 1 --frame 0")
self.expect(
"\^done,name=\"var7\",numchild=\"0\",value=\"12\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfDarwin
@skipIfRemote # We do not currently support remote debugging via the MI.
def test_lldbmi_var_update(self):
"""Test that 'lldb-mi --interpreter' works for -var-update."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to BP_var_update_test_init
line = line_number('main.cpp', '// BP_var_update_test_init')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Setup variables
self.runCmd("-var-create var_l * l")
self.expect(
"\^done,name=\"var_l\",numchild=\"0\",value=\"1\",type=\"long\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_complx * complx")
self.expect(
"\^done,name=\"var_complx\",numchild=\"3\",value=\"\{\.\.\.\}\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_complx_array * complx_array")
self.expect(
"\^done,name=\"var_complx_array\",numchild=\"2\",value=\"\[2\]\",type=\"complex_type \[2\]\",thread-id=\"1\",has_more=\"0\"")
# Go to BP_var_update_test_l
line = line_number('main.cpp', '// BP_var_update_test_l')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"2\"")
self.runCmd("-exec-continue")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test that var_l was updated
self.runCmd("-var-update --all-values var_l")
self.expect(
"\^done,changelist=\[\{name=\"var_l\",value=\"0\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# Go to BP_var_update_test_complx
line = line_number('main.cpp', '// BP_var_update_test_complx')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"3\"")
self.runCmd("-exec-continue")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test that var_complx was updated
self.runCmd("-var-update --all-values var_complx")
self.expect(
"\^done,changelist=\[\{name=\"var_complx\",value=\"\{\.\.\.\}\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# Go to BP_var_update_test_complx_array
line = line_number('main.cpp', '// BP_var_update_test_complx_array')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"4\"")
self.runCmd("-exec-continue")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test that var_complx_array was updated
self.runCmd("-var-update --all-values var_complx_array")
self.expect(
"\^done,changelist=\[\{name=\"var_complx_array\",value=\"\[2\]\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_create_register(self):
"""Test that 'lldb-mi --interpreter' works for -var-create $regname."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to main
self.runCmd("-break-insert -f main")
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Find name of register 0
self.runCmd("-data-list-register-names 0")
self.expect("\^done,register-names=\[\".+?\"\]")
register_name = self.child.after.split("\"")[1]
# Create variable for register 0
# Note that the message differs between Darwin and Linux:
# Darwin: "^done,name=\"var_reg\",numchild=\"0\",value=\"0x[0-9a-f]+\",type=\"unsigned long\",thread-id=\"1\",has_more=\"0\"
# Linux:
# "^done,name=\"var_reg\",numchild=\"0\",value=\"0x[0-9a-f]+\",type=\"unsigned
# int\",thread-id=\"1\",has_more=\"0\"
self.runCmd("-var-create var_reg * $%s" % register_name)
self.expect(
"\^done,name=\"var_reg\",numchild=\"0\",value=\"0x[0-9a-f]+\",type=\"unsigned (long|int)\",thread-id=\"1\",has_more=\"0\"")
# Assign value to variable
self.runCmd("-var-assign var_reg \"6\"")
# FIXME: the output has different format for 32bit and 64bit values
self.expect("\^done,value=\"0x0*?6\"")
# Assert register 0 updated
self.runCmd("-data-list-register-values d 0")
self.expect("\^done,register-values=\[{number=\"0\",value=\"6\"")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_list_children(self):
"""Test that 'lldb-mi --interpreter' works for -var-list-children."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to BP_var_list_children_test
line = line_number('main.cpp', '// BP_var_list_children_test')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Create variable
self.runCmd("-var-create var_complx * complx")
self.expect(
"\^done,name=\"var_complx\",numchild=\"3\",value=\"\{\.\.\.\}\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_complx_array * complx_array")
self.expect(
"\^done,name=\"var_complx_array\",numchild=\"2\",value=\"\[2\]\",type=\"complex_type \[2\]\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_pcomplx * pcomplx")
self.expect(
"\^done,name=\"var_pcomplx\",numchild=\"2\",value=\"\{\.\.\.\}\",type=\"pcomplex_type\",thread-id=\"1\",has_more=\"0\"")
# Test that -var-evaluate-expression can evaluate the children of
# created varobj
self.runCmd("-var-list-children var_complx")
self.runCmd("-var-evaluate-expression var_complx.i")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-list-children var_complx_array")
self.runCmd("-var-evaluate-expression var_complx_array.[0]")
self.expect("\^done,value=\"\{...\}\"")
self.runCmd("-var-list-children var_pcomplx")
self.runCmd("-var-evaluate-expression var_pcomplx.complex_type")
self.expect("\^done,value=\"\{...\}\"")
# Test that -var-list-children lists empty children if range is empty
# (and that print-values is optional)
self.runCmd("-var-list-children var_complx 0 0")
self.expect("\^done,numchild=\"0\",has_more=\"1\"")
self.runCmd("-var-list-children var_complx 99 0")
self.expect("\^done,numchild=\"0\",has_more=\"1\"")
self.runCmd("-var-list-children var_complx 99 3")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Test that -var-list-children lists all children with their values
# (and that from and to are optional)
self.runCmd("-var-list-children --all-values var_complx")
self.expect(
"\^done,numchild=\"3\",children=\[child=\{name=\"var_complx\.i\",exp=\"i\",numchild=\"0\",type=\"int\",thread-id=\"1\",value=\"3\",has_more=\"0\"\},child=\{name=\"var_complx\.inner\",exp=\"inner\",numchild=\"1\",type=\"complex_type::\(anonymous struct\)\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\},child=\{name=\"var_complx\.complex_ptr\",exp=\"complex_ptr\",numchild=\"3\",type=\"complex_type \*\",thread-id=\"1\",value=\"0x[0-9a-f]+\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_complx_array")
self.expect(
"\^done,numchild=\"2\",children=\[child=\{name=\"var_complx_array\.\[0\]\",exp=\"\[0\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\},child=\{name=\"var_complx_array\.\[1\]\",exp=\"\[1\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children 0 var_pcomplx")
self.expect(
"\^done,numchild=\"2\",children=\[child=\{name=\"var_pcomplx\.complex_type\",exp=\"complex_type\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\},child={name=\"var_pcomplx\.complx\",exp=\"complx\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
# Test that -var-list-children lists children without values
self.runCmd("-var-list-children 0 var_complx 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.i\",exp=\"i\",numchild=\"0\",type=\"int\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --no-values var_complx 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.i\",exp=\"i\",numchild=\"0\",type=\"int\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --no-values var_complx_array 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx_array\.\[0\]\",exp=\"\[0\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --no-values var_pcomplx 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_pcomplx\.complex_type\",exp=\"complex_type\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
# Test that -var-list-children lists children with all values
self.runCmd("-var-list-children 1 var_complx 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.inner\",exp=\"inner\",numchild=\"1\",type=\"complex_type::\(anonymous struct\)\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --all-values var_complx 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.inner\",exp=\"inner\",numchild=\"1\",type=\"complex_type::\(anonymous struct\)\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --all-values var_complx_array 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx_array\.\[1\]\",exp=\"\[1\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --all-values var_pcomplx 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child={name=\"var_pcomplx\.complx\",exp=\"complx\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"0\"")
# Test that -var-list-children lists children with simple values
self.runCmd("-var-list-children 2 var_complx 2 4")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.complex_ptr\",exp=\"complex_ptr\",numchild=\"3\",type=\"complex_type \*\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_complx 2 4")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.complex_ptr\",exp=\"complex_ptr\",numchild=\"3\",type=\"complex_type \*\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_complx_array 2 4")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_pcomplx 2 4")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Test that an invalid from is handled
# FIXME: -1 is treated as unsigned int
self.runCmd("-var-list-children 0 var_complx -1 0")
#self.expect("\^error,msg=\"Command 'var-list-children'\. Variable children range invalid\"")
# Test that an invalid to is handled
# FIXME: -1 is treated as unsigned int
self.runCmd("-var-list-children 0 var_complx 0 -1")
#self.expect("\^error,msg=\"Command 'var-list-children'\. Variable children range invalid\"")
# Test that a missing low-frame or high-frame is handled
self.runCmd("-var-list-children 0 var_complx 0")
self.expect(
"\^error,msg=\"Command 'var-list-children'. Variable children range invalid\"")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_create_for_stl_types(self):
"""Test that 'lldb-mi --interpreter' print summary for STL types."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to BP_cpp_stl_types_test
line = line_number('main.cpp', '// BP_cpp_stl_types_test')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test for std::string
self.runCmd("-var-create - * std_string")
self.expect(
'\^done,name="var\d+",numchild="[0-9]+",value="\\\\"hello\\\\"",type="std::[\S]*?string",thread-id="1",has_more="0"')
@skipIfWindows # llvm.org/pr24452: Get lldb-mi working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_create_for_unnamed_objects(self):
"""Test that 'lldb-mi --interpreter' can expand unnamed structures and unions."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to breakpoint
line = line_number('main.cpp', '// BP_unnamed_objects_test')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Evaluate struct_with_unions type and its children
self.runCmd("-var-create v0 * swu")
self.expect(
'\^done,name="v0",numchild="2",value="\{\.\.\.\}",type="struct_with_unions",thread-id="1",has_more="0"')
self.runCmd("-var-list-children v0")
# inspect the first unnamed union
self.runCmd("-var-list-children v0.$0")
self.runCmd("-var-evaluate-expression v0.$0.u_i")
self.expect('\^done,value="1"')
# inspect the second unnamed union
self.runCmd("-var-list-children v0.$1")
self.runCmd("-var-evaluate-expression v0.$1.u1")
self.expect('\^done,value="-1"')
# inspect unnamed structure
self.runCmd("-var-list-children v0.$1.$1")
self.runCmd("-var-evaluate-expression v0.$1.$1.s1")
self.expect('\^done,value="-1"')
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/tools/lldb-mi/variable/TestMiVar.py
|
Python
|
bsd-3-clause
| 24,254
|
from django.test import TestCase

from api.models import UsernameSnippet


class TestUsernameSnippet(TestCase):
    @classmethod
    def setUpTestData(cls):
        UsernameSnippet.objects.create(available=True)

    def test_existence(self):
        u = UsernameSnippet.objects.first()
        self.assertIsInstance(u, UsernameSnippet)
        self.assertEqual(u.available, True)

    def test_field_types(self):
        u = UsernameSnippet.objects.first()
        self.assertIsInstance(u.available, bool)
|
jeremyphilemon/uniqna
|
api/tests/test_models.py
|
Python
|
bsd-3-clause
| 459
|
class AjaxTemplateMixin(object):
    ajax_template_name = ''
    template_name = ''

    def dispatch(self, request, *args, **kwargs):
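        # Derive the AJAX template name from template_name when none is set
        # explicitly, e.g. 'detail.html' -> 'detail_inner.html'.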
        if not self.ajax_template_name:
            split = self.template_name.split('.html')
            split[-1] = '_inner'
            split.append('.html')
            self.ajax_template_name = ''.join(split)
        if request.is_ajax():
            self.template_name = self.ajax_template_name
        return super(AjaxTemplateMixin, self).dispatch(request, *args, **kwargs)
|
savioabuga/phoenix
|
phoenix/utils/view_utils.py
|
Python
|
bsd-3-clause
| 528
|
from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
r"""
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
:func:`~mpmath.polyval` evaluates the polynomial
.. math ::
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
tuple `(P(x), P'(x))`.
>>> from mpmath import *
>>> mp.pretty = True
>>> polyval([3, 0, 2], 0.5)
2.75
>>> polyval([3, 0, 2], 0.5, derivative=True)
(2.75, 3.0)
The coefficients and the evaluation point may be any combination
of real or complex numbers.
"""
if not coeffs:
return ctx.zero
p = ctx.convert(coeffs[0])
q = ctx.zero
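# Horner's scheme: p accumulates P(x); when derivative=True, q accumulates
# P'(x) via q = p + x*q, updated before p on each step.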
for c in coeffs[1:]:
if derivative:
q = p + x*q
p = c + x*p
if derivative:
return p, q
else:
return p
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
error=False, roots_init=None):
"""
Computes all roots (real or complex) of a given polynomial.
The roots are returned as a sorted list, where real roots appear first
followed by complex conjugate roots as adjacent elements. The polynomial
should be given as a list of coefficients, in the format used by
:func:`~mpmath.polyval`. The leading coefficient must be nonzero.
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
where *err* is an estimate of the maximum error among the computed roots.
**Examples**
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(polyroots([1,-1,-14,24]), 4)
[-4.0, 2.0, 3.0]
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
error estimate::
>>> roots, err = polyroots([4,3,2], error=True)
>>> for r in roots:
... print(r)
...
(-0.375 + 0.59947894041409j)
(-0.375 - 0.59947894041409j)
>>>
>>> err
2.22044604925031e-16
>>>
>>> polyval([4,3,2], roots[0])
(2.22044604925031e-16 + 0.0j)
>>> polyval([4,3,2], roots[1])
(2.22044604925031e-16 + 0.0j)
The following example computes all the 5th roots of unity; that is,
the roots of `x^5 - 1`::
>>> mp.dps = 20
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
... print(r)
...
1.0
(-0.8090169943749474241 + 0.58778525229247312917j)
(-0.8090169943749474241 - 0.58778525229247312917j)
(0.3090169943749474241 + 0.95105651629515357212j)
(0.3090169943749474241 - 0.95105651629515357212j)
**Precision and conditioning**
The roots are computed to the current working precision accuracy. If this
accuracy cannot be achieved in ``maxsteps`` steps, then a
``NoConvergence`` exception is raised. Internally, the algorithm uses the
current working precision extended by ``extraprec``. If ``NoConvergence``
is raised, the cause is either insufficient extra precision (in which case
increasing ``extraprec`` should fix the problem), a too-small ``maxsteps``
(in which case increasing ``maxsteps`` should fix the problem), or a
combination of both.
The user should always do a convergence study with regard to
``extraprec`` to ensure accurate results. With too little ``extraprec`` it
is possible to converge to a wrong answer.
Provided there are no repeated roots, :func:`~mpmath.polyroots` can
typically compute all roots of an arbitrary polynomial to high precision::
>>> mp.dps = 60
>>> for r in polyroots([1, 0, -10, 0, 1]):
... print(r)
...
-3.14626436994197234232913506571557044551247712918732870123249
-0.317837245195782244725757617296174288373133378433432554879127
0.317837245195782244725757617296174288373133378433432554879127
3.14626436994197234232913506571557044551247712918732870123249
>>>
>>> sqrt(3) + sqrt(2)
3.14626436994197234232913506571557044551247712918732870123249
>>> sqrt(3) - sqrt(2)
0.317837245195782244725757617296174288373133378433432554879127
**Algorithm**
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
uses complex arithmetic to locate all roots simultaneously.
The Durand-Kerner method can be viewed as approximately performing
simultaneous Newton iteration for all the roots. In particular,
the convergence to simple roots is quadratic, just like Newton's
method.
Although all roots are internally calculated using complex arithmetic, any
root found to have an imaginary part smaller than the estimated numerical
error is truncated to a real number (small real parts are also chopped).
Real roots are placed first in the returned list, sorted by value. The
remaining complex roots are sorted by their real parts so that conjugate
roots end up next to each other.
**References**
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
"""
if len(coeffs) <= 1:
if not coeffs or not coeffs[0]:
raise ValueError("Input to polyroots must not be the zero polynomial")
# Constant polynomial with no roots
return []
orig = ctx.prec
tol = +ctx.eps
with ctx.extraprec(extraprec):
deg = len(coeffs) - 1
# Must be monic
lead = ctx.convert(coeffs[0])
if lead == 1:
coeffs = [ctx.convert(c) for c in coeffs]
else:
coeffs = [c/lead for c in coeffs]
f = lambda x: ctx.polyval(coeffs, x)
if roots_init is None:
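# Default starting values: powers of 0.4+0.9j, giving deg distinct non-real
# initial guesses (the customary choice for Durand-Kerner iteration).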
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
else:
roots = [None]*deg
deg_init = min(deg, len(roots_init))
roots[:deg_init] = list(roots_init[:deg_init])
roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
in xrange(deg_init,deg)]
err = [ctx.one for n in xrange(deg)]
# Durand-Kerner iteration until convergence
for step in xrange(maxsteps):
if abs(max(err)) < tol:
break
for i in xrange(deg):
p = roots[i]
x = f(p)
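# Weierstrass/Durand-Kerner correction: divide P(p) by the product of
# (p - roots[j]) over all other current approximations, then step to p - x.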
for j in range(deg):
if i != j:
try:
x /= (p-roots[j])
except ZeroDivisionError:
continue
roots[i] = p - x
err[i] = abs(x)
if abs(max(err)) >= tol:
raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
% maxsteps)
# Remove small real or imaginary parts
if cleanup:
for i in xrange(deg):
if abs(roots[i]) < tol:
roots[i] = ctx.zero
elif abs(ctx._im(roots[i])) < tol:
roots[i] = roots[i].real
elif abs(ctx._re(roots[i])) < tol:
roots[i] = roots[i].imag * 1j
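# Real roots (zero imaginary part) come first, sorted by value; complex
# roots follow, sorted by real part so conjugate pairs end up adjacent.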
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
if error:
err = max(err)
err = max(err, ctx.ldexp(1, -orig+1))
return [+r for r in roots], +err
else:
return [+r for r in roots]
|
JensGrabner/mpmath
|
mpmath/calculus/polynomials.py
|
Python
|
bsd-3-clause
| 7,877
|
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (
TestCase, assert_equal, assert_array_equal,
assert_raises, assert_raises_regex, run_module_suite
)
from numpy.testing import assert_warns, assert_, build_err_msg
from numpy.testing.utils import tempdir
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
if sys.version_info[:2] >= (2, 7):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
# Fails with UnpicklingError: could not find MARK on Python 2.6
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
def test_format_2_0(self):
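# Very long field names push the .npy header past the 64 KiB limit of
# format 1.0, forcing format 2.0, which emits a UserWarning.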
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with tempdir(prefix="numpy_test_big_arrays_") as tmpdir:
tmp = os.path.join(tmpdir, "file.npz")
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed)
# must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since it failed to trigger on
# e.g. Debian sid of 2012 Jul 05, but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir:
fd, tmp = mkstemp(suffix='.npz', dir=tmpdir)
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=np.int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking usecols together with a structured dtype.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C': lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
dtyp = [('E1','i4'),('E2','i4'),('E3','i2'),('N', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
assert_equal(test, control)
dtyp = [('E1','i4'),('E2','i4'),('N', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', np.int), ('b', np.float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another Windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
|
larsmans/numpy
|
numpy/lib/tests/test_io.py
|
Python
|
bsd-3-clause
| 66,065
|
#!/usr/bin/env python
import time,os,re,csv,sys,uuid,joblib
from datetime import date
import numpy as np
from sklearn import svm
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
def train_model(X,y,saved_model):
"""
function to train model
"""
## Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
## Specify parameters and model
params = {'C':1.0,'kernel':'linear','gamma':0.5}
clf = svm.SVC(**params,probability=True)
## fit model on training data
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test,y_pred))
## retrain using all data
clf.fit(X, y)
print("... saving model: {}".format(saved_model))
joblib.dump(clf,saved_model)
print(y_test[:5])
print(X_test[:5,:])
def _update_predict_log(y_pred,y_proba,query,runtime):
"""
update predict log file
"""
## name the logfile using something that cycles with date (day, month, year)
today = date.today()
logfile = "example-predict-{}-{}.log".format(today.year, today.month)
## write the data to a csv file
header = ['unique_id','timestamp','y_pred','y_proba','x_shape','model_version','runtime']
write_header = False
if not os.path.exists(logfile):
write_header = True
with open(logfile,'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|')
if write_header:
writer.writerow(header)
to_write = map(str,[uuid.uuid4(),time.time(),y_pred,y_proba,query.shape,MODEL_VERSION,runtime])
writer.writerow(to_write)
def predict(query):
"""
generic function for prediction
"""
## start timer for runtime
time_start = time.time()
    ## ensure the model is loaded (saved_model is defined at module scope in __main__ below)
    model = joblib.load(saved_model)

    ## input checking: promote a single 1-D sample to a 2-D array
    if len(query.shape) == 1:
        query = query.reshape(1, -1)
## make prediction and gather data for log entry
y_pred = model.predict(query)
y_proba = None
if 'predict_proba' in dir(model) and model.probability == True:
y_proba = model.predict_proba(query)
m, s = divmod(time.time()-time_start, 60)
h, m = divmod(m, 60)
runtime = "%03d:%02d:%02d"%(h, m, s)
## update the log file
_update_predict_log(y_pred,y_proba,query,runtime)
return(y_pred)
if __name__ == "__main__":
## import some data to play with
iris = datasets.load_iris()
X = iris.data[:,:2]
y = iris.target
## train the model
MODEL_VERSION = 1.0
saved_model = "example-predict-{}.joblib".format(re.sub("\.","_",str(MODEL_VERSION)))
model = train_model(X,y,saved_model)
    ## example predictions
    for query in [np.array([[6.1,2.8]]), np.array([[7.7,2.5]]), np.array([[5.8,3.8]])]:
y_pred = predict(query)
print("predicted: {}".format(y_pred))
|
ajrichards/bayesian-examples
|
python/howto-logging.py
|
Python
|
bsd-3-clause
| 3,052
|
"""
Compatibility helpers for older Python versions.
"""
import sys
PY2 = sys.version_info[0] == 2
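# Illustrative use elsewhere in the package (editorial sketch, not part of the
# original module):
#   from pypower._compat import PY2
#   if PY2:
#       ...  # Python 2 specific branch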
|
praba230890/PYPOWER
|
pypower/_compat.py
|
Python
|
bsd-3-clause
| 103
|
I = complex(0,1)
ha2ev = 27.211396132
ev2cm1 = 8065.5440044136285
bohr2ang = 0.52917720859
atomic_mass = [ None, 1.00794, 4.002602, 6.941, 9.012182,
10.811, 12.0107, 14.0067, 15.9994, 18.9984032,
20.1797, 22.98976928, 24.305,26.9815386, 28.0855,
30.973762, 32.065, 35.453, 39.948, 39.0983,
40.078, 44.955912, 47.867, 50.9415, 51.9961,
54.938045, 55.845, 58.933195, 58.6934, 63.546,
65.38, 69.723, 72.64, 74.9216, 78.96,
79.904, 83.798, 85.4678, 87.62, 88.90585,
91.224, 92.90638, 95.96, None, 101.07,
102.9055, 106.42, 107.8682, 112.411, 114.818,
118.71, 121.76, 127.6, 126.90447, 131.293,
132.9054519, 137.327, 138.90547, 140.116, 140.90765,
144.242, None, 150.36, 151.964, 157.25,
158.92535, 162.5, 164.93032, 167.259, 168.93421,
173.054, 174.9668, 178.49, 180.94788, 183.84,
186.207, 190.23, 192.217, 195.084, 196.966569,
200.59, 204.3833, 207.2, 208.9804, None,
None, None, None, None, None,
232.03806, 231.03588, 238.02891, None, None,
None, None, None, None, None,
None, None, None, None, None,
None, None, None, None, None,
None, None, None, None, None,
None, None, None, None]
chemical_symbols = ['X', 'H', 'He', 'Li', 'Be',
'B', 'C', 'N', 'O', 'F',
'Ne', 'Na', 'Mg', 'Al', 'Si',
'P', 'S', 'Cl', 'Ar', 'K',
'Ca', 'Sc', 'Ti', 'V', 'Cr',
'Mn', 'Fe', 'Co', 'Ni', 'Cu',
'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y',
'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
'Rh', 'Pd', 'Ag', 'Cd', 'In',
'Sn', 'Sb', 'Te', 'I', 'Xe',
'Cs', 'Ba', 'La', 'Ce', 'Pr',
'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
'Tb', 'Dy', 'Ho', 'Er', 'Tm',
'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au',
'Hg', 'Tl', 'Pb', 'Bi', 'Po',
'At', 'Rn', 'Fr', 'Ra', 'Ac',
'Th', 'Pa', 'U', 'Np', 'Pu',
'Am', 'Cm', 'Bk', 'Cf', 'Es',
'Fm', 'Md', 'No', 'Lr']
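# Illustrative conversions using the factors above (editorial example, not part
# of the original module):
#   energy_ev = 0.5 * ha2ev        # 0.5 Hartree is roughly 13.606 eV
#   length_ang = 2.0 * bohr2ang    # 2 Bohr is roughly 1.058 Angstrom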
|
alexmoratalla/yambopy
|
yambopy/units.py
|
Python
|
bsd-3-clause
| 2,898
|
from core_types import IncompatibleTypes, ImmutableT
class FnT(ImmutableT):
"""Type of a typed function"""
def __init__(self, input_types, return_type):
self.input_types = tuple(input_types)
self.return_type = return_type
self._hash = hash(self.input_types + (return_type,))
def __str__(self):
input_str = ", ".join(str(t) for t in self.input_types)
return "(%s)->%s" % (input_str, self.return_type)
def __repr__(self):
return str(self)
def __eq__(self, other):
return other.__class__ is FnT and \
self.return_type == other.return_type and \
len(self.input_types) == len(other.input_types) and \
all(t1 == t2 for (t1, t2) in
zip(self.input_types, other.input_types))
def combine(self, other):
if self == other:
return self
else:
raise IncompatibleTypes(self, other)
def __hash__(self):
return self._hash
_fn_type_cache = {}
def make_fn_type(input_types, return_type):
input_types = tuple(input_types)
key = input_types, return_type
if key in _fn_type_cache:
return _fn_type_cache[key]
else:
t = FnT(input_types, return_type)
_fn_type_cache[key] = t
return t
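# Illustrative use of the cache-backed constructor (editorial sketch; Int64 and
# Float64 stand in for whatever concrete parakeet types the caller has in scope):
#   t1 = make_fn_type((Int64, Int64), Float64)
#   t2 = make_fn_type((Int64, Int64), Float64)
#   assert t1 is t2  # identical signatures reuse the cached FnT instance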
|
pombredanne/parakeet
|
parakeet/ndtypes/fn_type.py
|
Python
|
bsd-3-clause
| 1,204
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
"""Package-wide configuration"""
try:
import __builtin__ as builtins
except ImportError: # pragma: no cover
import builtins
from tag.comment import Comment
from tag.directive import Directive
from tag.feature import Feature
from tag.sequence import Sequence
from tag.range import Range
from tag.reader import GFF3Reader
from tag.writer import GFF3Writer
from tag.score import Score
from tag import bae
from tag import cli
from tag import index
from tag import locus
from tag import select
from tag import transcript
from gzip import open as gzopen
import sys
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
if mode not in ['r', 'w']:
raise ValueError('invalid mode "{}"'.format(mode))
if filename in ['-', None]: # pragma: no cover
filehandle = sys.stdin if mode == 'r' else sys.stdout
return filehandle
openfunc = builtins.open
if filename.endswith('.gz'):
openfunc = gzopen
mode += 't'
return openfunc(filename, mode)
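# Illustrative usage of the helper above (editorial note; the file name is
# hypothetical and not part of the original package):
#   fh = open('annotations.gff3.gz', 'r')   # transparently opens gzip files in text mode
#   first_line = next(fh)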
|
standage/tag
|
tag/__init__.py
|
Python
|
bsd-3-clause
| 1,438
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import coverage
import all_tests
import atom.core
import atom.http_core
import atom.mock_http_core
import atom.auth
import atom.data
import atom.client
import gdata.gauth
import gdata.client
import gdata.core
import gdata.data
import gdata.blogger.data
import gdata.blogger.client
import gdata.maps.data
import gdata.maps.client
import gdata.spreadsheets.data
from gdata.test_config import settings
# Ensure that coverage tests execute the live requests to the servers, but
# allow use of cached server responses to speed up repeated runs.
settings.RUN_LIVE_TESTS = True
settings.CLEAR_CACHE = False
def suite():
  return unittest.TestSuite((all_tests.suite(),))
if __name__ == '__main__':
coverage.erase()
coverage.start()
unittest.TextTestRunner().run(all_tests.suite())
coverage.stop()
coverage.report([atom.core, atom.http_core, atom.auth, atom.data,
atom.mock_http_core, atom.client, gdata.gauth, gdata.client,
gdata.core, gdata.data, gdata.blogger.data, gdata.blogger.client,
gdata.maps.data, gdata.maps.client, gdata.spreadsheets.data])
|
dekom/threepress-bookworm-read-only
|
bookworm/gdata/tests/all_tests_coverage.py
|
Python
|
bsd-3-clause
| 1,805
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# mavericks, yosemite, linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Browser types:
# android-webview-shell, android-content-shell, debug
#
# ANGLE renderer:
# d3d9, d3d11, opengl
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
#
# Sample usage in SetExpectations in subclasses:
# self.Fail('gl-enable-vertex-attrib.html',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
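#
# An analogous call for marking a flaky page with automatic retries, based on
# the Flaky() method defined below (illustrative example, not taken from the
# original expectations files):
#   self.Flaky('some-flaky-page.html', ['win', 'debug'],
#              bug=456, max_num_retries=3)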
ANGLE_MODIFIERS = ['d3d9', 'd3d11', 'opengl']
BROWSER_TYPE_MODIFIERS = [
'android-webview-shell', 'android-content-shell', 'debug' ]
class _FlakyExpectation(object):
def __init__(self, expectation, max_num_retries):
self.expectation = expectation
self.max_num_retries = max_num_retries
class GpuTestExpectations(test_expectations.TestExpectations):
def __init__(self):
self._flaky_expectations = []
super(GpuTestExpectations, self).__init__()
def Flaky(self, url_pattern, conditions=None, bug=None, max_num_retries=2):
expectation = _FlakyExpectation(self.CreateExpectation(
'pass', url_pattern, conditions, bug), max_num_retries)
self._flaky_expectations.append(expectation)
def GetFlakyRetriesForPage(self, page, browser):
for fe in self._flaky_expectations:
e = fe.expectation
if self.ExpectationAppliesToPage(e, browser, page):
return fe.max_num_retries
return 0
def IsValidUserDefinedCondition(self, condition):
# Add support for d3d9, d3d11 and opengl-specific expectations.
if condition in ANGLE_MODIFIERS:
return True
# Add support for browser-type-specific expectations.
if condition in BROWSER_TYPE_MODIFIERS:
return True
return super(GpuTestExpectations,
self).IsValidUserDefinedCondition(condition)
def ModifiersApply(self, browser, expectation):
if not super(GpuTestExpectations, self).ModifiersApply(
browser, expectation):
return False
# We'll only get here if the OS and GPU matched the expectation.
# TODO(kbr): refactor _Expectation to be a public class so that
# the GPU-specific properties can be moved into a subclass, and
# run the unit tests from this directory on the CQ and the bots.
# crbug.com/495868 crbug.com/495870
# Check for presence of Android WebView.
browser_expectations = [x for x in expectation.user_defined_conditions
if x in BROWSER_TYPE_MODIFIERS]
browser_matches = ((not browser_expectations) or
browser.browser_type in browser_expectations)
if not browser_matches:
return False
angle_renderer = ''
gpu_info = None
if browser.supports_system_info:
gpu_info = browser.GetSystemInfo().gpu
if gpu_info and gpu_info.aux_attributes:
gl_renderer = gpu_info.aux_attributes.get('gl_renderer')
if gl_renderer:
if 'Direct3D11' in gl_renderer:
angle_renderer = 'd3d11'
elif 'Direct3D9' in gl_renderer:
angle_renderer = 'd3d9'
elif 'OpenGL' in gl_renderer:
angle_renderer = 'opengl'
angle_expectations = [x for x in expectation.user_defined_conditions
if x in ANGLE_MODIFIERS]
angle_matches = ((not angle_expectations) or
angle_renderer in angle_expectations)
return angle_matches
|
Just-D/chromium-1
|
content/test/gpu/gpu_tests/gpu_test_expectations.py
|
Python
|
bsd-3-clause
| 3,819
|
from __future__ import absolute_import, print_function
from datetime import timedelta
from django.utils import timezone
from freezegun import freeze_time
from sentry.models import CheckInStatus, Monitor, MonitorCheckIn, MonitorStatus, MonitorType
from sentry.testutils import APITestCase
@freeze_time("2019-01-01")
class CreateMonitorCheckInTest(APITestCase):
def test_passing(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "ok"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.OK
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.OK
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_failing(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.ERROR
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.ERROR
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_disabled(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.DISABLED,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.ERROR
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.DISABLED
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_pending_deletion(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.PENDING_DELETION,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 404, resp.content
def test_deletion_in_progress(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.DELETION_IN_PROGRESS,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 404, resp.content
|
mvaled/sentry
|
tests/sentry/api/endpoints/test_monitor_checkins.py
|
Python
|
bsd-3-clause
| 5,664
|
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
command = testshade("-g 128 128 -od uint8 -o Cout out.tif test")
outputs = [ "out.txt", "out.tif" ]
|
imageworks/OpenShadingLanguage
|
testsuite/hashnoise/run.py
|
Python
|
bsd-3-clause
| 294
|
import json
from os.path import join
from django.conf import settings
from django.core.management.base import NoArgsCommand
from program.models import ProgramSlot
class Command(NoArgsCommand):
    help = 'checks the automation_ids used by program slots against the exported shows.json cache'
def handle_noargs(self, **options):
cache_dir = getattr(settings, 'AUTOMATION_CACHE_DIR', 'cache')
cached_shows = join(cache_dir, 'shows.json')
with open(cached_shows) as shows_json:
shows = json.loads(shows_json.read())
rd_ids = {}
for show in shows['shows']:
rd_ids[show['id']] = show
for show in shows['multi-shows']:
rd_ids[show['id']] = show
pv_ids = []
for programslot in ProgramSlot.objects.filter(automation_id__isnull=False):
pv_ids.append(int(programslot.automation_id))
for automation_id in sorted(rd_ids.iterkeys()):
if rd_ids[automation_id]['type'] == 's':
continue
multi_id = -1
if 'multi' in rd_ids[automation_id]:
multi_id = rd_ids[automation_id]['multi']['id']
if automation_id not in pv_ids and multi_id not in pv_ids:
if multi_id < 0:
print '+ %d' % (automation_id)
else:
print '+ %d (%d)' % (automation_id, multi_id)
for automation_id in sorted(pv_ids):
if automation_id not in rd_ids:
print '-', automation_id
|
nnrcschmdt/helsinki
|
program/management/commands/check_automation_ids.py
|
Python
|
bsd-3-clause
| 1,616
|
#!/usr/bin/env pythonw
#from __future__ import print_function
import sys
import wx
import os
import matplotlib
if matplotlib.get_backend() != "WXAgg":
matplotlib.use("WXAgg")
import matplotlib.pyplot as plt
from pmagpy import pmagplotlib
import pmagpy.command_line_extractor as extractor
import pmagpy.ipmag as ipmag
import dialogs.pmag_widgets as pw
import dialogs.pmag_menu_dialogs as pmag_menu_dialogs
def main():
"""
NAME
core_depthplot.py
DESCRIPTION
plots various measurements versus core_depth or age. plots data flagged as 'FS-SS-C' as discrete samples.
SYNTAX
core_depthplot.py [command line options]
# or, for Anaconda users:
core_depthplot_anaconda [command line options]
OPTIONS
-h prints help message and quits
    -f FILE: specify input measurements format file
-fsum FILE: specify input LIMS database (IODP) core summary csv file
-fwig FILE: specify input depth,wiggle to plot, in magic format with sample_core_depth key for depth
-fsa FILE: specify input er_samples format file from magic for depth
-fa FILE: specify input ages format file from magic for age
NB: must have either -fsa OR -fa (not both)
-fsp FILE sym size: specify input zeq_specimen format file from magic, sym and size
NB: PCAs will have specified color, while fisher means will be white with specified color as the edgecolor
-fres FILE specify input pmag_results file from magic, sym and size
-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
-S do not plot blanket treatment data (if this is set, you don't need the -LP)
-sym SYM SIZE, symbol, size for continuous points (e.g., ro 5, bs 10, g^ 10 for red dot, blue square, green triangle), default is blue dot at 5 pt
-D do not plot declination
-M do not plot magnetization
-log plot magnetization on a log scale
-L do not connect dots with a line
-I do not plot inclination
-d min max [in m] depth range to plot
-n normalize by weight in er_specimen table
-Iex: plot the expected inc at lat - only available for results with lat info in file
-ts TS amin amax: plot the GPTS for the time interval between amin and amax (numbers in Ma)
TS: [ck95, gts04, gts12]
-ds [mbsf,mcd] specify depth scale, mbsf default
-fmt [svg, eps, pdf, png] specify output format for plot (default: svg)
-sav save plot silently
DEFAULTS:
Measurements file: measurements.txt
Samples file: samples.txt
NRM step
Summary file: none
"""
args = sys.argv
if '-h' in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([ ['f', False, 'measurements.txt'], ['fsum', False, ''],
['fwig', False, ''], ['fsa', False, ''],
['fa', False, ''], ['fsp', False, ''],
['fres', False, '' ], ['fmt', False, 'svg'],
['LP', False, ''], ['n', False, False],
['d', False, '-1 -1'], ['ts', False, ''],
['WD', False, '.'], ['L', False, True],
['S', False, True], ['D', False, True],
['I', False, True], ['M', False, True],
['log', False, 0],
['ds', False, 'sample_core_depth'],
['sym', False, 'bo 5'], ['ID', False, '.'],
['sav', False, False], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol, input_dir, save, data_model_num = extractor.get_vars(
['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n', 'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym', 'ID', 'sav', 'DM'], checked_args)
# format some variables
# format symbol/size
try:
sym, size = symbol.split()
size = int(size)
except:
print('you should provide -sym in this format: ro 5')
print('using defaults instead')
sym, size = 'ro', 5
# format result file, symbol, size
if res_file:
try:
res_file, res_sym, res_size = res_file.split()
except:
print('you must provide -fres in this format: -fres filename symbol size')
print(
'could not parse {}, defaulting to using no result file'.format(res_file))
res_file, res_sym, res_size = '', '', 0
else:
res_file, res_sym, res_size = '', '', 0
# format specimen file, symbol, size
if spc_file:
try:
spc_file, spc_sym, spc_size = spc_file.split()
except:
print('you must provide -fsp in this format: -fsp filename symbol size')
print(
'could not parse {}, defaulting to using no specimen file'.format(spc_file))
spc_file, spc_sym, spc_size = '', '', 0
else:
spc_file, spc_sym, spc_size = '', '', 0
# format min/max depth
try:
dmin, dmax = depth.split()
except:
print('you must provide -d in this format: -d dmin dmax')
print('could not parse {}, defaulting to plotting all depths'.format(depth))
dmin, dmax = -1, -1
# format timescale, min/max time
if timescale:
try:
timescale, amin, amax = timescale.split()
pltTime = True
except:
print(
'you must provide -ts in this format: -ts timescale minimum_age maximum_age')
print(
'could not parse {}, defaulting to using no timescale'.format(timescale))
timescale, amin, amax = None, -1, -1
pltTime = False
else:
timescale, amin, amax = None, -1, -1
pltTime = False
# format norm and wt_file
if norm and not isinstance(norm, bool):
wt_file = norm
norm = True
else:
norm = False
wt_file = ''
    # format list of protocols and step
try:
method, step = meth.split()
except:
print(
'To use the -LP flag you must provide both the protocol and the step in this format:\n-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot')
print('Defaulting to using no protocol')
method, step = 'LT-NO', 0
# list of varnames
#['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n', 'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym' ]
#meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol
fig, figname = ipmag.core_depthplot(input_dir, meas_file, spc_file, samp_file, age_file, sum_file, wt_file, depth_scale, dmin, dmax, sym, size,
spc_sym, spc_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, pltSus, logit, pltTime, timescale, amin, amax, norm, data_model_num)
if not pmagplotlib.isServer:
figname = figname.replace(':', '_')
if fig and save:
print('-I- Created plot: {}'.format(figname))
plt.savefig(figname)
return
app = wx.App(redirect=False)
if not fig:
pw.simple_warning(
'No plot was able to be created with the data you provided.\nMake sure you have given all the required information and try again')
return False
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
figname = os.path.join(dir_path, figname)
plot_frame = pmag_menu_dialogs.PlotFrame((int(pixel_width), int(pixel_height + 50)),
fig, figname, standalone=True)
app.MainLoop()
if __name__ == "__main__":
main()
|
lfairchild/PmagPy
|
programs/core_depthplot.py
|
Python
|
bsd-3-clause
| 8,467
|
"""
.. _sfm-reconst:
==============================================
Reconstruction with the Sparse Fascicle Model
==============================================
In this example, we will use the Sparse Fascicle Model (SFM) [Rokem2015]_, to
reconstruct the fiber Orientation Distribution Function (fODF) in every voxel.
First, we import the modules we will use in this example:
"""
import dipy.reconst.sfm as sfm
import dipy.data as dpd
import dipy.direction.peaks as dpp
from dipy.io.image import load_nifti, save_nifti
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.viz import window, actor
"""
For the purpose of this example, we will use the Stanford HARDI dataset (150
directions, single b-value of 2000 $s/mm^2$) that can be automatically
downloaded. If you have not yet downloaded this data-set in one of the other
examples, you will need to be connected to the internet the first time you run
this example. The data will be stored for subsequent runs, and for use with
other examples.
"""
hardi_fname, hardi_bval_fname, hardi_bvec_fname = dpd.get_fnames('stanford_hardi')
data, affine = load_nifti(hardi_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)
# Enables/disables interactive visualization
interactive = False
"""
Reconstruction of the fiber ODF in each voxel guides subsequent tracking
steps. Here, the model is the Sparse Fascicle Model, described in
[Rokem2015]_. This model reconstructs the diffusion signal as a combination of
the signals from different fascicles. This model can be written as:
.. math::
y = X\beta
Where $y$ is the signal and $\beta$ are weights on different points in the
sphere. The columns of the design matrix, $X$ are the signals in each point in
the measurement that would be predicted if there was a fascicle oriented in the
direction represented by that column. Typically, the signal used for this
kernel will be a prolate tensor with axial diffusivity 3-5 times higher than
its radial diffusivity. The exact numbers can also be estimated from examining
parts of the brain in which there is known to be only one fascicle (e.g. in
corpus callosum).
Sparsity constraints on the fiber ODF ($\beta$) are set through the Elastic Net
algorithm [Zou2005]_.
Elastic Net optimizes the following cost function:
.. math::
    \sum_{i=1}^{n}{(y_i - \hat{y}_i)^2} + \alpha (\lambda \sum_{j=1}^{m}{w_j}+(1-\lambda) \sum_{j=1}^{m}{w^2_j})
where $\hat{y}$ is the signal predicted for a particular setting of $\beta$,
such that the left part of this expression is the squared loss function;
$\alpha$ is a parameter that sets the balance between the squared loss on
the data, and the regularization constraints. The regularization parameter
$\lambda$ sets the `l1_ratio`, which controls the balance between L1-sparsity
(low sum of weights), and low L2-sparsity (low sum-of-squares of the weights).
Just like Constrained Spherical Deconvolution (see :ref:`reconst-csd`), the SFM
requires the definition of a response function. We'll take advantage of the
automated algorithm in the :mod:`csdeconv` module to find this response
function:
"""
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
The ``response`` return value contains two entries. The first is an array with
the eigenvalues of the response function and the second is the average S0 for
this response.
It is a very good practice to always validate the result of ``auto_response``.
For this purpose, we can print it and have a look at its values.
"""
print(response)
"""
(array([ 0.0014, 0.00029, 0.00029]), 416.206)
We initialize an SFM model object, using these values. We will use the default
sphere (362 vertices, symmetrically distributed on the surface of the sphere),
as a set of putative fascicle directions that are considered in the model
"""
sphere = dpd.get_sphere()
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
l1_ratio=0.5, alpha=0.001,
response=response[0])
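# Editorial aside (not part of the original tutorial): the ``l1_ratio`` and
# ``alpha`` arguments above play the same roles as in scikit-learn's
# ElasticNet, which solves the penalized least-squares problem written out in
# the docstring above, e.g.:
#   from sklearn.linear_model import ElasticNet
#   ElasticNet(alpha=0.001, l1_ratio=0.5)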
"""
For the purpose of the example, we will consider a small volume of data
containing parts of the corpus callosum and of the centrum semiovale
"""
data_small = data[20:50, 55:85, 38:39]
"""
Fitting the model to this small volume of data, we calculate the ODF of this
model on the sphere, and plot it.
"""
sf_fit = sf_model.fit(data_small)
sf_odf = sf_fit.odf(sphere)
fodf_spheres = actor.odf_slicer(sf_odf, sphere=sphere, scale=0.8,
colormap='plasma')
ren = window.Renderer()
ren.add(fodf_spheres)
print('Saving illustration as sf_odfs.png')
window.record(ren, out_path='sf_odfs.png', size=(1000, 1000))
if interactive:
window.show(ren)
"""
We can extract the peaks from the ODF, and plot these as well
"""
sf_peaks = dpp.peaks_from_model(sf_model,
data_small,
sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
return_sh=False)
window.clear(ren)
fodf_peaks = actor.peak_slicer(sf_peaks.peak_dirs, sf_peaks.peak_values)
ren.add(fodf_peaks)
print('Saving illustration as sf_peaks.png')
window.record(ren, out_path='sf_peaks.png', size=(1000, 1000))
if interactive:
window.show(ren)
"""
Finally, we plot both the peaks and the ODFs, overlayed:
"""
fodf_spheres.GetProperty().SetOpacity(0.4)
ren.add(fodf_spheres)
print('Saving illustration as sf_both.png')
window.record(ren, out_path='sf_both.png', size=(1000, 1000))
if interactive:
window.show(ren)
"""
.. figure:: sf_both.png
:align: center
SFM Peaks and ODFs.
To see how to use this information in tracking, proceed to :ref:`sfm-track`.
References
----------
.. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
.. [Zou2005] Zou H, Hastie T (2005). Regularization and variable
selection via the elastic net. J R Stat Soc B:301-320
"""
|
FrancoisRheaultUS/dipy
|
doc/examples/sfm_reconst.py
|
Python
|
bsd-3-clause
| 6,318
|
import numpy as np
from brew.base import Ensemble
from brew.metrics.diversity.paired import kuncheva_double_fault_measure
from .base import DCS
class DSKNN(DCS):
"""DS-KNN
The DS-KNN selects an ensemble of classifiers based on
their accuracy and diversity in the neighborhood of the
test sample.
Attributes
----------
`Xval` : array-like, shape = [indeterminated, n_features]
Validation set.
`yval` : array-like, shape = [indeterminated]
Labels of the validation set.
`knn` : sklearn KNeighborsClassifier,
Classifier used to find neighborhood.
Examples
--------
>>> from brew.selection.dynamic import DSKNN
>>> from brew.generation.bagging import Bagging
>>> from brew.base import EnsembleClassifier
>>>
>>> from sklearn.tree import DecisionTreeClassifier
>>> import numpy as np
>>>
>>> X = np.array([[-1, 0], [-0.8, 1], [-0.8, -1], [-0.5, 0],
[0.5, 0], [1, 0], [0.8, 1], [0.8, -1]])
>>> y = np.array([1, 1, 1, 2, 1, 2, 2, 2])
>>> tree = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
>>> bag = Bagging(base_classifier=tree, n_classifiers=10)
>>> bag.fit(X, y)
>>>
>>> sel = DSKNN(X, y, K=3)
>>>
>>> clf = EnsembleClassifier(bag.ensemble, selector=sel)
>>> clf.predict([-1.1,-0.5])
[1]
See also
--------
brew.selection.dynamic.lca.OLA: Overall Local Accuracy.
brew.selection.dynamic.lca.LCA: Local Class Accuracy.
References
----------
Santana, Alixandre, et al. "A dynamic classifier selection method
to build ensembles using accuracy and diversity." 2006 Ninth
Brazilian Symposium on Neural Networks (SBRN'06). IEEE, 2006.
"""
def __init__(self, Xval, yval, K=5, weighted=False, knn=None,
n_1=0.7, n_2=0.3):
        if n_1 < 0 or n_2 < 0 or n_1 <= n_2:
            raise ValueError('n_1 and n_2 must be non-negative, with n_1 > n_2')
self.n_1 = n_1
self.n_2 = n_2
super(DSKNN, self).__init__(
Xval, yval, K=K, weighted=weighted, knn=knn)
def select(self, ensemble, x):
if ensemble.in_agreement(x):
return Ensemble([ensemble.classifiers[0]]), None
n_sel_1, n_sel_2 = self.n_1, self.n_2
if isinstance(self.n_1, float):
n_sel_1 = int(n_sel_1 * len(ensemble))
if isinstance(self.n_2, float):
n_sel_2 = int(n_sel_2 * len(ensemble))
n_sel_1 = max(n_sel_1, 1)
n_sel_2 = max(n_sel_2, 1)
        # initialize variables
        # get the indexes of the KNN of x
classifiers = ensemble.classifiers
[idx] = self.knn.kneighbors(x, return_distance=False)
X, y = self.Xval[idx], self.yval[idx]
acc_scores = np.array([clf.score(X, y) for clf in classifiers])
out = ensemble.output(X, mode='labels')
oracle = np.equal(out, y[:, np.newaxis])
div_scores = np.zeros(len(ensemble), dtype=float)
for i in range(len(ensemble)):
tmp = []
for j in range(len(ensemble)):
if i != j:
d = kuncheva_double_fault_measure(oracle[:, [i, j]])
tmp.append(d)
div_scores[i] = np.mean(tmp)
        # rank candidates: keep the n_sel_1 most accurate classifiers, then,
        # among those, the n_sel_2 with the lowest double-fault score (i.e. the
        # most diverse), and finally extract their ensemble indices
        z = zip(np.arange(len(ensemble)), acc_scores, div_scores)
        z = sorted(z, key=lambda e: e[1], reverse=True)[:n_sel_1]
        z = sorted(z, key=lambda e: e[2], reverse=False)[:n_sel_2]
        z = list(zip(*z))[0]
classifiers = [classifiers[i] for i in z]
return Ensemble(classifiers=classifiers), None
|
thypad/brew
|
skensemble/selection/dynamic/dsknn.py
|
Python
|
mit
| 3,563
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
# The source was jpeg.ksy from here - https://github.com/kaitai-io/kaitai_struct_formats/blob/24e2d00048b8084ceec30a187a79cb87a79a48ba/image/jpeg.ksy
import array
import struct
import zlib
from enum import Enum
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from .exif import Exif
class Jpeg(KaitaiStruct):
class ComponentId(Enum):
y = 1
cb = 2
cr = 3
i = 4
q = 5
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.segments = []
while not self._io.is_eof():
self.segments.append(self._root.Segment(self._io, self, self._root))
class Segment(KaitaiStruct):
class MarkerEnum(Enum):
tem = 1
sof0 = 192
sof1 = 193
sof2 = 194
sof3 = 195
dht = 196
sof5 = 197
sof6 = 198
sof7 = 199
soi = 216
eoi = 217
sos = 218
dqt = 219
dnl = 220
dri = 221
dhp = 222
app0 = 224
app1 = 225
app2 = 226
app3 = 227
app4 = 228
app5 = 229
app6 = 230
app7 = 231
app8 = 232
app9 = 233
app10 = 234
app11 = 235
app12 = 236
app13 = 237
app14 = 238
app15 = 239
com = 254
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -1))
self.marker = self._root.Segment.MarkerEnum(self._io.read_u1())
if ((self.marker != self._root.Segment.MarkerEnum.soi) and (self.marker != self._root.Segment.MarkerEnum.eoi)) :
self.length = self._io.read_u2be()
if ((self.marker != self._root.Segment.MarkerEnum.soi) and (self.marker != self._root.Segment.MarkerEnum.eoi)) :
_on = self.marker
if _on == self._root.Segment.MarkerEnum.sos:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentSos(io, self, self._root)
elif _on == self._root.Segment.MarkerEnum.app1:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentApp1(io, self, self._root)
elif _on == self._root.Segment.MarkerEnum.sof0:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentSof0(io, self, self._root)
elif _on == self._root.Segment.MarkerEnum.app0:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentApp0(io, self, self._root)
else:
self.data = self._io.read_bytes((self.length - 2))
if self.marker == self._root.Segment.MarkerEnum.sos:
self.image_data = self._io.read_bytes_full()
class SegmentSos(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.num_components = self._io.read_u1()
self.components = [None] * (self.num_components)
for i in range(self.num_components):
self.components[i] = self._root.SegmentSos.Component(self._io, self, self._root)
self.start_spectral_selection = self._io.read_u1()
self.end_spectral = self._io.read_u1()
self.appr_bit_pos = self._io.read_u1()
class Component(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.id = self._root.ComponentId(self._io.read_u1())
self.huffman_table = self._io.read_u1()
class SegmentApp1(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.read_strz("ASCII", 0, False, True, True)
_on = self.magic
if _on == u"Exif":
self.body = self._root.ExifInJpeg(self._io, self, self._root)
class SegmentSof0(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.bits_per_sample = self._io.read_u1()
self.image_height = self._io.read_u2be()
self.image_width = self._io.read_u2be()
self.num_components = self._io.read_u1()
self.components = [None] * (self.num_components)
for i in range(self.num_components):
self.components[i] = self._root.SegmentSof0.Component(self._io, self, self._root)
class Component(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.id = self._root.ComponentId(self._io.read_u1())
self.sampling_factors = self._io.read_u1()
self.quantization_table_id = self._io.read_u1()
@property
def sampling_x(self):
if hasattr(self, '_m_sampling_x'):
return self._m_sampling_x if hasattr(self, '_m_sampling_x') else None
self._m_sampling_x = ((self.sampling_factors & 240) >> 4)
return self._m_sampling_x if hasattr(self, '_m_sampling_x') else None
@property
def sampling_y(self):
if hasattr(self, '_m_sampling_y'):
return self._m_sampling_y if hasattr(self, '_m_sampling_y') else None
self._m_sampling_y = (self.sampling_factors & 15)
return self._m_sampling_y if hasattr(self, '_m_sampling_y') else None
class ExifInJpeg(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.extra_zero = self._io.ensure_fixed_contents(struct.pack('1b', 0))
self._raw_data = self._io.read_bytes_full()
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Exif(io)
class SegmentApp0(KaitaiStruct):
class DensityUnit(Enum):
no_units = 0
pixels_per_inch = 1
pixels_per_cm = 2
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.read_str_byte_limit(5, "ASCII")
self.version_major = self._io.read_u1()
self.version_minor = self._io.read_u1()
self.density_units = self._root.SegmentApp0.DensityUnit(self._io.read_u1())
self.density_x = self._io.read_u2be()
self.density_y = self._io.read_u2be()
self.thumbnail_x = self._io.read_u1()
self.thumbnail_y = self._io.read_u1()
self.thumbnail = self._io.read_bytes(((self.thumbnail_x * self.thumbnail_y) * 3))
|
mosajjal/mitmproxy
|
mitmproxy/contrib/kaitaistruct/jpeg.py
|
Python
|
mit
| 8,139
|
import numpy
import pandas
import statsmodels.api as sm
def complex_heuristic(file_path):
'''
    You are given a list of Titanic passengers and their associated
    information. More information about the data can be seen at the link below:
    http://www.kaggle.com/c/titanic-gettingStarted/data
    For this exercise, you need to write a more sophisticated heuristic
    that will use the passengers' gender, socio-economic class and age
    to predict if they survived the Titanic disaster.
    Your prediction should be 79% accurate or higher.
    If the passenger is female, or if his/her socio-economic status is high AND
    the passenger is under 18, you should assume the passenger survived.
    Otherwise, you should assume the passenger perished in the disaster.
    Or more specifically in code terms: female or (high status and under 18)
    You can access the gender of a passenger via passenger['Sex'].
    If the passenger is male, passenger['Sex'] will return a string "male".
    If the passenger is female, passenger['Sex'] will return a string "female".
    You can access the socio-economic status of a passenger via passenger['Pclass']:
    High socio-economic status -- passenger['Pclass'] is 1
    Medium socio-economic status -- passenger['Pclass'] is 2
    Low socio-economic status -- passenger['Pclass'] is 3
    You can access the age of a passenger via passenger['Age'].
    Write your prediction back into the "predictions" dictionary. The
    key of the dictionary should be the passenger's id (which can be accessed
    via passenger["PassengerId"]) and the associated value should be 1 if the
    passenger survived or 0 otherwise.
    For example, if a passenger survived:
    passenger_id = passenger['PassengerId']
    predictions[passenger_id] = 1
    Or if a passenger perished in the disaster:
    passenger_id = passenger['PassengerId']
    predictions[passenger_id] = 0
    You can also look at the Titanic data that you will be working with
    at the link below:
    https://www.dropbox.com/s/r5f9aos8p9ri9sa/titanic_data.csv
'''
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
#
# your code here
#
if (passenger['Sex']=='female') or (passenger['Pclass']==1 and passenger['Age']<18):
predictions[passenger['PassengerId']] = 1
else:
predictions[passenger['PassengerId']] = 0
return predictions
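# Illustrative usage (the CSV path is an assumption; it mirrors the file linked
# in the docstring above):
#   predictions = complex_heuristic('titanic_data.csv')
#   predicted_survivors = sum(predictions.values())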
|
KellyChan/python-examples
|
python/data_science/Titanic/complexHeuristic.py
|
Python
|
mit
| 2,546
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "logger.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
h4/fuit-webdev
|
projects/logger/manage.py
|
Python
|
mit
| 249
|
import os
from holster.enum import Enum
from rowboat.types import Model, SlottedModel, Field, DictField, text, raw, rule_matcher
CooldownMode = Enum(
'GUILD',
'CHANNEL',
'USER',
)
class PluginConfigObj(object):
client = None
class PluginsConfig(Model):
def __init__(self, inst, obj):
self.client = None
self.load_into(inst, obj)
@classmethod
def parse(cls, obj, *args, **kwargs):
inst = PluginConfigObj()
cls(inst, obj)
return inst
@classmethod
def force_load_plugin_configs(cls):
"""
This function can be called to ensure that this class will have all its
attributes properly loaded, as they are dynamically set when plugin configs
are defined.
"""
plugins = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'plugins')
for name in os.listdir(plugins):
__import__('rowboat.plugins.{}'.format(
name.rsplit('.', 1)[0]
))
class CommandOverrideConfig(SlottedModel):
disabled = Field(bool, default=False)
level = Field(int)
class CommandsConfig(SlottedModel):
prefix = Field(str, default='')
mention = Field(bool, default=False)
overrides = Field(raw)
def get_command_override(self, command):
return rule_matcher(command, self.overrides or [])
class GuildConfig(SlottedModel):
nickname = Field(text)
commands = Field(CommandsConfig, default=None, create=False)
levels = DictField(int, int)
plugins = Field(PluginsConfig.parse)
|
b1naryth1ef/rowboat
|
rowboat/types/guild.py
|
Python
|
mit
| 1,573
|
import logging
try:
from configparser import ConfigParser
except ImportError:
# Python 2 support
from ConfigParser import ConfigParser
logger = logging.getLogger("packges.knightos.org")
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
sh.setFormatter(formatter)
logger.addHandler(sh)
# scss logger
logging.getLogger("scss").addHandler(sh)
config = ConfigParser()
config.readfp(open('config.ini'))
env = 'dev'
_cfg = lambda k: config.get(env, k)
_cfgi = lambda k: int(_cfg(k))
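# Illustrative usage (the option names are hypothetical; they depend on the
# [dev] section of config.ini):
#   domain = _cfg('domain')
#   port = _cfgi('port')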
|
KnightOS/packages.knightos.org
|
packages/config.py
|
Python
|
mit
| 619
|
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.otpbase import OTPGlobals
from otp.avatar.Avatar import teleportNotify
from otp.friends import FriendResponseCodes
class PlayerFriendsManager(DistributedObjectGlobal):
notify = directNotify.newCategory('PlayerFriendsManager')
def __init__(self, cr):
DistributedObjectGlobal.__init__(self, cr)
self.playerFriendsList = set()
self.playerId2Info = {}
self.playerAvId2avInfo = {}
self.accept('gotExtraFriendHandles', self.__handleFriendHandles)
def delete(self):
self.ignoreAll()
def sendRequestInvite(self, playerId):
print 'PFM sendRequestInvite id:%s' % playerId
self.sendUpdate('requestInvite', [0, playerId, True])
def sendRequestDecline(self, playerId):
self.sendUpdate('requestDecline', [0, playerId])
def sendRequestRemove(self, playerId):
self.sendUpdate('requestRemove', [0, playerId])
def sendRequestUnlimitedSecret(self):
self.sendUpdate('requestUnlimitedSecret', [0])
def sendRequestLimitedSecret(self, username, password):
self.sendUpdate('requestLimitedSecret', [0, username, password])
def sendRequestUseUnlimitedSecret(self, secret):
pass
def sendRequestUseLimitedSecret(self, secret, username, password):
pass
def sendSCWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCTo', [0, recipientId, msgId])
def sendSCCustomWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCCustomTo', [0, recipientId, msgId])
def sendSCEmoteWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCEmoteTo', [0, recipientId, msgId])
def setTalkAccount(self, toAc, fromAc, fromName, message, mods, flags):
localAvatar.displayTalkAccount(fromAc, fromName, message, mods)
toName = None
friendInfo = self.getFriendInfo(toAc)
if friendInfo:
toName = friendInfo.playerName
elif toAc == localAvatar.DISLid:
toName = localAvatar.getName()
base.talkAssistant.receiveAccountTalk(None, None, fromAc, fromName, toAc, toName, message)
return
def invitationFrom(self, playerId, avatarName):
messenger.send(OTPGlobals.PlayerFriendInvitationEvent, [playerId, avatarName])
def retractInvite(self, playerId):
messenger.send(OTPGlobals.PlayerFriendRetractInviteEvent, [playerId])
def rejectInvite(self, playerId, reason):
messenger.send(OTPGlobals.PlayerFriendRejectInviteEvent, [playerId, reason])
def rejectRemove(self, playerId, reason):
messenger.send(OTPGlobals.PlayerFriendRejectRemoveEvent, [playerId, reason])
def secretResponse(self, secret):
print 'secretResponse %s' % secret
messenger.send(OTPGlobals.PlayerFriendNewSecretEvent, [secret])
def rejectSecret(self, reason):
print 'rejectSecret %s' % reason
messenger.send(OTPGlobals.PlayerFriendRejectNewSecretEvent, [reason])
def rejectUseSecret(self, reason):
print 'rejectUseSecret %s' % reason
messenger.send(OTPGlobals.PlayerFriendRejectUseSecretEvent, [reason])
def invitationResponse(self, playerId, respCode, context):
if respCode == FriendResponseCodes.INVITATION_RESP_DECLINE:
messenger.send(OTPGlobals.PlayerFriendRejectInviteEvent, [playerId, respCode])
elif respCode == FriendResponseCodes.INVITATION_RESP_NEW_FRIENDS:
pass
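    # Handles an incoming player-friend info update: keeps the local friend set
    # and per-player info cache in sync and fires online/offline events.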
def updatePlayerFriend(self, id, info, isNewFriend):
self.notify.warning('updatePlayerFriend: %s, %s, %s' % (id, info, isNewFriend))
info.calcUnderstandableYesNo()
if info.playerName[0:5] == 'Guest':
info.playerName = 'Guest ' + info.playerName[5:]
if id not in self.playerFriendsList:
self.playerFriendsList.add(id)
self.playerId2Info[id] = info
messenger.send(OTPGlobals.PlayerFriendAddEvent, [id, info, isNewFriend])
elif id in self.playerId2Info:
if not self.playerId2Info[id].onlineYesNo and info.onlineYesNo:
self.playerId2Info[id] = info
messenger.send('playerOnline', [id])
base.talkAssistant.receiveFriendAccountUpdate(id, info.playerName, info.onlineYesNo)
elif self.playerId2Info[id].onlineYesNo and not info.onlineYesNo:
self.playerId2Info[id] = info
messenger.send('playerOffline', [id])
base.talkAssistant.receiveFriendAccountUpdate(id, info.playerName, info.onlineYesNo)
if not self.askAvatarKnownHere(info.avatarId):
self.requestAvatarInfo(info.avatarId)
self.playerId2Info[id] = info
av = base.cr.doId2do.get(info.avatarId, None)
if av is not None:
av.considerUnderstandable()
messenger.send(OTPGlobals.PlayerFriendUpdateEvent, [id, info])
return
def removePlayerFriend(self, id):
if id not in self.playerFriendsList:
return
self.playerFriendsList.remove(id)
info = self.playerId2Info.pop(id, None)
if info is not None:
av = base.cr.doId2do.get(info.avatarId, None)
if av is not None:
av.considerUnderstandable()
messenger.send(OTPGlobals.PlayerFriendRemoveEvent, [id])
return
def whisperSCFrom(self, playerId, msg):
base.talkAssistant.receivePlayerWhisperSpeedChat(msg, playerId)
def isFriend(self, pId):
return self.isPlayerFriend(pId)
def isPlayerFriend(self, pId):
if not pId:
return 0
return pId in self.playerFriendsList
def isAvatarOwnerPlayerFriend(self, avId):
pId = self.findPlayerIdFromAvId(avId)
if pId and self.isPlayerFriend(pId):
return True
else:
return False
def getFriendInfo(self, pId):
return self.playerId2Info.get(pId)
def findPlayerIdFromAvId(self, avId):
for playerId in self.playerId2Info:
if self.playerId2Info[playerId].avatarId == avId:
if self.playerId2Info[playerId].onlineYesNo:
return playerId
return None
def findAvIdFromPlayerId(self, pId):
pInfo = self.playerId2Info.get(pId)
if pInfo:
return pInfo.avatarId
else:
return None
return None
def findPlayerInfoFromAvId(self, avId):
playerId = self.findPlayerIdFromAvId(avId)
if playerId:
return self.getFriendInfo(playerId)
else:
return None
return None
def askAvatarOnline(self, avId):
returnValue = 0
if avId in self.cr.doId2do:
returnValue = 1
if avId in self.playerAvId2avInfo:
playerId = self.findPlayerIdFromAvId(avId)
if playerId in self.playerId2Info:
playerInfo = self.playerId2Info[playerId]
if playerInfo.onlineYesNo:
returnValue = 1
return returnValue
def countTrueFriends(self):
count = 0
for id in self.playerId2Info:
if self.playerId2Info[id].openChatFriendshipYesNo:
count += 1
return count
def askTransientFriend(self, avId):
if (avId in self.playerAvId2avInfo) and (not base.cr.isAvatarFriend(avId)):
return 1
else:
return 0
def askAvatarKnown(self, avId):
if self.askAvatarKnownElseWhere(avId) or self.askAvatarKnownHere(avId):
return 1
else:
return 0
def askAvatarKnownElseWhere(self, avId):
if hasattr(base, 'cr'):
if base.cr.askAvatarKnown(avId):
return 1
return 0
def askAvatarKnownHere(self, avId):
if avId in self.playerAvId2avInfo:
return 1
else:
return 0
def requestAvatarInfo(self, avId):
if hasattr(base, 'cr'):
base.cr.queueRequestAvatarInfo(avId)
def __handleFriendHandles(self, handleList):
for handle in handleList:
self.playerAvId2avInfo[handle.getDoId()] = handle
messenger.send('friendsListChanged')
def getAvHandleFromId(self, avId):
if avId in self.playerAvId2avInfo:
return self.playerAvId2avInfo[avId]
def identifyFriend(self, avId):
handle = None
teleportNotify.debug('identifyFriend(%s)' % avId)
handle = base.cr.identifyFriend(avId)
if not handle:
teleportNotify.debug('getAvHandleFromId(%s)' % avId)
handle = self.getAvHandleFromId(avId)
return handle
def getAllOnlinePlayerAvatars(self):
returnList = []
for avatarId in self.playerAvId2avInfo:
playerId = self.findPlayerIdFromAvId(avatarId)
if playerId:
if self.playerId2Info[playerId].onlineYesNo:
returnList.append(avatarId)
return returnList
def identifyAvatar(self, doId):
if doId in base.cr.doId2do:
return base.cr.doId2do[doId]
else:
return self.identifyFriend(doId)
def friendsListFull(self):
return len(self.playerFriendsList) >= OTPGlobals.MaxPlayerFriends
|
Spiderlover/Toontown
|
otp/friends/PlayerFriendsManager.py
|
Python
|
mit
| 9,425
|
#!/usr/bin/python
import pygame
import enemies
from core import balloon, bullet, game, gem, particle, player, world
from scenes import credits, scene, splashscreen
from ui import menu, text
from utils import prettyprint, utility, vector
from utils.settings import *
pygame.init()
utility.read_settings()
if settings_list[SETTING_FULLSCREEN]:
screen = utility.set_fullscreen()
else:
screen = utility.set_fullscreen(False)
pygame.display.set_icon(utility.load_image('icon'))
pygame.display.set_caption('Trouble In CloudLand v1.1')
screen.fill((0, 0, 0))
tempText = text.Text(FONT_PATH, 36, (255, 255, 255))
tempText.set_text('Loading...')
tempText.position = vector.Vector2d((SCREEN_WIDTH / 2) - (tempText.image.get_width() / 2), (SCREEN_HEIGHT / 2) - (tempText.image.get_height() / 2))
tempText.update()
tempText.draw(screen)
pygame.display.flip()
try:
pygame.mixer.set_reserved(MUSIC_CHANNEL)
pygame.mixer.Channel(MUSIC_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(PLAYER_CHANNEL)
pygame.mixer.Channel(PLAYER_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(OW_CHANNEL)
pygame.mixer.Channel(OW_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(BAAKE_CHANNEL)
pygame.mixer.Channel(BAAKE_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(BOSS_CHANNEL)
pygame.mixer.Channel(BOSS_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(PICKUP_CHANNEL)
pygame.mixer.Channel(PICKUP_CHANNEL).set_volume(1)
except:
utility.sound_active = False
print('WARNING! - Sound not initialized.')
pygame.mouse.set_visible(False)
music_list = [
utility.load_sound('menuMusic'),
utility.load_sound('music0'),
utility.load_sound('music1'),
utility.load_sound('music2'),
utility.load_sound('bossMusic')
]
world.load_data()
player.load_data()
bullet.load_data()
pygame.event.pump()
enemies.baake.load_data()
balloon.load_data()
gem.load_data()
pygame.event.pump()
enemies.moono.load_data()
enemies.batto.load_data()
enemies.rokubi.load_data()
pygame.event.pump()
enemies.haoya.load_data()
enemies.yurei.load_data()
enemies.bokko.load_data()
pygame.event.pump()
enemies.hakta.load_data()
enemies.raayu.load_data()
enemies.paajo.load_data()
pygame.event.pump()
enemies.boss.load_data()
particle.load_data()
menu.load_data()
for event in pygame.event.get():
pass
splashscreen.SplashScreen(screen, 'pygamesplash')
utility.play_music(music_list[MENU_MUSIC])
splashscreen.SplashScreen(screen, 'gameSplash')
if settings_list[WORLD_UNLOCKED] == 0:
new_scene = scene.TutorialScene()
elif settings_list[WORLD_UNLOCKED] == 1:
new_scene = scene.ForestScene()
elif settings_list[WORLD_UNLOCKED] == 2:
new_scene = scene.RockyScene()
elif settings_list[WORLD_UNLOCKED] == 3:
new_scene = scene.PinkScene()
game_is_running = True
main_menu_dictionary = {
START_GAME: ('Play', 'Start a New Game'),
OPTION_MENU: ('Options', 'Change Sound and Video Options'),
CREDIT_MENU: ('Credits', 'Who We Are, What We Did'),
EXIT_GAME: ('Exit', 'Exit the Game')
}
world_menu_dictionary = {
TUTORIAL: ('Tutorial', 'Start the Tutorial [Learn]'),
WORLD1: ('Cloudopolis', 'Start Playing Cloudopolis [Apprentice]'),
WORLD2: ('Nightmaria', 'Start Playing Nightmaria [Journeyman]'),
WORLD3: ('Opulent Dream', 'Start Playing Opulent Dream [Master]'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
option_menu_dictionary = {
SOUND_MENU: ('Sound Options', 'Change Sound Options'),
DISPLAY_MENU: ('Video Options', 'Change Video Options'),
CHANGE_SENSITIVITY: ('Mouse Sensitivity: ' + prettyprint.mouse_sensitivity(settings_list[SENSITIVITY]), 'Change Mouse Sensitivity'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
sound_menu_dictionary = {
TOGGLE_SFX: ('Sound Effects: ' + prettyprint.on(settings_list[SFX]), 'Turn ' + prettyprint.on(not settings_list[SFX]) + ' Sound Effects'),
TOGGLE_MUSIC: ('Music: ' + prettyprint.on(settings_list[MUSIC]), 'Turn ' + prettyprint.on(not settings_list[MUSIC]) + ' Music'),
EXIT_OPTIONS: ('Back', 'Go Back to the Option Menu')
}
display_menu_dictionary = {
TOGGLE_PARTICLES: ('Particles: ' + prettyprint.able(settings_list[PARTICLES]), 'Turn ' + prettyprint.on(not settings_list[PARTICLES]) + ' Particle Effects'),
TOGGLE_FULLSCREEN: ('Video Mode: ' + prettyprint.screen_mode(settings_list[SETTING_FULLSCREEN]), 'Switch To ' + prettyprint.screen_mode(not settings_list[SETTING_FULLSCREEN]) + ' Mode'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
sensitivity_menu_dictionary = {
0: ('Very Low', 'Change Sensitivity to Very Low'),
1: ('Low', 'Change Sensitivity to Low'),
2: ('Normal', 'Change Sensitivity to Normal'),
3: ('High', 'Change Sensitivity to High'),
4: ('Very High', 'Change Sensitivity to Very High')
}
menu_bounds = (0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT)
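# Main menu loop: each menu.Menu(...).show() call blocks until the player picks
# an entry, and the returned constant decides which sub-menu or game world runs.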
while game_is_running:
menu_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Trouble in Cloudland', 80, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
main_menu_dictionary).show()
if menu_result == START_GAME:
last_highlighted = settings_list[WORLD_UNLOCKED]
world_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Choose a World', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
world_menu_dictionary,
last_highlighted).show()
if world_result == TUTORIAL:
game.Game(screen, 0, music_list).run()
elif world_result == EXIT_OPTIONS:
world_result = False
elif world_result is not False:
utility.fade_music()
utility.play_music(music_list[world_result - 1], True)
game.Game(screen, world_result - 1, music_list).run()
elif menu_result == OPTION_MENU:
option_result = True
last_highlighted = 0
while option_result:
option_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Options', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
option_menu_dictionary,
last_highlighted).show()
if option_result == SOUND_MENU:
sound_result = True
last_highlighted = 0
while sound_result:
sound_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Sound Options', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
sound_menu_dictionary,
last_highlighted).show()
if sound_result == TOGGLE_SFX:
settings_list[SFX] = not settings_list[SFX]
last_highlighted = 0
elif sound_result == TOGGLE_MUSIC:
settings_list[MUSIC] = not settings_list[MUSIC]
if not settings_list[MUSIC]:
pygame.mixer.Channel(MUSIC_CHANNEL).stop()
last_highlighted = 1
elif sound_result == EXIT_OPTIONS:
sound_result = False
sound_menu_dictionary = {
TOGGLE_SFX: ('Sound Effects: ' + prettyprint.on(settings_list[SFX]), 'Turn ' + prettyprint.on(not settings_list[SFX]) + ' Sound Effects'),
TOGGLE_MUSIC: ('Music: ' + prettyprint.on(settings_list[MUSIC]), 'Turn ' + prettyprint.on(not settings_list[MUSIC]) + ' Music'),
EXIT_OPTIONS: ('Back','Go Back to the Option Menu')
}
if option_result == DISPLAY_MENU:
display_result = True
last_highlighted = 0
while display_result:
display_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Video Options', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
display_menu_dictionary,
last_highlighted).show()
if display_result == TOGGLE_PARTICLES:
settings_list[PARTICLES] = not settings_list[PARTICLES]
last_highlighted = 0
elif display_result == TOGGLE_FULLSCREEN:
settings_list[SETTING_FULLSCREEN] = not settings_list[SETTING_FULLSCREEN]
last_highlighted = 1
if settings_list[SETTING_FULLSCREEN]:
screen = utility.set_fullscreen()
else:
screen = utility.set_fullscreen(False)
pygame.mouse.set_visible(False)
elif display_result == EXIT_OPTIONS:
display_result = False
display_menu_dictionary = {
TOGGLE_PARTICLES: ('Particles: ' + prettyprint.able(settings_list[PARTICLES]), 'Turn ' + prettyprint.on(not settings_list[PARTICLES]) + ' Particle Effects'),
TOGGLE_FULLSCREEN: ('Video Mode: ' + prettyprint.screen_mode(settings_list[SETTING_FULLSCREEN]), 'Switch To ' + prettyprint.screen_mode(not settings_list[SETTING_FULLSCREEN]) + ' Mode'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
elif option_result == EXIT_OPTIONS:
option_result = False
elif option_result == CHANGE_SENSITIVITY:
sensitivity_result = True
last_highlighted = 0
while sensitivity_result:
sensitivity_menu = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Mouse Sensitivity', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
sensitivity_menu_dictionary,
last_highlighted)
sensitivity_result = sensitivity_menu.show()
mouse_sensitivities = [0.5, 0.75, 1, 1.25, 1.5]
settings_list[SENSITIVITY] = mouse_sensitivities[sensitivity_result]
if sensitivity_result > 0:
sensitivity_result = False
option_menu_dictionary = {
SOUND_MENU: ('Sound Options', 'Change Sound Options'),
DISPLAY_MENU: ('Video Options', 'Change Video Options'),
CHANGE_SENSITIVITY: ('Mouse Sensitivity: ' + prettyprint.mouse_sensitivity(settings_list[SENSITIVITY]), 'Change Mouse Sensitivity'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
elif menu_result == CREDIT_MENU:
credits.Credits(screen, music_list[MENU_MUSIC])
elif menu_result == EXIT_GAME:
game_is_running = False
utility.write_settings()
splashscreen.SplashScreen(screen, 'outroSplash')
quit()
|
JoshuaSkelly/TroubleInCloudLand
|
main.py
|
Python
|
mit
| 12,372
|
from django import http
from django.conf.urls import patterns
from django.contrib import admin
from django.db import models
from django.forms.models import modelform_factory
from django.shortcuts import get_object_or_404
from django.template import loader, Context
from django.views.generic import View
def get_printable_field_value(instance, fieldname):
""" Get the display value of a model field, showing a comma-delimited
list for M2M fields.
"""
field = instance._meta.get_field(fieldname)
field_value = getattr(instance, fieldname)
if isinstance(field, models.ManyToManyField):
field_value = ', '.join([unicode(f) for f in
field_value.all()])
return field_value
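# Illustrative behaviour (the model instance and field names are assumptions):
#   get_printable_field_value(article, 'title')  ->  "My title"
#   get_printable_field_value(article, 'tags')   ->  "news, python"  (for an M2M field)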
class AjaxModelFormView(View):
""" Handles AJAX updates of a single field on an object
(You likely don't need to use this directly as the admin
registers a URL for it itself.)
"""
model = None
valid_fields = None
def __init__(self, model, valid_fields, **kwargs):
self.model = model
self.valid_fields = valid_fields
def post(self, request, object_id, *args, **kwargs):
if not request.user or not request.user.is_staff:
return http.HttpResponseForbidden()
request = request.POST.copy()
fieldname = request.pop('field', None)[0]
form_prefix = request.pop('prefix', None)[0]
# prevent setting fields that weren't made AJAX-editable
if fieldname not in self.valid_fields:
return http.HttpResponseBadRequest()
ItemForm = modelform_factory(self.model, fields=(fieldname,))
instance = get_object_or_404(self.model, pk=object_id)
form = ItemForm(request, instance=instance, prefix=form_prefix)
if not form or not form.is_valid():
return http.HttpResponseBadRequest()
form.save()
new_value = get_printable_field_value(instance, fieldname)
return http.HttpResponse(new_value)
class AjaxModelAdmin(admin.ModelAdmin):
""" Admin class providing support for inline forms in
listview that are submitted through AJAX.
"""
def __init__(self, *args, **kwargs):
HANDLER_NAME_TPL = "_%s_ajax_handler"
if not hasattr(self, 'ajax_list_display'):
self.ajax_list_display = []
self.list_display = list(self.list_display)
self.list_display = self.list_display + map(lambda name: HANDLER_NAME_TPL % name,
self.ajax_list_display)
super(AjaxModelAdmin, self).__init__(*args, **kwargs)
for name in self.ajax_list_display:
setattr(self, HANDLER_NAME_TPL % name,
self._get_field_handler(name))
self.ajax_item_template = loader.get_template('ajax_changelist/'
'field_form.html')
def get_urls(self):
""" Add endpoint for saving a new field value. """
urls = super(AjaxModelAdmin, self).get_urls()
list_urls = patterns('',
(r'^(?P<object_id>\d+)$',
AjaxModelFormView.as_view(model=self.model,
valid_fields=self.ajax_list_display)))
return list_urls + urls
def _get_field_handler(self, fieldname):
""" Handle rendering of AJAX-editable fields for the changelist, by
dynamically building a callable for each field.
"""
def handler_function(obj, *args, **kwargs):
ItemForm = modelform_factory(self.model, fields=(fieldname,))
form = ItemForm(instance=obj, prefix="c" + unicode(obj.id))
field_value = get_printable_field_value(obj, fieldname)
# Render the field value and edit form
return self.ajax_item_template.render(Context({
'object_id': obj.id,
'field_name': fieldname,
'form': form.as_p(),
'field_value': field_value
}))
handler_function.allow_tags = True
handler_function.short_description = fieldname
return handler_function
class Media:
#FIXME: dripping jQueries is straight-up wack.
js = ('//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'ajax_changelist/js/lib/jquery.django_csrf.js',
'ajax_changelist/js/admin.js',)
css = {
'all': ('ajax_changelist/css/admin.css',)
}
|
SohoTechLabs/django-ajax-changelist
|
ajax_changelist/admin.py
|
Python
|
mit
| 4,495
|
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
from __future__ import print_function
from argparse import ArgumentParser
import collections
import gzip
import os
from Bio import SeqIO
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir, is_gzipped
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.pipeline.run_info import ALLOWED_CONTIG_NAME_CHARS
from bcbio.galaxy import loc
from bcbio.log import logger
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def extract_if_gzipped(filename):
stem, ext = os.path.splitext(filename)
if ext == ".gz":
subprocess.check_call("gzip -cd %s > %s" % (filename, stem), shell=True)
return stem
else:
return filename
def gff3_to_gtf(gff3_file):
dialect = {'field separator': '; ',
'fmt': 'gtf',
'keyval separator': ' ',
'leading semicolon': False,
'multival separator': ',',
'quoted GFF2 values': True,
'order': ['gene_id', 'transcript_id'],
'repeated keys': False,
'trailing semicolon': True}
out_file = os.path.splitext(gff3_file)[0] + ".gtf"
if file_exists(out_file):
return out_file
logger.info("Converting %s to %s." % (gff3_file, out_file))
if _is_from_ncbi(gff3_file):
logger.info("NCBI format detected by the presence of the %s key."
% _is_from_ncbi(gff3_file))
_output_ncbi_gff3(gff3_file, out_file, dialect)
else:
_output_gff3(gff3_file, out_file, dialect)
return out_file
def _output_gff3(gff3_file, out_file, dialect):
db = gffutils.create_db(gff3_file, ":memory:")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
transcript_id = feature["Parent"][0]
gene_id = db[transcript_id]["Parent"][0]
attr = {"transcript_id": transcript_id, "gene_id": gene_id}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print(feature, file=out_handle, end="")
def _output_ncbi_gff3(gff3_file, out_file, dialect):
gene_key = "gene"
id_spec = {"gene": gene_key}
db = gffutils.create_db(gff3_file, ":memory:", id_spec=id_spec)
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
# Gnomon features are often missing a transcript id
# some malformed features are also missing the gene key
try:
transcript_id = feature["transcript_id"]
except KeyError:
try:
transcript_id = feature[gene_key]
except KeyError:
continue
gene_id = feature[gene_key]
try:
biotype = feature["gene_biotype"]
except KeyError:
biotype = "unknown"
attr = {"transcript_id": transcript_id, "gene_id": gene_id,
"gene_biotype": biotype}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print(feature, file=out_handle, end="")
def _is_from_ncbi(gff3_file):
with open(gff3_file) as in_handle:
for line in tz.take(10000, in_handle):
if "Dbxref" in line:
return "Dbxref"
if "db_xref" in line:
return "db_xref"
return None
def _index_w_command(env, dir_name, command, ref_file, pre=None, post=None, ext=None):
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
build_path = os.path.join(os.path.dirname(ref_file), os.pardir)
out_dir = os.path.join(build_path, dir_name)
index_path = os.path.join(out_dir, index_name)
safe_makedir(out_dir)
subprocess.check_call(command.format(ref_file=ref_file,
index_name=index_path), shell=True)
return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
name_dir = os.path.join(genome_dir, name)
safe_makedir(name_dir)
build_dir = os.path.join(name_dir, build)
safe_makedir(build_dir)
seq_dir = os.path.join(build_dir, SEQ_DIR)
safe_makedir(seq_dir)
if gtf:
gtf_dir = os.path.join(build_dir, RNASEQ_DIR)
safe_makedir(gtf_dir)
return build_dir
def install_fasta_file(build_dir, fasta, build):
out_file = os.path.join(build_dir, SEQ_DIR, build + ".fa")
if not file_exists(out_file):
recs = SeqIO.parse(fasta, "fasta")
with open(out_file, "w") as out_handle:
SeqIO.write((_clean_rec_name(rec) for rec in recs), out_handle, "fasta")
return out_file
def _clean_rec_name(rec):
"""Clean illegal characters in input fasta file which cause problems downstream.
"""
out_id = []
for char in list(rec.id):
if char in ALLOWED_CONTIG_NAME_CHARS:
out_id.append(char)
else:
out_id.append("_")
rec.id = "".join(out_id)
rec.description = ""
return rec
def install_gtf_file(build_dir, gtf, build):
out_file = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
if not file_exists(out_file):
if is_gzipped(gtf):
            with gzip.open(gtf, 'rb') as in_handle:
with open(out_file, 'wb') as out_handle:
shutil.copyfileobj(in_handle, out_handle)
else:
shutil.copyfile(gtf, out_file)
return out_file
def install_srna(species, gtf):
out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
safe_makedir(SRNASEQ_DIR)
if gtf:
if not file_exists(out_file):
shutil.copyfile(gtf, out_file)
try:
from seqcluster import install
except ImportError:
raise ImportError("install seqcluster first, please.")
with chdir(SRNASEQ_DIR):
hairpin, miRNA = install._install_mirbase()
cmd = ("cat %s | awk '{if ($0~/>%s/){name=$0; print name} else if ($0~/^>/){name=0};if (name!=0 && $0!~/^>/){print $0;}}' | sed 's/U/T/g' > hairpin.fa")
do.run(cmd % (hairpin, species), "set precursor.")
cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
do.run(cmd.format(**locals()), "set miRNA.")
shutil.rmtree("mirbase")
return out_file
def append_ercc(gtf_file, fasta_file):
ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
print(append_fa_cmd.format(**locals()))
subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
print(append_gtf_cmd.format(**locals()))
subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
class MyParser(ArgumentParser):
def error(self, message):
self.print_help()
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
print("\nCurrent genomes\n")
print(open(loc.get_loc_file(galaxy_base, "samtools")).read())
sys.exit(0)
if __name__ == "__main__":
description = ("Set up a custom genome for bcbio-nextgen. This will "
"place the genome under name/build in the genomes "
"directory in your bcbio-nextgen installation.")
parser = MyParser(description=description)
parser.add_argument("-c", "--cores", default=1,
help="number of cores to use")
parser.add_argument("-f", "--fasta", required=True,
help="FASTA file of the genome.")
parser.add_argument("--gff3", default=False, action='store_true',
help="File is a GFF3 file.")
parser.add_argument("-g", "--gtf", default=None,
help="GTF file of the transcriptome")
parser.add_argument("-n", "--name", required=True,
help="Name of organism, for example Hsapiens.")
parser.add_argument("-b", "--build", required=True,
help="Build of genome, for example hg19.")
parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
default=["seq"], help="Space separated list of indexes to make")
parser.add_argument("--ercc", action='store_true', default=False,
help="Add ERCC spike-ins.")
parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
parser.add_argument("--buildversion", required=True,
help=("String describing build of genome used. Examples: "
"Ensembl_94, EnsemblMetazoa_94, Flybase_21, etc"))
args = parser.parse_args()
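    # Example invocation (illustrative only; file names, organism/build labels
    # and index choices are assumptions):
    #   bcbio_setup_genome.py -f genome.fa -g annotation.gtf -i seq bowtie2 \
    #       -n Celegans -b WBcel235 --buildversion Ensembl_94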
# if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
# raise ValueError("--mirbase and --srna_gtf both need a value.")
os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
cbl = get_cloudbiolinux(REMOTES)
sys.path.insert(0, cbl["dir"])
genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
# monkey patch cloudbiolinux to use this indexing command instead
genomes = getattr(genomemod, 'genomes')
genomes._index_w_command = _index_w_command
genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
args.fasta = os.path.abspath(args.fasta)
if not file_exists(args.fasta):
print("%s does not exist, exiting." % args.fasta)
sys.exit(1)
args.gtf = os.path.abspath(args.gtf) if args.gtf else None
if args.gtf and not file_exists(args.gtf):
print("%s does not exist, exiting." % args.gtf)
sys.exit(1)
args.srna_gtf = os.path.abspath(args.srna_gtf) if args.srna_gtf else None
gtf_file = args.gtf
if args.gff3:
gtf_file = extract_if_gzipped(gtf_file)
gtf_file = gff3_to_gtf(gtf_file)
# always make a sequence dictionary
if "seq" not in args.indexes:
args.indexes.append("seq")
prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
print("Creating directories using %s as the base." % (genome_dir))
build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
os.chdir(build_dir)
print("Genomes will be installed into %s." % (build_dir))
fasta_file = extract_if_gzipped(args.fasta)
fasta_file = install_fasta_file(build_dir, fasta_file, args.build)
print("Installed genome as %s." % (fasta_file))
if args.gtf:
if "bowtie2" not in args.indexes:
args.indexes.append("bowtie2")
gtf_file = install_gtf_file(build_dir, gtf_file, args.build)
print("Installed GTF as %s." % (gtf_file))
if args.ercc:
print("Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file))
append_ercc(gtf_file, fasta_file)
indexed = {}
Env = collections.namedtuple("Env", "system_install, cores")
env = Env(genome_dir, args.cores)
for index in args.indexes:
print("Creating the %s index." % (index))
index_fn = genomes.get_index_fn(index)
if not index_fn:
print("Do not know how to make the index %s, skipping." % (index))
continue
indexed[index] = index_fn(env, fasta_file)
indexed["samtools"] = fasta_file
if args.gtf:
"Preparing transcriptome."
with chdir(os.path.join(build_dir, os.pardir)):
cmd = ("{sys.executable} {prepare_tx} --buildversion {args.buildversion} --cores {args.cores} --genome-dir {genome_dir} "
"--gtf {gtf_file} {args.name} {args.build}")
subprocess.check_call(cmd.format(**locals()), shell=True)
if args.mirbase:
"Preparing smallRNA data."
with chdir(os.path.join(build_dir)):
install_srna(args.mirbase, args.srna_gtf)
base_dir = os.path.normpath(os.path.dirname(fasta_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
print("Dumping genome resources to %s." % resource_file)
resource_dict = {"version": 1}
if args.gtf:
transcripts = ["rnaseq", "transcripts"]
mask = ["rnaseq", "transcripts_mask"]
index = ["rnaseq", "transcriptome_index", "tophat"]
dexseq = ["rnaseq", "dexseq"]
refflat = ["rnaseq", "refflat"]
rRNA_fa = ["rnaseq", "rRNA_fa"]
resource_dict = tz.update_in(resource_dict, transcripts,
lambda x: "../rnaseq/ref-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, mask,
lambda x: "../rnaseq/ref-transcripts-mask.gtf")
resource_dict = tz.update_in(resource_dict, index,
lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
resource_dict = tz.update_in(resource_dict, refflat,
lambda x: "../rnaseq/ref-transcripts.refFlat")
resource_dict = tz.update_in(resource_dict, dexseq,
lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
resource_dict = tz.update_in(resource_dict, rRNA_fa,
lambda x: "../rnaseq/rRNA.fa")
if args.mirbase:
srna_gtf = ["srnaseq", "srna_transcripts"]
srna_mirbase = ["srnaseq", "mirbase_hairpin"]
resource_dict = tz.update_in(resource_dict, srna_gtf,
lambda x: "../srnaseq/srna-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, srna_mirbase,
lambda x: "../srnaseq/hairpin.fa")
    # write out resource dictionary
with file_transaction(resource_file) as tx_resource_file:
with open(tx_resource_file, "w") as out_handle:
out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
print("Updating Galaxy .loc files.")
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
for index, index_file in indexed.items():
if index_file:
loc.update_loc_file(galaxy_base, index, args.build, index_file)
print("Genome installation complete.")
|
a113n/bcbio-nextgen
|
scripts/bcbio_setup_genome.py
|
Python
|
mit
| 15,182
|
from .ctp_gateway import CtpGateway
|
bigdig/vnpy
|
vnpy/gateway/ctp/__init__.py
|
Python
|
mit
| 35
|
import os
from jedi._compatibility import FileNotFoundError, force_unicode, scandir
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = u'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += force_unicode(s)
first = False
return string
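# When completion happens inside an os.path.join(...) call, the arguments that
# were already passed are inferred and joined with path separators, so the file
# name completion is relative to that partially built path.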
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
|
sserrot/champion_relationships
|
venv/Lib/site-packages/jedi/api/file_name.py
|
Python
|
mit
| 5,707
|
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/mssdk.py
|
Python
|
mit
| 1,834
|
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from models import UserOpenidAssociation
class OpenIDInline(admin.StackedInline):
model = UserOpenidAssociation
class UserAdminWithOpenIDs(UserAdmin):
inlines = [OpenIDInline]
# Add OpenIDs to the user admin, but only if User has been registered
try:
admin.site.unregister(User)
admin.site.register(User, UserAdminWithOpenIDs)
except NotRegistered:
pass
#from models import Nonce, Association
#admin.site.register(Nonce)
#admin.site.register(Association)
|
indro/t2c
|
apps/external_apps/django_openid/admin.py
|
Python
|
mit
| 658
|
#!/usr/bin/python3
import sys, getpass, urllib.request, urllib.error, json
def mgmt(cmd, data=None, is_json=False):
# The base URL for the management daemon. (Listens on IPv4 only.)
mgmt_uri = 'http://127.0.0.1:10222'
setup_key_auth(mgmt_uri)
req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
if e.code == 401:
try:
print(e.read().decode("utf8"))
except:
pass
print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
elif hasattr(e, 'read'):
print(e.read().decode('utf8'), file=sys.stderr)
else:
print(e, file=sys.stderr)
sys.exit(1)
resp = response.read().decode('utf8')
if is_json: resp = json.loads(resp)
return resp
def read_password():
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
while first != second:
print('Passwords not the same. Try again.')
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
return first
def setup_key_auth(mgmt_uri):
key = open('/var/lib/mailinabox/api.key').read().strip()
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(
realm='Mail-in-a-Box Management Server',
uri=mgmt_uri,
user=key,
passwd='')
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
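# Note: the management daemon uses HTTP basic auth where the API key acts as
# the username and the password is left empty (see setup_key_auth above).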
if len(sys.argv) < 2:
print("Usage: ")
print(" tools/mail.py user (lists users)")
print(" tools/mail.py user add user@domain.com [password]")
print(" tools/mail.py user password user@domain.com [password]")
print(" tools/mail.py user remove user@domain.com")
print(" tools/mail.py user make-admin user@domain.com")
print(" tools/mail.py user remove-admin user@domain.com")
print(" tools/mail.py user admins (lists admins)")
print(" tools/mail.py alias (lists aliases)")
print(" tools/mail.py alias add incoming.name@domain.com sent.to@other.domain.com")
print(" tools/mail.py alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'")
print(" tools/mail.py alias remove incoming.name@domain.com")
print()
print("Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.")
print()
elif sys.argv[1] == "user" and len(sys.argv) == 2:
# Dump a list of users, one per line. Mark admins with an asterisk.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if user['status'] == 'inactive': continue
print(user['email'], end='')
if "admin" in user['privileges']:
print("*", end='')
print()
elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"):
if len(sys.argv) < 5:
if len(sys.argv) < 4:
email = input("email: ")
else:
email = sys.argv[3]
pw = read_password()
else:
email, pw = sys.argv[3:5]
if sys.argv[2] == "add":
print(mgmt("/mail/users/add", { "email": email, "password": pw }))
elif sys.argv[2] == "password":
print(mgmt("/mail/users/password", { "email": email, "password": pw }))
elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/users/remove", { "email": sys.argv[3] }))
elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and len(sys.argv) == 4:
if sys.argv[2] == "make-admin":
action = "add"
else:
action = "remove"
print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" }))
elif sys.argv[1] == "user" and sys.argv[2] == "admins":
# Dump a list of admin users.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if "admin" in user['privileges']:
print(user['email'])
elif sys.argv[1] == "alias" and len(sys.argv) == 2:
print(mgmt("/mail/aliases"))
elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5:
print(mgmt("/mail/aliases/add", { "source": sys.argv[3], "destination": sys.argv[4] }))
elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/aliases/remove", { "source": sys.argv[3] }))
else:
print("Invalid command-line arguments.")
sys.exit(1)
|
Toilal/mailinabox
|
tools/mail.py
|
Python
|
cc0-1.0
| 4,331
|
# flags.py
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import shlex
import selinux
from .util import open # pylint: disable=redefined-builtin
class Flags(object):
def __init__(self):
#
# mode of operation
#
self.testing = False
self.installer_mode = False
#
# minor modes (installer-specific)
#
self.automated_install = False
self.live_install = False
self.image_install = False
#
# enable/disable functionality
#
self.selinux = selinux.is_selinux_enabled()
self.multipath = True
self.dmraid = True
self.ibft = True
self.noiswmd = False
self.gfs2 = True
self.jfs = True
self.reiserfs = True
self.arm_platform = None
self.gpt = False
self.multipath_friendly_names = True
# set to False to suppress the default LVM behavior of saving
# backup metadata in /etc/lvm/{archive,backup}
self.lvm_metadata_backup = True
# whether to include nodev filesystems in the devicetree (only
# meaningful when flags.installer_mode is False)
self.include_nodev = False
self.boot_cmdline = {}
self.update_from_boot_cmdline()
self.allow_imperfect_devices = True
def get_boot_cmdline(self):
buf = open("/proc/cmdline").read().strip()
args = shlex.split(buf)
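        # Only arguments of the form key=value are recorded here; bare flags
        # without a value are skipped by the `if val` check below.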
for arg in args:
(opt, _equals, val) = arg.partition("=")
if val:
self.boot_cmdline[opt] = val
def update_from_boot_cmdline(self):
self.get_boot_cmdline()
if "nompath" in self.boot_cmdline:
self.multipath = False
if "nodmraid" in self.boot_cmdline:
self.dmraid = False
if "noiswmd" in self.boot_cmdline:
self.noiswmd = True
def update_from_anaconda_flags(self, anaconda_flags):
self.installer_mode = True
self.testing = anaconda_flags.testing
self.automated_install = anaconda_flags.automatedInstall
self.live_install = anaconda_flags.livecdInstall
self.image_install = anaconda_flags.imageInstall
self.selinux = anaconda_flags.selinux
self.gfs2 = "gfs2" in self.boot_cmdline
self.jfs = "jfs" in self.boot_cmdline
self.reiserfs = "reiserfs" in self.boot_cmdline
self.arm_platform = anaconda_flags.armPlatform
self.gpt = anaconda_flags.gpt
self.multipath_friendly_names = anaconda_flags.mpathFriendlyNames
self.allow_imperfect_devices = anaconda_flags.rescue_mode
self.ibft = anaconda_flags.ibft
self.dmraid = anaconda_flags.dmraid
# We don't want image installs writing backups of the *image* metadata
# into the *host's* /etc/lvm. This can get real messy on build systems.
if self.image_install:
self.lvm_metadata_backup = False
flags = Flags()
|
kellinm/blivet
|
blivet/flags.py
|
Python
|
gpl-2.0
| 3,954
|
import os
from functools import partial
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayer
from qgis.gui import QgsExpressionBuilderDialog
from roam.api.utils import layer_by_name
from configmanager.models import QgsLayerModel, QgsFieldModel
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_listwidget_config import Ui_Form
class ListWidgetConfig(Ui_Form, ConfigWidget):
description = 'Select an item from a predefined list'
def __init__(self, parent=None):
super(ListWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.allownull = False
self.orderby = False
self.orderbyCheck.hide()
self.layerRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 0))
self.listRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 1))
self.layermodel = QgsLayerModel(watchregistry=False)
self.layermodel.layerfilter = [QgsMapLayer.VectorLayer]
self.fieldmodel = QgsFieldModel()
self.blockSignals(True)
self.layerCombo.setModel(self.layermodel)
self.keyCombo.setModel(self.fieldmodel)
self.valueCombo.setModel(self.fieldmodel)
self.filterButton.pressed.connect(self.define_filter)
self.fieldmodel.setLayerFilter(self.layerCombo.view().selectionModel())
self.reset()
self.blockSignals(False)
def define_filter(self):
layer = self.layerCombo.currentText()
if not layer:
return
layer = layer_by_name(layer)
dlg = QgsExpressionBuilderDialog(layer, "List filter", self)
text = self.filterText.toPlainText()
dlg.setExpressionText(text)
if dlg.exec_():
self.filterText.setPlainText(dlg.expressionText())
def reset(self):
self.listtype = 'layer'
self.listText.setPlainText('')
self.orderby = False
self.allownull = False
self.filterText.setPlainText('')
self.layerCombo.setCurrentIndex(-1)
self.keyCombo.setCurrentIndex(-1)
self.valueCombo.setCurrentIndex(-1)
def widgetchanged(self):
self.widgetdirty.emit(self.getconfig())
@property
def allownull(self):
return self.allownullCheck.isChecked()
@allownull.setter
def allownull(self, value):
self.allownullCheck.setChecked(value)
@property
def orderby(self):
return self.orderbyCheck.isChecked()
@orderby.setter
def orderby(self, value):
self.orderbyCheck.setChecked(value)
@property
def list(self):
return [item for item in self.listText.toPlainText().split('\n')]
@property
def filter(self):
return self.filterText.toPlainText()
@property
def layer(self):
return self.layerCombo.currentText()
@property
def key(self):
index_key = self.fieldmodel.index(self.keyCombo.currentIndex(), 0)
fieldname_key = self.fieldmodel.data(index_key, QgsFieldModel.FieldNameRole)
return fieldname_key
@property
def value(self):
index_value = self.fieldmodel.index(self.valueCombo.currentIndex(), 0)
return self.fieldmodel.data(index_value, QgsFieldModel.FieldNameRole)
def getconfig(self):
config = {}
config['allownull'] = self.allownull
config['orderbyvalue'] = self.orderby
if self.layerRadio.isChecked():
subconfig = {}
# TODO Grab the data here and not just the text
subconfig['layer'] = self.layer
subconfig['key'] = self.key
subconfig['value'] = self.value
subconfig['filter'] = self.filter
config['layer'] = subconfig
else:
config['list'] = {}
config['list']['items'] = self.list
return config
def blockSignals(self, bool):
for child in self.findChildren(QWidget):
child.blockSignals(bool)
super(ListWidgetConfig, self).blockSignals(bool)
def setconfig(self, config):
self.blockSignals(True)
self.allownull = config.get('allownull', True)
self.orderby = config.get('orderbyvalue', False)
#Clear the widgets
self.listText.setPlainText('')
self.keyCombo.clear()
self.valueCombo.clear()
self.filterText.clear()
self.layermodel.refresh()
# Rebind all the values
if 'list' in config:
subconfig = config.get('list', {})
self.listRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(1)
listitems = subconfig.get('items', [])
itemtext = '\n'.join(listitems)
self.listText.setPlainText(itemtext)
else:
self.layerRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(0)
subconfig = config.get('layer', {})
layer = subconfig.get('layer', '') or ''
key = subconfig.get('key', '') or ''
value = subconfig.get('value', '') or ''
filter = subconfig.get('filter', None)
index = self.layerCombo.findData(layer, Qt.DisplayRole)
if index > -1:
self.layerCombo.setCurrentIndex(index)
index = self.layermodel.index(index, 0)
self.fieldmodel.updateLayer(index, None)
keyindex = self.keyCombo.findData(key.lower(), QgsFieldModel.FieldNameRole)
if keyindex > -1:
self.keyCombo.setCurrentIndex(keyindex)
valueindex = self.valueCombo.findData(value.lower(), QgsFieldModel.FieldNameRole)
if valueindex > -1:
self.valueCombo.setCurrentIndex(valueindex)
self.filterText.setPlainText(filter)
self.allownullCheck.setChecked(self.allownull)
self.orderbyCheck.setChecked(self.orderby)
self.blockSignals(False)
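# --- Hedged illustration (not part of upstream Roam) ---
# getconfig()/setconfig() above exchange one of two dict shapes depending on
# which radio button is active; the layer and field names below are made up
# for the example:
#
#   {'allownull': True, 'orderbyvalue': False,
#    'layer': {'layer': 'capture_points', 'key': 'code',
#              'value': 'description', 'filter': '"active" = 1'}}
#
#   {'allownull': False, 'orderbyvalue': True,
#    'list': {'items': ['Yes', 'No', 'Unknown']}}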
|
lmotta/Roam
|
src/configmanager/editorwidgets/listwidget.py
|
Python
|
gpl-2.0
| 5,997
|
# -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 26 00:00:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys, os
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
#
#class CustomLatexFormatter(LatexFormatter):
# def __init__(self, **options):
# super(CustomLatexFormatter, self).__init__(**options)
# self.verboptions = r"formatcom=\footnotesize"
#
#PygmentsBridge.latex_formatter = CustomLatexFormatter
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3 project'
copyright = u'2006-2019'
#author = u'test'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = u'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'est vtest'
html_title = 'Manual'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# VerbatimBorderColor: make the box around code samples blend into the background
# Tip from https://stackoverflow.com/questions/29403100/how-to-remove-the-box-around-the-code-block-in-restructuredtext-with-sphinx
#
# sphinxcode is the wrapper around \texttt that sphinx.sty provides.
# Redefine it here as needed to change the inline literal font size
# (double backquotes) to either \footnotesize (8pt) or \small (9pt)
#
# See above to change the font size of verbatim code blocks
#
# 'preamble': '',
'preamble': u'''\\usepackage{amssymb}
\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}
\\renewcommand{\\sphinxcode}[1]{\\texttt{\\small{#1}}}
'''
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
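# A hedged variant (not enabled above): to push inline literals down to 8pt
# instead of 9pt, the \sphinxcode redefinition in 'preamble' could use
# \footnotesize rather than \small, i.e.
#   \renewcommand{\sphinxcode}[1]{\texttt{\footnotesize{#1}}}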
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ns-3-manual.tex', u'ns-3 Manual',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... to help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-manual', u'ns-3 Manual',
[u'ns-3 project'], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for texinfo output ---------------------------------------
#texinfo_documents = [
# (master_doc, 'test', u'test Documentation',
# author, 'test', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
hanyassasa87/ns3-802.11ad
|
doc/manual/source/conf.py
|
Python
|
gpl-2.0
| 10,879
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
from invenio.bibupload import xml_marc_to_records, bibupload
import invenio.bibupload as bibupload_module
from invenio.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
return _write(req, msg)
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
return _write(req, msg)
# write temporary file:
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]
if permitted_dbcollids != ['*']: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
        error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
# write temporary file:
if filetype != 'marcxml':
metafile = _transform_input_to_marcxml(file_input=metafile)
user_info = collect_user_info(req)
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "--priority=" + priority)
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1", email_logs_to=None):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
3 - File already exists
"""
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.get_full_name() == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
tempfile.tempdir = CFG_TMPSHAREDDIR
# Move document to be uploaded to temporary folder
tmp_file = tempfile.mktemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
filename = tempfile.mktemp(prefix=identifier + '_')
filedesc = open(filename, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
            # Execute bibupload with the appropriate mode
task_arguments = ('bibupload', user, "--" + mode, "--name=" + docfile, "--priority=" + priority)
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
task_arguments += (date, )
if email_logs_to:
task_arguments += ("--email-logs-to", email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append('MoveError')
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
return None
def perform_basic_upload_checks(xml_record):
""" Performs tests that would provoke the bibupload task to fail with
    an exit status 1, to prevent batchupload from crashing while alerting
    the user about the issue
"""
from invenio.bibupload import writing_rights_p
errors = []
if not writing_rights_p():
errors.append("Error: BibUpload does not have rights to write fulltext files.")
recs = create_records(xml_record, 1, 1)
if recs == []:
errors.append("Error: Cannot parse MARCXML file.")
elif recs[0][0] is None:
errors.append("Error: MARCXML file has wrong format: %s" % recs)
return errors
def perform_upload_check(xml_record, mode):
""" Performs a upload simulation with the given record and mode
@return: string describing errors
@rtype: string
"""
error_cache = []
def my_writer(msg, stream=sys.stdout, verbose=1):
if verbose == 1:
if 'DONE' not in msg:
error_cache.append(msg.strip())
orig_writer = bibupload_module.write_message
bibupload_module.write_message = my_writer
error_cache.extend(perform_basic_upload_checks(xml_record))
if error_cache:
# There has been some critical error
return '\n'.join(error_cache)
recs = xml_marc_to_records(xml_record)
try:
upload_mode = mode[2:]
# Adapt input data for bibupload function
if upload_mode == "r insert-or-replace":
upload_mode = "replace_or_insert"
for record in recs:
if record:
record_strip_empty_volatile_subfields(record)
record_strip_empty_fields(record)
bibupload(record, opt_mode=upload_mode, pretend=True)
finally:
bibupload_module.write_message = orig_writer
return '\n'.join(error_cache)
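# --- Hedged usage sketch (not part of upstream Invenio) ---
# perform_upload_check() dry-runs bibupload with pretend=True and returns the
# captured warnings/errors as one newline-joined string; an empty string means
# the record would upload cleanly. marcxml_text is a placeholder variable:
#
#   problems = perform_upload_check(marcxml_text, '--insert')
#   if problems:
#       ...  # report them to the user instead of queueing the real task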
def _get_useragent(req):
"""Return client user agent from req object."""
user_info = collect_user_info(req)
return user_info['agent']
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _check_client_ip(req):
"""
Is this client permitted to use the service?
"""
client_ip = _get_client_ip(req)
if client_ip in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.keys():
return True
return False
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
client_useragent = _get_useragent(req)
if _CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE.match(client_useragent):
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
check 980 $a values and collection tags in the file to see if they are among the
permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
Useful to make sure that the client does not override other records by
mistake.
"""
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
|
AlbertoPeon/invenio
|
modules/bibupload/lib/batchuploader_engine.py
|
Python
|
gpl-2.0
| 28,252
|
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
# #
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys
IS_PY3K = sys.version_info[0] == 3
if IS_PY3K:
def unicode(s):
return s
def b(s):
return bytes(s, "UTF8")
def l(n):
return n
long = int
else:
def b(s):
return s
def l(n):
return long(n)
unicode = unicode
long = long
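# --- Hedged usage sketch (not part of upstream Shiboken) ---
# The helpers above give the tests one spelling for bytes/long/unicode values
# on both Python 2 and 3: b(...) always yields the bytes type of the running
# interpreter and l(...) always yields its "long" integer type.
if __name__ == "__main__":
    data = b("\x01\x02")   # str on Python 2, bytes on Python 3
    big = l(2 ** 40)       # long on Python 2, plain int on Python 3
    print(type(data), type(big), type(unicode("text")))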
|
codewarrior0/Shiboken
|
tests/py3kcompat.py
|
Python
|
gpl-2.0
| 1,372
|
# Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import re
import os
from gi.repository import Gtk
from senf import fsn2text
import quodlibet
from quodlibet import _
from quodlibet import config
from quodlibet import qltk
from quodlibet import util
from quodlibet.formats import AudioFileError
from quodlibet.plugins import PluginManager
from quodlibet.qltk._editutils import FilterPluginBox, FilterCheckButton
from quodlibet.qltk._editutils import EditingPluginHandler, OverwriteWarning
from quodlibet.qltk._editutils import WriteFailedError
from quodlibet.qltk.wlw import WritingWindow
from quodlibet.qltk.views import TreeViewColumn
from quodlibet.qltk.cbes import ComboBoxEntrySave
from quodlibet.qltk.models import ObjectStore
from quodlibet.qltk import Icons
from quodlibet.util.tagsfrompath import TagsFromPattern
from quodlibet.util.string.splitters import split_value
from quodlibet.util import connect_obj
from quodlibet.plugins.editing import TagsFromPathPlugin
TBP = os.path.join(quodlibet.get_user_dir(), "lists", "tagpatterns")
TBP_EXAMPLES = """\
<tracknumber>. <title>
<tracknumber> - <title>
<tracknumber> - <artist> - <title>
<artist> - <album>/<tracknumber>. <title>
<artist>/<album>/<tracknumber> - <title>"""
class UnderscoresToSpaces(FilterCheckButton):
_label = _("Replace _underscores with spaces")
_section = "tagsfrompath"
_key = "underscores"
_order = 1.0
def filter(self, tag, value):
return value.replace("_", " ")
class TitleCase(FilterCheckButton):
_label = _("_Title-case tags")
_section = "tagsfrompath"
_key = "titlecase"
_order = 1.1
def filter(self, tag, value):
return util.title(value)
class SplitTag(FilterCheckButton):
_label = _("Split into multiple _values")
_section = "tagsfrompath"
_key = "split"
_order = 1.2
def filter(self, tag, value):
spls = config.gettext("editing", "split_on")
spls = spls.split()
return "\n".join(split_value(value, spls))
class TagsFromPathPluginHandler(EditingPluginHandler):
Kind = TagsFromPathPlugin
class ListEntry:
def __init__(self, song):
self.song = song
self.matches = {}
def get_match(self, key):
return self.matches.get(key, u"")
def replace_match(self, key, value):
self.matches[key] = value
@property
def name(self):
return fsn2text(self.song("~basename"))
class TagsFromPath(Gtk.VBox):
title = _("Tags From Path")
FILTERS = [UnderscoresToSpaces, TitleCase, SplitTag]
handler = TagsFromPathPluginHandler()
@classmethod
def init_plugins(cls):
PluginManager.instance.register_handler(cls.handler)
def __init__(self, parent, library):
super().__init__(spacing=6)
self.set_border_width(12)
hbox = Gtk.HBox(spacing=6)
cbes_defaults = TBP_EXAMPLES.split("\n")
self.combo = ComboBoxEntrySave(TBP, cbes_defaults,
title=_("Path Patterns"),
edit_title=_(u"Edit saved patterns…"))
self.combo.show_all()
hbox.pack_start(self.combo, True, True, 0)
self.preview = qltk.Button(_("_Preview"), Icons.VIEW_REFRESH)
self.preview.show()
hbox.pack_start(self.preview, False, True, 0)
self.pack_start(hbox, False, True, 0)
self.combo.get_child().connect('changed', self._changed)
model = ObjectStore()
self.view = Gtk.TreeView(model=model)
self.view.show()
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
sw.add(self.view)
self.pack_start(sw, True, True, 0)
vbox = Gtk.VBox()
addreplace = Gtk.ComboBoxText()
addreplace.append_text(_("Tags replace existing ones"))
addreplace.append_text(_("Tags are added to existing ones"))
addreplace.set_active(config.getboolean("tagsfrompath", "add"))
addreplace.connect('changed', self.__add_changed)
vbox.pack_start(addreplace, True, True, 0)
addreplace.show()
self.pack_start(vbox, False, True, 0)
filter_box = FilterPluginBox(self.handler, self.FILTERS)
filter_box.connect("preview", self.__filter_preview)
filter_box.connect("changed", self.__filter_changed)
self.filter_box = filter_box
self.pack_start(filter_box, False, True, 0)
# Save button
self.save = qltk.Button(_("_Save"), Icons.DOCUMENT_SAVE)
self.save.show()
bbox = Gtk.HButtonBox()
bbox.set_layout(Gtk.ButtonBoxStyle.END)
bbox.pack_start(self.save, True, True, 0)
self.pack_start(bbox, False, True, 0)
connect_obj(self.preview, 'clicked', self.__preview, None)
connect_obj(parent, 'changed', self.__class__.__preview, self)
# Save changes
connect_obj(self.save, 'clicked', self.__save, addreplace, library)
for child in self.get_children():
child.show()
def __filter_preview(self, *args):
Gtk.Button.clicked(self.preview)
def __filter_changed(self, *args):
self._changed(self.combo.get_child())
def _changed(self, entry):
self.save.set_sensitive(False)
self.preview.set_sensitive(bool(entry.get_text()))
def __add_changed(self, combo):
config.set("tagsfrompath", "add", str(bool(combo.get_active())))
def __preview(self, songs):
if songs is None:
songs = [row[0].song for row in (self.view.get_model() or [])]
if songs:
pattern_text = self.combo.get_child().get_text()
else:
pattern_text = ""
try:
pattern = TagsFromPattern(pattern_text)
except re.error:
qltk.ErrorMessage(
self, _("Invalid pattern"),
_("The pattern\n\t<b>%s</b>\nis invalid. "
"Possibly it contains the same tag twice or "
"it has unbalanced brackets (< / >).") % (
util.escape(pattern_text))).run()
return
else:
if pattern_text:
self.combo.prepend_text(pattern_text)
self.combo.write(TBP)
invalid = []
for header in pattern.headers:
if not min([song.can_change(header) for song in songs]):
invalid.append(header)
if len(invalid) and songs:
if len(invalid) == 1:
title = _("Invalid tag")
msg = _("Invalid tag <b>%s</b>\n\nThe files currently"
" selected do not support editing this tag.")
else:
title = _("Invalid tags")
msg = _("Invalid tags <b>%s</b>\n\nThe files currently"
" selected do not support editing these tags.")
qltk.ErrorMessage(
self, title, msg % ", ".join(invalid)).run()
pattern = TagsFromPattern("")
self.view.set_model(None)
model = ObjectStore()
for col in self.view.get_columns():
self.view.remove_column(col)
render = Gtk.CellRendererText()
col = TreeViewColumn(title=_('File'))
col.pack_start(render, True)
col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
def cell_data_file(column, cell, model, iter_, data):
entry = model.get_value(iter_)
cell.set_property("text", entry.name)
col.set_cell_data_func(render, cell_data_file)
def cell_data_header(column, cell, model, iter_, header):
entry = model.get_value(iter_)
cell.set_property("text", entry.get_match(header))
self.view.append_column(col)
for i, header in enumerate(pattern.headers):
render = Gtk.CellRendererText()
render.set_property('editable', True)
render.connect('edited', self.__row_edited, model, header)
escaped_title = header.replace("_", "__")
col = Gtk.TreeViewColumn(escaped_title, render)
col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col.set_cell_data_func(render, cell_data_header, header)
self.view.append_column(col)
for song in songs:
entry = ListEntry(song)
match = pattern.match(song)
for h in pattern.headers:
text = match.get(h, '')
for f in self.filter_box.filters:
if f.active:
text = f.filter(h, text)
if not song.can_multiple_values(h):
text = u", ".join(text.split("\n"))
entry.matches[h] = text
model.append([entry])
# save for last to potentially save time
if songs:
self.view.set_model(model)
self.preview.set_sensitive(False)
self.save.set_sensitive(len(pattern.headers) > 0)
def __save(self, addreplace, library):
pattern_text = self.combo.get_child().get_text()
pattern = TagsFromPattern(pattern_text)
model = self.view.get_model()
add = bool(addreplace.get_active())
win = WritingWindow(self, len(model))
win.show()
was_changed = set()
all_done = False
for entry in ((model and model.values()) or []):
song = entry.song
changed = False
if not song.valid():
win.hide()
dialog = OverwriteWarning(self, song)
resp = dialog.run()
win.show()
if resp != OverwriteWarning.RESPONSE_SAVE:
break
for i, h in enumerate(pattern.headers):
text = entry.get_match(h)
if text:
can_multiple = song.can_multiple_values(h)
if not add or h not in song or not can_multiple:
song[h] = text
changed = True
else:
for val in text.split("\n"):
if val not in song.list(h):
song.add(h, val)
changed = True
if changed:
try:
song.write()
except AudioFileError:
util.print_exc()
WriteFailedError(self, song).run()
library.reload(song, changed=was_changed)
break
was_changed.add(song)
if win.step():
break
else:
all_done = True
win.destroy()
library.changed(was_changed)
self.save.set_sensitive(not all_done)
def __row_edited(self, renderer, path, new, model, header):
entry = model[path][0]
if entry.get_match(header) != new:
entry.replace_match(header, new)
self.preview.set_sensitive(True)
self.save.set_sensitive(True)
|
Mellthas/quodlibet
|
quodlibet/qltk/tagsfrompath.py
|
Python
|
gpl-2.0
| 11,346
|
import socket
from pyroute2.common import map_namespace
from pyroute2.netlink import nlmsg
from pyroute2.netlink import nla
# address attributes
#
# Important comment:
# For IPv4, IFA_ADDRESS is a prefix address, not a local interface
# address. It makes no difference for normal interfaces, but
# for point-to-point ones IFA_ADDRESS means DESTINATION address,
# and the local address is supplied in IFA_LOCAL attribute.
#
IFA_F_SECONDARY = 0x01
# IFA_F_TEMPORARY IFA_F_SECONDARY
IFA_F_NODAD = 0x02
IFA_F_OPTIMISTIC = 0x04
IFA_F_DADFAILED = 0x08
IFA_F_HOMEADDRESS = 0x10
IFA_F_DEPRECATED = 0x20
IFA_F_TENTATIVE = 0x40
IFA_F_PERMANENT = 0x80
IFA_F_MANAGETEMPADDR = 0x100
IFA_F_NOPREFIXROUTE = 0x200
(IFA_F_NAMES, IFA_F_VALUES) = map_namespace('IFA_F', globals())
# 8<----------------------------------------------
IFA_F_TEMPORARY = IFA_F_SECONDARY
IFA_F_NAMES['IFA_F_TEMPORARY'] = IFA_F_TEMPORARY
IFA_F_VALUES6 = IFA_F_VALUES
IFA_F_VALUES6[IFA_F_TEMPORARY] = 'IFA_F_TEMPORARY'
# 8<----------------------------------------------
class ifaddrmsg(nlmsg):
'''
IP address information
struct ifaddrmsg {
unsigned char ifa_family; /* Address type */
unsigned char ifa_prefixlen; /* Prefixlength of address */
unsigned char ifa_flags; /* Address flags */
unsigned char ifa_scope; /* Address scope */
int ifa_index; /* Interface index */
};
'''
prefix = 'IFA_'
fields = (('family', 'B'),
('prefixlen', 'B'),
('flags', 'B'),
('scope', 'B'),
('index', 'I'))
nla_map = (('IFA_UNSPEC', 'hex'),
('IFA_ADDRESS', 'ipaddr'),
('IFA_LOCAL', 'ipaddr'),
('IFA_LABEL', 'asciiz'),
('IFA_BROADCAST', 'ipaddr'),
('IFA_ANYCAST', 'ipaddr'),
('IFA_CACHEINFO', 'cacheinfo'),
('IFA_MULTICAST', 'ipaddr'),
('IFA_FLAGS', 'uint32'))
class cacheinfo(nla):
fields = (('ifa_prefered', 'I'),
('ifa_valid', 'I'),
('cstamp', 'I'),
('tstamp', 'I'))
@staticmethod
def flags2names(flags, family=socket.AF_INET):
if family == socket.AF_INET6:
ifa_f_values = IFA_F_VALUES6
else:
ifa_f_values = IFA_F_VALUES
ret = []
for f in ifa_f_values:
if f & flags:
ret.append(ifa_f_values[f])
return ret
@staticmethod
def names2flags(flags):
ret = 0
for f in flags:
if f[0] == '!':
f = f[1:]
else:
ret |= IFA_F_NAMES[f]
return ret
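# --- Hedged usage sketch (not part of upstream pyroute2) ---
# The two static helpers above translate between the numeric IFA_F_* bitmask
# carried in the flags field / IFA_FLAGS attribute and the symbolic names
# defined at the top of this file.
if __name__ == "__main__":
    mask = ifaddrmsg.names2flags(['IFA_F_PERMANENT', 'IFA_F_NODAD'])
    print(hex(mask))                    # 0x82
    print(ifaddrmsg.flags2names(mask))  # the same bits back as names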
|
jazzmes/pyroute2
|
pyroute2/netlink/rtnl/ifaddrmsg.py
|
Python
|
gpl-2.0
| 2,709
|
# -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY, SYSTEM_ONLY,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
Argument `flags` sets open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens
journal on local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
name, then the default UTF-8 decoding will be attempted. If
the conversion fails with a ValueError, unconverted bytes
        object will be returned. (Note that ValueError is a superclass
of UnicodeDecodeError).
Reader implements the context manager protocol: the journal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Part of iterator protocol.
Returns self.
"""
return self
if _sys.version_info >= (3,):
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next().
"""
return self.get_next()
else:
def next(self):
"""Part of iterator protocol.
Returns self.get_next().
"""
return self.get_next()
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
"""Return the previous log entry as a mapping type,
currently a standard dictionary of fields.
Optional skip value will return the -`skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
Equivalent to get_next(-skip).
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return unique values appearing in the journal for given `field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal. `timeout` is the maximum
time in seconds to wait, or None, to wait forever.
Returns one of NOP (no change), APPEND (new entries have been
added to the end of the journal), or INVALIDATE (journal files
have been added or removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `realtime` time.
Argument `realtime` must be either an integer unix timestamp
or datetime.datetime instance.
"""
if isinstance(realtime, _datetime.datetime):
realtime = float(realtime.strftime("%s.%f")) * 1000000
return super(Reader, self).seek_realtime(int(realtime))
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either
seconds or a datetime.timedelta instance. Argument `bootid`
is a string or UUID representing which boot the monotonic time
is reference to. Defaults to current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
            monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
        if isinstance(bootid, _uuid.UUID):
            bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
        `messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
        if isinstance(messageid, _uuid.UUID):
            messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID equal to current boot ID or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
def get_catalog(mid):
    if isinstance(mid, _uuid.UUID):
        mid = mid.hex
return _get_catalog(mid)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
else:
return field + '=' + value
def send(MESSAGE, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
    identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
if CODE_LINE == CODE_FILE == CODE_FUNC == None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key, val) for key, val in kwargs.items())
return sendv(*args)
def stream(identifier, priority=LOG_DEBUG, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
    Log messages written to this file as simple newline-separated
text strings are written to the journal.
The file will be line buffered, so messages are actually sent
after a newline character is written.
>>> stream = journal.stream('myapp')
>>> stream
<open file '<fdopen>', mode 'w' at 0x...>
>>> stream.write('message...\n')
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
    Using the interface with print might be more convenient:
>>> from __future__ import print_function
>>> print('message...', file=stream)
priority is the syslog priority, one of `LOG_EMERG`,
`LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`,
`LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority
level prefixes (such as '<1>') are interpreted. See
sd-daemon(3) for more information.
"""
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an
overview: http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(journal.JournalHandler())
>>> log.warn("Some message: %s", detail)
Note that by default, message levels `INFO` and `DEBUG` are
ignored by the logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where
they come from, attach it to the root logger:
>>> logging.root.addHandler(journal.JournalHandler())
For more complex configurations when using `dictConfig` or
`fileConfig`, specify `systemd.journal.JournalHandler` as the
handler class. Only standard handler configuration options
are supported: `level`, `formatter`, `filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warn("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this
handler can be specified as keyword arguments. This probably
makes sense only for SYSLOG_IDENTIFIER and similar fields
which are constant for the whole program:
>>> journal.JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
The following journal fields will be sent:
`MESSAGE`, `PRIORITY`, `THREAD_NAME`, `CODE_FILE`, `CODE_LINE`,
`CODE_FUNC`, `LOGGER` (name as supplied to getLogger call),
`MESSAGE_ID` (optional, see above), `SYSLOG_IDENTIFIER` (defaults
to sys.argv[0]).
"""
def __init__(self, level=_logging.NOTSET, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self._extra = kwargs
def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**self._extra)
except Exception:
self.handleError(record)
@staticmethod
def mapPriority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have
to map numbers in between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
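
# Minimal usage sketch for the Reader and JournalHandler defined above. It
# assumes the default Reader() constructor arguments open the local journal
# and that the compiled systemd bindings are importable; the __main__ guard
# keeps the demo out of ordinary imports of this module.
if __name__ == '__main__':
    # Read INFO-and-above entries from the current boot; get_next() returns
    # an empty dict once the journal is exhausted.
    reader = Reader()
    reader.this_boot()
    reader.log_level(LOG_INFO)
    entry = reader.get_next()
    while entry:
        print(entry.get('MESSAGE', ''))
        entry = reader.get_next()

    # Route Python logging to the journal with a fixed identifier.
    log = _logging.getLogger('journal-demo')
    log.propagate = False
    log.addHandler(JournalHandler(SYSLOG_IDENTIFIER='journal-demo'))
    log.warning('hello from the logging framework')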
|
pragmatux/systemd
|
src/python-systemd/journal.py
|
Python
|
gpl-2.0
| 20,273
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util import epoch2datetime
class StepsConnectorComponent(base.DBConnectorComponent):
# Documentation is in developer/db.rst
url_lock = None
@defer.inlineCallbacks
def getStep(self, stepid=None, buildid=None, number=None, name=None):
tbl = self.db.model.steps
if stepid is not None:
wc = (tbl.c.id == stepid)
else:
if buildid is None:
raise RuntimeError('must supply either stepid or buildid')
if number is not None:
wc = (tbl.c.number == number)
elif name is not None:
wc = (tbl.c.name == name)
else:
raise RuntimeError('must supply either number or name')
wc = wc & (tbl.c.buildid == buildid)
def thd(conn):
q = self.db.model.steps.select(whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._stepdictFromRow(row)
res.close()
return rv
return (yield self.db.pool.do(thd))
# returns a Deferred that returns a value
def getSteps(self, buildid):
def thd(conn):
tbl = self.db.model.steps
q = tbl.select()
q = q.where(tbl.c.buildid == buildid)
q = q.order_by(tbl.c.number)
res = conn.execute(q)
return [self._stepdictFromRow(row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def addStep(self, buildid, name, state_string):
def thd(conn):
tbl = self.db.model.steps
# get the highest current number
r = conn.execute(sa.select([sa.func.max(tbl.c.number)],
whereclause=(tbl.c.buildid == buildid)))
number = r.scalar()
number = 0 if number is None else number + 1
# note that there is no chance for a race condition here,
# since only one master is inserting steps. If there is a
# conflict, then the name is likely already taken.
insert_row = dict(buildid=buildid, number=number,
started_at=None, complete_at=None,
state_string=state_string,
urls_json='[]', name=name)
try:
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
got_id = None
if got_id:
return (got_id, number, name)
# we didn't get an id, so calculate a unique name and use that
# instead. Because names are truncated at the right to fit in a
# 50-character identifier, this isn't a simple query.
res = conn.execute(sa.select([tbl.c.name],
whereclause=((tbl.c.buildid == buildid))))
names = {row[0] for row in res}
num = 1
while True:
numstr = '_%d' % num
newname = name[:50 - len(numstr)] + numstr
if newname not in names:
break
num += 1
insert_row['name'] = newname
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
return (got_id, number, newname)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def startStep(self, stepid):
started_at = int(self.master.reactor.seconds())
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, started_at=started_at)
yield self.db.pool.do(thd)
# returns a Deferred that returns None
def setStepStateString(self, stepid, state_string):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, state_string=state_string)
return self.db.pool.do(thd)
def addURL(self, stepid, name, url, _racehook=None):
        # This method adds a URL to the db.
        # This is a read-modify-write, so several URLs may be added at the
        # same time (e.g. with a DeferredList at the end of a step).
        # The race condition only exists within the same master, as only
        # one master is supposed to add URLs to a buildstep, so a
        # DeferredLock is used to serialize the read-modify-write.
if self.url_lock is None:
# this runs in reactor thread, so no race here..
self.url_lock = defer.DeferredLock()
def thd(conn):
tbl = self.db.model.steps
wc = (tbl.c.id == stepid)
q = sa.select([tbl.c.urls_json],
whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
if _racehook is not None:
_racehook()
urls = json.loads(row.urls_json)
url_item = dict(name=name, url=url)
if url_item not in urls:
urls.append(url_item)
q = tbl.update(whereclause=wc)
conn.execute(q, urls_json=json.dumps(urls))
return self.url_lock.run(lambda: self.db.pool.do(thd))
# returns a Deferred that returns None
def finishStep(self, stepid, results, hidden):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q,
complete_at=int(self.master.reactor.seconds()),
results=results,
hidden=1 if hidden else 0)
return self.db.pool.do(thd)
def _stepdictFromRow(self, row):
return dict(
id=row.id,
number=row.number,
name=row.name,
buildid=row.buildid,
started_at=epoch2datetime(row.started_at),
complete_at=epoch2datetime(row.complete_at),
state_string=row.state_string,
results=row.results,
urls=json.loads(row.urls_json),
hidden=bool(row.hidden))
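
# Standalone sketch of the fallback naming scheme used in addStep() above:
# when the preferred step name collides, a '_N' suffix is appended while the
# result is kept within the 50-character identifier limit. The helper below
# is illustrative only and is not part of the connector API, e.g.
# _uniqueStepNameSketch('compile', {'compile', 'compile_1'}) -> 'compile_2'.
def _uniqueStepNameSketch(name, existing_names, max_len=50):
    if name not in existing_names:
        return name
    num = 1
    while True:
        numstr = '_%d' % num
        candidate = name[:max_len - len(numstr)] + numstr
        if candidate not in existing_names:
            return candidate
        num += 1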
|
tardyp/buildbot
|
master/buildbot/db/steps.py
|
Python
|
gpl-2.0
| 7,224
|
# This file is part of Lurklib.
# Copyright (C) 2011 LK-
#
# Lurklib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lurklib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lurklib. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
class _Connection(object):
def _connect(self, server, port, tls=True, tls_verify=True, proxy=False,
proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connects the socket to an IRC server.
Required arguments:
* server - Server to connect to.
* port - Port to use.
Optional arguments:
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
if proxy:
if proxy_type == 'SOCKS5':
proxy_type = self._m_proxy.PROXY_TYPE_SOCKS5
elif proxy_type == 'SOCKS4':
proxy_type = self._m_proxy.PROXY_TYPE_SOCKS4
elif proxy_type == 'HTTP':
proxy_type = self._m_proxy.PROXY_TYPE_HTTP
self._socket = self._m_proxy.socksocket()
self._socket.setproxy(proxytype=proxy_type, \
addr=proxy_server, \
port=proxy_port, \
username=proxy_username, \
password=proxy_password)
if tls:
if tls_verify:
ca_bundle = self._m_tempfile.NamedTemporaryFile().name
with open(ca_bundle, 'w') as bundle_file:
bundle_file.write(self._ca_bundle)
cert_required = self._m_tls.CERT_REQUIRED
self._socket = \
self._m_tls.wrap_socket(self._socket, \
cert_reqs=cert_required, \
ca_certs=ca_bundle)
self._socket.connect((server, port))
self._m_tls.match_hostname(self._socket.getpeercert(), \
server)
return None
else:
self._socket = self._m_tls.wrap_socket(self._socket)
self._socket.connect((server, port))
def _register(self, nick, user, real_name, password=None):
"""
Register the connection with the IRC server.
Required arguments:
* nick - Nick to use. If a tuple/list is specified -
it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
Optional arguments:
* password=None - IRC server password.
"""
with self.lock:
if password:
self._password(password)
self.nick(nick)
self._user(user, real_name)
def _init(self, server, nick, user, real_name, password, port=None,
tls=True, tls_verify=True,
proxy=False, proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
self.current_nick = nick
if tls:
if not port:
port = 6697
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
else:
if not port:
port = 6667
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
while self.readable(2):
data = self.recv()
if data[0] == 'NOTICE':
self.server = data[1][0]
self.con_msg.append(data)
self._register(nick, user, real_name, password)
while self.readable(timeout=4):
rdata = self.recv()
if rdata[0] == 'UNKNOWN':
data = rdata[1][3].replace(':', '', 1)
ncode = rdata[1][1]
if ncode == '004':
info = data.split()
self.server = info[0]
self.ircd = info[1]
self.umodes = info[2]
self.cmodes = info[3]
elif ncode == '005':
                        version = rdata[1][3].replace(
                            ':are supported by this server', '')
version = version.split()
for info in version:
try:
info = info.split('=')
name = info[0]
value = info[1]
self.version[name] = value
if name == 'CHARSET':
self.encoding = value
except IndexError:
self.version[info[0]] = True
elif ncode == '376':
self.con_msg.append(rdata)
break
elif ncode == '422':
self.con_msg.append(rdata)
break
else:
if rdata[0] == 'NOTICE':
self.server = rdata[1][0]
self.con_msg.append(rdata[1])
self.motd = tuple(self.motd)
self.con_msg = tuple(self.con_msg)
self.connected = True
self.keep_going = \
True
def _password(self, password):
"""
Authenticates with the IRC server.
        NOTE: This method will not raise an exception if the password is
        wrong; it will just fail.
Required arguments:
* password - Password to send.
"""
with self.lock:
self.send('PASS :%s' % password, error_check=True)
def _nick(self, nick):
"""
Sets your nick.
Required arguments:
* nick - New nick.
"""
with self.lock:
self.send('NICK :%s' % nick)
if self.readable():
msg = self._recv(expected_replies='NICK')
if msg[0] == 'NICK':
if not self.hide_called_events:
self.stepback()
for channel in self.channels:
if 'USERS' in self.channels[channel]:
priv_level = \
self.channels[channel]['USERS'][self.current_nick]
del self.channels[channel]['USERS'][self.current_nick]
self.channels[channel]['USERS'][nick] = priv_level
self.current_nick = nick
def nick(self, nick):
"""
Sets your nick.
Required arguments:
* nick - New nick or a tuple of possible new nicks.
"""
nick_set_successfully = False
try:
self._nick(nick)
nick_set_successfully = True
except TypeError:
for nick_ in nick:
try:
self._nick(nick_)
nick_set_successfully = True
break
except self.NicknameInUse:
pass
if not nick_set_successfully:
self.exception('433')
def _user(self, user, real_name):
"""
Sends the USER message.
Required arguments:
* user - Username to send.
* real_name - Real name to send.
"""
with self.lock:
self.send('USER %s 0 * :%s' % (user, real_name))
if self.readable():
self._recv()
self.stepback()
def oper(self, name, password):
"""
Opers up.
Required arguments:
* name - Oper name.
* password - Oper password.
"""
with self.lock:
self.send('OPER %s %s' % (name, password))
snomasks = ''
new_umodes = ''
if self.readable():
msg = self._recv(expected_replies=( \
'MODE', '381', '008'))
if msg[0] == 'MODE':
new_umodes = msg[2].replace(':', '', 1)
elif msg[0] == '381':
return new_umodes, snomasks
elif msg[0] == '008':
snomasks = msg[2].split('(')[1].split(')')[0]
def umode(self, nick, modes=''):
"""
Sets/gets user modes.
Required arguments:
* nick - Nick to set/get user modes for.
Optional arguments:
* modes='' - Sets these user modes on a nick.
"""
with self.lock:
if not modes:
self.send('MODE %s' % nick)
if self.readable():
msg = self._recv(expected_replies=('221',))
if msg[0] == '221':
modes = msg[2].replace('+', '').replace(':', '', 1)
return modes
self.send('MODE %s %s' % (nick, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',))
if msg[0] == 'MODE':
if not self.hide_called_events:
self.stepback()
return msg[2].replace(':', '', 1)
def service(self):
""" Not implemented. """
raise self.NotImplemented('LurklibError: NotImplemented')
def _quit(self, reason=''):
"""
Sends a QUIT message to the server.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.send('QUIT :%s' % reason)
def quit(self, reason=''):
"""
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.keep_going = False
self._quit(reason)
self._socket.shutdown(self._m_socket.SHUT_RDWR)
self._socket.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
""" For use with the Python 'with' statement. """
with self.lock:
self.quit()
def squit(self, server, reason=''):
"""
Quits a server.
Required arguments:
* server - Server to quit.
Optional arguments:
* reason='' - Reason for the server quitting.
"""
with self.lock:
self.send('SQUIT %s :%s' % (server, reason))
while self.readable():
msg = self._recv(expected_replies=('SQUIT',))
if msg[0] == 'SQUIT':
if not self.hide_called_events:
self.stepback()
def latency(self):
""" Checks the connection latency. """
with self.lock:
self.send('PING %s' % self.server)
ctime = self._m_time.time()
msg = self._recv(expected_replies=('PONG',))
if msg[0] == 'PONG':
latency = self._m_time.time() - ctime
return latency
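
# Illustrative sketch of the fallback strategy used by nick() above: try each
# candidate nick in turn and settle on the first one the server accepts. The
# try_nick callable and nick_in_use_error exception class are stand-ins for
# self._nick() and self.NicknameInUse; they are not part of Lurklib itself.
def _first_free_nick(candidates, try_nick, nick_in_use_error):
    for candidate in candidates:
        try:
            try_nick(candidate)
            return candidate
        except nick_in_use_error:
            continue
    raise nick_in_use_error('All candidate nicks are in use.')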
|
ElectroCode/lurklib
|
lurklib/connection.py
|
Python
|
gpl-3.0
| 13,948
|
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '27/05/2015'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from xml.etree import ElementTree
from safe.metadata import BaseMetadata
from safe.metadata.utilities import reading_ancillary_files, prettify_xml
class GenericLayerMetadata(BaseMetadata):
"""
Base class for generic layers such as hazard, exposure and aggregation.
This class can be subclassed so you can create only a minimal
concrete class that implements only _standard_properties to add specific
properties. You can also add a standard XML property that applies to all
subclasses here. In both cases do it as explained below. @property and
@propname.setter will be generated automatically
_standard_properties = {
'TESTprop': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'gco:CharacterString')
}
from safe.metadata.utils import merge_dictionaries
_standard_properties = merge_dictionaries(
# change BaseMetadata to GenericLayerMetadata in subclasses
BaseMetadata._standard_properties, _standard_properties)
.. versionadded:: 3.2
"""
def __init__(self, layer_uri, xml_uri=None, json_uri=None):
"""
Constructor
        :param layer_uri: uri of the layer for which the metadata are
:type layer_uri: str
:param xml_uri: uri of an xml file to use
:type xml_uri: str
:param json_uri: uri of a json file to use
:type json_uri: str
"""
# initialize base class
super(GenericLayerMetadata, self).__init__(
layer_uri, xml_uri, json_uri)
@property
def dict(self):
"""
calls the overridden method
:return: dictionary representation of the metadata
:rtype: dict
"""
return super(GenericLayerMetadata, self).dict
@property
def json(self):
"""
calls the overridden method
:return: json representation of the metadata
:rtype: str
"""
return super(GenericLayerMetadata, self).json
@property
def xml(self):
"""
calls the overridden method
:return: xml representation of the metadata
:rtype: str
"""
root = super(GenericLayerMetadata, self).xml
return prettify_xml(ElementTree.tostring(root))
def read_json(self):
"""
calls the overridden method
:return: the read metadata
:rtype: dict
"""
with reading_ancillary_files(self):
metadata = super(GenericLayerMetadata, self).read_json()
return metadata
def read_xml(self):
"""
calls the overridden method
:return: the read metadata
:rtype: ElementTree.Element
"""
with reading_ancillary_files(self):
root = super(GenericLayerMetadata, self).read_xml()
return root
def update_report(self):
"""
update the report.
"""
# TODO (MB): implement this by reading the kw and definitions
self.report = self.report
raise NotImplementedError()
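
# Minimal subclassing sketch following the recipe in the class docstring
# above. The property name and XML path are illustrative placeholders, and a
# plain dict copy/update stands in for merge_dictionaries.
class _ExampleLayerMetadata(GenericLayerMetadata):
    _standard_properties = dict(GenericLayerMetadata._standard_properties)
    _standard_properties.update({
        'example_property': (
            'gmd:identificationInfo/'
            'gmd:MD_DataIdentification/'
            'gmd:supplementalInformation/'
            'gco:CharacterString')
    })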
|
Gustry/inasafe
|
safe/metadata/generic_layer_metadata.py
|
Python
|
gpl-3.0
| 3,730
|
#! /usr/bin/env python
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseActionGoal
from actionlib_msgs.msg import GoalID
class ForceCancel(object):
def __init__(self, nodename="force_cancel", is_newnode=True, repetition=10):
self.repetition = rospy.get_param("~repetition", repetition)
if is_newnode:
rospy.init_node(name=nodename, anonymous=False)
rospy.on_shutdown(self.shutdown)
pub = rospy.Publisher("move_base/cancel", GoalID, queue_size=1)
sub = rospy.Subscriber("move_base/goal", MoveBaseActionGoal, self.callback, queue_size=1)
rospy.wait_for_message("move_base/goal", MoveBaseActionGoal, 60)
r = rospy.Rate(1)
counter = 0
while not rospy.is_shutdown() and (counter < self.repetition):
msg = GoalID()
msg.id = self.id
pub.publish(msg)
r.sleep()
counter += 1
def callback(self, msg):
self.id = msg.goal_id.id
def shutdown(self):
rospy.loginfo("cancel job finished")
rospy.sleep(1)
pass
if __name__ == "__main__":
fc = ForceCancel('force_cancel', False, 5)
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/move_base_force_cancel.py
|
Python
|
gpl-3.0
| 1,191
|
# Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.admin import AdminSite
class TouSIXAdmin(AdminSite):
"""
Special admin site, created for display widgets in the main panel.
"""
site_header = "TouIX - Administration de TouSIX"
site_title = "TouIX"
index_template = "index_touSIX.html"
admin_tousix = TouSIXAdmin(name='Administration')
|
Baloc/TouSIX-Manager
|
tousix_manager/Administration/adminsite.py
|
Python
|
gpl-3.0
| 1,145
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Stephane Caron <stephane.caron@normalesup.org>
#
# This file is part of pymanoid <https://github.com/stephane-caron/pymanoid>.
#
# pymanoid is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
from numpy import dot, eye, hstack, maximum, minimum, ones, sqrt, vstack, zeros
from threading import Lock
from .misc import norm
from .qpsolvers import solve_qp
from .sim import Process
from .tasks import AxisAngleContactTask, ContactTask, DOFTask, PoseTask
RANK_DEFICIENCY_MSG = "rank deficiency in IK problem, " \
"did you add a regularization task?"
class IKSolver(Process):
"""
Compute velocities bringing the system closer to fulfilling a set of tasks.
Parameters
----------
robot : Robot
Robot to be updated.
active_dofs : list of integers, optional
List of DOFs updated by the IK solver.
doflim_gain : scalar, optional
DOF-limit gain as described in [Kanoun12]_. In `this implementation
<https://scaron.info/teaching/inverse-kinematics.html>`_, it should be
between zero and one.
Attributes
----------
doflim_gain : scalar, optional
DOF-limit gain as described in [Kanoun12]_. In `this implementation
<https://scaron.info/teaching/inverse-kinematics.html>`_, it should be
between zero and one.
lm_damping : scalar
Add Levenberg-Marquardt damping as described in [Sugihara11]_. This
damping significantly improves numerical stability, but convergence
gets slower when its value is too high.
slack_dof_limits : bool
Add slack variables to maximize DOF range? This method is used in
[Nozawa16]_ to keep joint angles as far away from their limits as
possible. It slows down computations as there are twice as many
optimization variables, but is more numerically stable and won't
produce inconsistent constraints. Defaults to False.
slack_maximize : scalar
Linear cost weight applied when ``slack_dof_limits`` is True.
slack_regularize : scalar
Regularization weight applied when ``slack_dof_limits`` is True.
qd : array
Velocity returned by last solver call.
robot : pymanoid.Robot
Robot model.
tasks : dict
Dictionary of active IK tasks, indexed by task name.
Notes
-----
One unsatisfactory aspect of the DOF-limit gain is that it slows down the
robot when approaching DOF limits. For instance, it may slow down a foot
motion when approaching the knee singularity, despite the robot being able
to move faster with a fully extended knee.
"""
DEFAULT_GAINS = {
'COM': 0.85,
'CONTACT': 0.85,
'DOF': 0.85,
'MIN_ACCEL': 0.85,
'MIN_CAM': 0.85,
'MIN_VEL': 0.85,
'PENDULUM': 0.85,
'POSE': 0.85,
'POSTURE': 0.85,
}
DEFAULT_WEIGHTS = {
'CONTACT': 1.,
'COM': 1e-2,
'POSE': 1e-3,
'MIN_ACCEL': 1e-4,
'MIN_CAM': 1e-4,
'DOF': 1e-5,
'POSTURE': 1e-6,
'MIN_VEL': 1e-6,
}
def __init__(self, robot, active_dofs=None, doflim_gain=0.5):
super(IKSolver, self).__init__()
if active_dofs is None:
active_dofs = range(robot.nb_dofs)
assert 0. <= doflim_gain <= 1.
self.__lock = Lock()
self.doflim_gain = doflim_gain
self.interaction_dist = 0.1 # [rad]
self.lm_damping = 1e-3
self.qd = zeros(robot.nb_dofs)
self.robot = robot
self.safety_dist = 0.01 # [rad]
self.slack_dof_limits = False
self.slack_maximize = 1e-3
self.slack_regularize = 1e-5
self.tasks = {}
self.verbosity = 0
#
self.set_active_dofs(active_dofs)
def clear(self):
"""
Clear all tasks in the IK solver.
"""
self.tasks = {}
def set_active_dofs(self, active_dofs):
"""
Set DOF indices modified by the IK.
Parameters
----------
active_dofs : list of integers
List of DOF indices.
"""
self.active_dofs = active_dofs
self.nb_active_dofs = len(active_dofs)
self.__reset_dof_limits()
def __reset_dof_limits(self):
"""
Read DOF position, velocity and acceleration limits from robot model.
"""
self.q_max = self.robot.q_max[self.active_dofs]
self.q_min = self.robot.q_min[self.active_dofs]
self.qd_lim = self.robot.qd_lim[self.active_dofs]
if self.robot.qdd_lim is not None:
self.qdd_lim = self.robot.qdd_lim[self.active_dofs]
else: # robot model has no joint acceleration limit
self.qdd_lim = None
def set_gains(self, gains):
"""
Set task gains from a dictionary.
Parameters
----------
gains : string -> double dictionary
Dictionary mapping task labels to default gain values.
"""
for (name, gain) in gains.iteritems():
self.tasks[name].gain = gain
def set_weights(self, weights):
"""
Set task weights from a dictionary.
Parameters
----------
weights : string -> double dictionary
Dictionary mapping task labels to default weight values.
"""
for (name, weight) in weights.iteritems():
self.tasks[name].weight = weight
def __fill_gain(self, task):
if task.name in self.DEFAULT_GAINS:
task.gain = self.DEFAULT_GAINS[task.name]
elif type(task) in [AxisAngleContactTask, ContactTask]:
task.gain = self.DEFAULT_GAINS['CONTACT']
elif type(task) is DOFTask:
task.gain = self.DEFAULT_GAINS['DOF']
elif type(task) is PoseTask:
task.gain = self.DEFAULT_GAINS['POSE']
else: # task type is not accounted for
raise Exception("no gain provided for task '%s'" % task.name)
def __fill_weight(self, task):
if task.name in self.DEFAULT_WEIGHTS:
task.weight = self.DEFAULT_WEIGHTS[task.name]
elif type(task) in [AxisAngleContactTask, ContactTask]:
task.weight = self.DEFAULT_WEIGHTS['CONTACT']
elif type(task) is DOFTask:
task.weight = self.DEFAULT_WEIGHTS['DOF']
elif type(task) is PoseTask:
task.weight = self.DEFAULT_WEIGHTS['POSE']
else: # task type is not accounted for
raise Exception("no weight provided for task '%s'" % task.name)
def add(self, task):
"""
Add a new task to the IK solver.
Parameters
----------
task : Task
New task to add to the list.
"""
if task.name in self.tasks:
raise Exception("Task '%s' already present in IK" % task.name)
if task.gain is None:
self.__fill_gain(task)
if task.weight is None:
self.__fill_weight(task)
with self.__lock:
self.tasks[task.name] = task
def print_costs(self, qd, dt):
"""
Print task costs for the current IK step.
Parameters
----------
qd : array
Robot DOF velocities.
dt : scalar
Timestep for the IK.
"""
print("\n TASK COST")
print("------------------------------")
for task in self.tasks.itervalues():
J = task.jacobian()
r = task.residual(dt)
print("%20s %.2e" % (task.name, norm(dot(J, qd) - r)))
print("")
def remove(self, ident):
"""
Remove a task.
Parameters
----------
ident : string or object
Name or object with a ``name`` field identifying the task.
"""
name = ident if type(ident) is str else ident.name
with self.__lock:
if name not in self.tasks:
return
del self.tasks[name]
def compute_cost(self, dt):
"""
Compute the IK cost of the present system state for a time step of dt.
Parameters
----------
dt : scalar
Time step in [s].
"""
return sum(task.cost(dt) for task in self.tasks.itervalues())
def build_qp_matrices(self, dt):
"""
        Build matrices of the quadratic program.
Parameters
----------
dt : scalar
Time step in [s].
Returns
-------
P : (n, n) array
Positive semi-definite cost matrix.
q : array
Cost vector.
qd_max : array
Maximum joint velocity vector.
qd_min : array
Minimum joint velocity vector.
Notes
-----
When the robot model has joint acceleration limits, special care should
be taken when computing the corresponding velocity bounds for the IK.
In short, the robot now needs to avoid the velocity range where it (1)
is not going to collide with a DOF limit in one iteration but (2)
cannot brake fast enough to avoid a collision in the future due to
acceleration limits. This function implements the solution to this
problem described in Equation (14) of [Flacco15]_.
"""
n = self.nb_active_dofs
P = zeros((n, n))
v = zeros(n)
with self.__lock:
for task in self.tasks.itervalues():
J = task.jacobian()[:, self.active_dofs]
r = task.residual(dt)
mu = self.lm_damping * max(1e-3, dot(r, r))
P += task.weight * (dot(J.T, J) + mu * eye(n))
v += task.weight * dot(-r.T, J)
q = self.robot.q[self.active_dofs]
qd_max_doflim = (self.q_max - q) / dt
qd_min_doflim = (self.q_min - q) / dt
qd_max = minimum(+self.qd_lim, self.doflim_gain * qd_max_doflim)
qd_min = maximum(-self.qd_lim, self.doflim_gain * qd_min_doflim)
if self.qdd_lim is not None: # straightforward acceleration bounds
qd = self.robot.qd[self.active_dofs]
qd_max_acc = qd + self.qdd_lim * dt
qd_min_acc = qd - self.qdd_lim * dt
qd_max = minimum(qd_max, qd_max_acc)
qd_min = maximum(qd_min, qd_min_acc)
if self.qdd_lim is not None: # DOF-limit acceleration bounds
Delta_q_max = maximum(self.q_max - q, 1e-32)
Delta_q_min = maximum(q - self.q_min, 1e-32)
qd_max_doflim_acc = +sqrt(2 * self.qdd_lim * Delta_q_max)
qd_min_doflim_acc = -sqrt(2 * self.qdd_lim * Delta_q_min)
qd_max = minimum(qd_max, self.doflim_gain * qd_max_doflim_acc)
qd_min = maximum(qd_min, self.doflim_gain * qd_min_doflim_acc)
return (P, v, qd_max, qd_min)
def compute_velocity(self, dt):
"""
Compute a new velocity satisfying all tasks at best.
Parameters
----------
dt : scalar
Time step in [s].
Returns
-------
qd : array
Vector of active joint velocities.
Note
----
This QP formulation is the default for
:func:`pymanoid.ik.IKSolver.solve` (posture generation) as it converges
faster.
Notes
-----
The method implemented in this function is reasonably fast but may
become unstable when some tasks are widely infeasible. In such
situations, you can either increase the Levenberg-Marquardt bias
``self.lm_damping`` or set ``slack_dof_limits=True`` which will call
:func:`pymanoid.ik.IKSolver.compute_velocity_with_slack`.
The returned velocity minimizes squared residuals as in the weighted
cost function, which corresponds to the Gauss-Newton algorithm. Indeed,
expanding the square expression in ``cost(task, qd)`` yields
.. math::
            \\mathrm{minimize} \\ \\dot{q}^T J^T J \\dot{q} - 2 r^T J \\dot{q}
Differentiating with respect to :math:`\\dot{q}` shows that the minimum
is attained for :math:`J^T J \\dot{q} = r`, where we recognize the
Gauss-Newton update rule.
"""
n = self.nb_active_dofs
P, v, qd_max, qd_min = self.build_qp_matrices(dt)
G = vstack([+eye(n), -eye(n)])
h = hstack([qd_max, -qd_min])
try:
x = solve_qp(P, v, G, h)
self.qd[self.active_dofs] = x
except ValueError as e:
if "matrix G is not positive definite" in e:
raise Exception(RANK_DEFICIENCY_MSG)
raise
return self.qd
def compute_velocity_with_slack(self, dt):
"""
Compute a new velocity satisfying all tasks at best, while trying to
stay away from kinematic constraints.
Parameters
----------
dt : scalar
Time step in [s].
Returns
-------
qd : array
Vector of active joint velocities.
Note
----
This QP formulation is the default for
:func:`pymanoid.ik.IKSolver.step` as it has a more numerically-stable
behavior.
Notes
-----
Check out the discussion of this method around Equation (10) of
[Nozawa16]_. DOF limits are better taken care of by slack variables,
but the variable count doubles and the QP takes roughly 50% more time
to solve.
"""
n = self.nb_active_dofs
E, Z = eye(n), zeros((n, n))
P0, v0, qd_max, qd_min = self.build_qp_matrices(dt)
P = vstack([hstack([P0, Z]), hstack([Z, self.slack_regularize * E])])
v = hstack([v0, -self.slack_maximize * ones(n)])
G = vstack([
hstack([+E, +E / dt]), hstack([-E, +E / dt]), hstack([Z, -E])])
h = hstack([qd_max, -qd_min, zeros(n)])
try:
x = solve_qp(P, v, G, h)
self.qd[self.active_dofs] = x[:n]
except ValueError as e:
if "matrix G is not positive definite" in e:
raise Exception(RANK_DEFICIENCY_MSG)
raise
return self.qd
def step(self, dt):
"""
Apply velocities computed by inverse kinematics.
Parameters
----------
dt : scalar
Time step in [s].
"""
q = self.robot.q
if self.slack_dof_limits:
qd = self.compute_velocity_with_slack(dt)
else: # default QP formulation
qd = self.compute_velocity(dt)
if self.verbosity >= 2:
self.print_costs(qd, dt)
self.robot.set_dof_values(q + qd * dt, clamp=True)
self.robot.set_dof_velocities(qd)
def solve(self, max_it=1000, cost_stop=1e-10, impr_stop=1e-5, dt=1e-2,
warm_start=False, debug=False):
"""
Compute joint-angles that satisfy all kinematic constraints at best.
Parameters
----------
max_it : integer
Maximum number of solver iterations.
cost_stop : scalar
Stop when cost value is below this threshold.
impr_stop : scalar, optional
Stop when cost improvement (relative variation from one iteration
to the next) is less than this threshold.
dt : scalar, optional
Time step in [s].
warm_start : bool, optional
Set to True if the current robot posture is a good guess for IK.
Otherwise, the solver will start by an exploration phase with DOF
velocity limits relaxed and no Levenberg-Marquardt damping.
debug : bool, optional
Set to True for additional debug messages.
Returns
-------
nb_it : int
Number of solver iterations.
cost : scalar
Final value of the cost function.
Notes
-----
Good values of `dt` depend on the weights of the IK tasks. Small values
make convergence slower, while big values make the optimization
unstable (in which case there may be no convergence at all).
"""
cost = 100000.
init_lm_damping = self.lm_damping
init_slack_dof_limits = self.slack_dof_limits
exploration_phase = not warm_start
if exploration_phase:
self.lm_damping = 0
self.slack_dof_limits = False
self.qd_lim = 10. * self.robot.qd_lim[self.active_dofs]
self.qdd_lim = None
for itnum in range(max_it):
prev_cost = cost
cost = self.compute_cost(dt)
impr = abs(cost - prev_cost) / prev_cost
if debug or self.verbosity >= 1:
print("%2d: %.3e (impr: %+.2e)" % (itnum, cost, impr))
if abs(cost) < cost_stop or impr < impr_stop:
break
if exploration_phase and (itnum >= max_it / 2 or impr < 1e-2):
exploration_phase = False
self.lm_damping = init_lm_damping
self.slack_dof_limits = init_slack_dof_limits
self.qd_lim = self.robot.qd_lim[self.active_dofs]
self.step(dt)
self.lm_damping = init_lm_damping
self.slack_dof_limits = init_slack_dof_limits
self.__reset_dof_limits()
self.robot.set_dof_velocities(zeros(self.robot.qd.shape))
return 1 + itnum, cost
def on_tick(self, sim):
"""
Step the IK at each simulation tick.
Parameters
----------
sim : Simulation
Simulation instance.
"""
self.step(sim.dt)
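
# Standalone numerical sketch of the velocity-bound combination performed in
# build_qp_matrices() above: position, velocity and (optional) acceleration
# limits are each mapped to per-DOF velocity bounds and intersected, following
# the construction discussed in the Notes ([Flacco15]). The helper is
# illustrative only and is not part of the IKSolver API.
def _velocity_bounds_sketch(q, qd, q_min, q_max, qd_lim, qdd_lim, doflim_gain,
                            dt):
    qd_max = minimum(+qd_lim, doflim_gain * (q_max - q) / dt)
    qd_min = maximum(-qd_lim, doflim_gain * (q_min - q) / dt)
    if qdd_lim is not None:
        # plain acceleration bounds over one time step
        qd_max = minimum(qd_max, qd + qdd_lim * dt)
        qd_min = maximum(qd_min, qd - qdd_lim * dt)
        # braking-distance bounds with respect to the DOF limits
        qd_max = minimum(
            qd_max, doflim_gain * sqrt(2 * qdd_lim * maximum(q_max - q, 1e-32)))
        qd_min = maximum(
            qd_min, -doflim_gain * sqrt(2 * qdd_lim * maximum(q - q_min, 1e-32)))
    return qd_min, qd_max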
|
stephane-caron/pymanoid
|
pymanoid/ik.py
|
Python
|
gpl-3.0
| 18,284
|
########################################################################
#
# File Name: HTMLStyleElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLStyleElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLStyleElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="STYLE"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_disabled(self):
return self.hasAttribute("DISABLED")
def _set_disabled(self, value):
if value:
self.setAttribute("DISABLED", "DISABLED")
else:
self.removeAttribute("DISABLED")
def _get_media(self):
return self.getAttribute("MEDIA")
def _set_media(self, value):
self.setAttribute("MEDIA", value)
def _get_type(self):
return self.getAttribute("TYPE")
def _set_type(self, value):
self.setAttribute("TYPE", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"disabled" : _get_disabled,
"media" : _get_media,
"type" : _get_type
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"disabled" : _set_disabled,
"media" : _set_media,
"type" : _set_type
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
iCarto/siga
|
extScripting/scripts/jython/Lib/xml/dom/html/HTMLStyleElement.py
|
Python
|
gpl-3.0
| 1,885
|
#!/usr/bin/python
import numpy as np
import os
import sys
from keras.layers import Activation, Dense, Input
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import RMSprop
NUM_OF_HIDDEN_NEURONS = 100
QNETWORK_NAME = 'online_network'
TARGETNET_NAME = 'target_network'
TAU = 0.0001 # soft update / low pass filter
class QNetworks:
def __init__(self, num_of_actions, num_of_states, num_of_hidden_neurons=NUM_OF_HIDDEN_NEURONS, tau=TAU):
self.NUM_OF_ACTIONS = num_of_actions
self.NUM_OF_HIDDEN_NEURONS = num_of_hidden_neurons
self.NUM_OF_STATES = num_of_states
self.TAU = tau
self.online_net = self.init_model(QNETWORK_NAME)
self.target_net = self.init_model(QNETWORK_NAME)
def do_soft_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = self.TAU*weights[i] + (1.0-self.TAU)*target_weights[i]
self.target_net.set_weights(target_weights)
return
def do_hard_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = weights[i]
self.target_net.set_weights(target_weights)
return
def get_weights(self):
# get weights of the online Q network
return self.online_net.get_weights()
def init_model(self, net_name):
model = Sequential()
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS, input_shape=(self.NUM_OF_STATES,)))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_ACTIONS))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')
filename = net_name+'/'+net_name
if os.path.isfile(filename+str(0)+'.txt'):
weights = model.get_weights()
for i in xrange(len(weights)):
loaded_weights = np.loadtxt(filename+str(i)+'.txt')
weights[i] = loaded_weights
model.set_weights(weights)
else:
print 'No model', filename, 'found. Creating a new model.'
return model
def save_models(self):
weights = self.online_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(QNETWORK_NAME+'/'+QNETWORK_NAME+str(i)+'.txt', weights[i])
weights = self.target_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(TARGETNET_NAME+'/'+TARGETNET_NAME+str(i)+'.txt', weights[i])
print("Saved models to disk.")
|
356255531/SpikingDeepRLControl
|
code/EnvBo/Q-Learning/Testing_Arm_4points/q_networks.py
|
Python
|
gpl-3.0
| 3,008
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
Internal package.
Package containing modules that are used internally by Numenta Python
tools and plugins to extend standard library functionality.
These modules should NOT be used by client applications.
The following modules are included:
nupic.support.paths
Module containing filesystem path manipulation utilities.
nupic.support.serialization
Module containing Python object serialization (pickling and unpickling) and
versioning utilities.
nupic.support.compress
Module containing Python object encoding and compression utilities.
nupic.support.processes
Module containing operating system process management utilities and wrappers.
nupic.support.output
Module containing operating system interprocess communication utilities and
wrappers.
nupic.support.diff
Module containing file difference calculation wrappers.
nupic.support.vision
Temporary location for vision framework before the move to nupic.vision.
nupic.support.deprecate
Contains the deprecate decorator used for automatic handling of deprecated
methods.
nupic.support.memchecker
Contains the MemChecker class, for checking physical memory and monitoring
memory usage.
nupic.support.imagesearch
Contains functions for searching for images on the web and downloading them.
"""
from __future__ import with_statement
# Standard imports
import os
import sys
import inspect
import logging
import logging.config
import logging.handlers
from platform import python_version
import struct
from StringIO import StringIO
import time
import traceback
from configuration import Configuration
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
# Local imports
#############################################################################
def getCallerInfo(depth=2):
"""Utility function to get information about function callers
The information is the tuple (function/method name, filename, class)
The class will be None if the caller is just a function and not an object
method.
depth: how far back in the callstack to go to extract the caller info
"""
f = sys._getframe(depth)
method_name = f.f_code.co_name
filename = f.f_code.co_filename
arg_class = None
args = inspect.getargvalues(f)
if len(args[0]) > 0:
arg_name = args[0][0] # potentially the 'self' arg if its a method
arg_class = args[3][arg_name].__class__.__name__
return (method_name, filename, arg_class)
#############################################################################
def title(s=None, additional='', stream=sys.stdout, frame='-'):
"""Utility function to display nice titles
It automatically extracts the name of the function/method it is called from
and you can add additional text. title() will then print the name
  of the function/method and the additional text surrounded by two lines
of dashes. If you don't want the name of the function, you can provide
alternative text (regardless of the additional text)
@param s - text to display, uses the function name and arguments by default
@param additional - extra text to display (not needed if s is not None)
  @param stream - the stream to print to. By default goes to standard output
@param frame - the character used for the over and under line. Default is '-'
Examples:
def foo():
title()
will display:
---
foo
---
def foo():
title(additional='(), this is cool!!!')
will display:
----------------------
foo(), this is cool!!!
----------------------
def foo():
title('No function name here!')
will display:
----------------------
No function name here!
----------------------
"""
if s is None:
callable_name, file_name, class_name = getCallerInfo(2)
s = callable_name
if class_name is not None:
method_name = s
s = class_name + '.' + callable_name
lines = (s + additional).split('\n')
length = max(len(line) for line in lines)
print >> stream, '-' * length
print >> stream, s + additional
print >> stream, '-' * length
#############################################################################
def bringToFront(title):
"""Bring a top-level window with a given title
to the front on Windows"""
if sys.platform != 'win32':
return
import ctypes
find_window = ctypes.windll.user32.FindWindowA
set_foreground_window = ctypes.windll.user32.SetForegroundWindow
hwnd = find_window(None, title)
if hwnd == 0:
raise Exception('There is no window titled: "%s"' % title)
set_foreground_window(hwnd)
#############################################################################
def getUserDocumentsPath():
"""
Find the user's "Documents" directory (OS X), "My Documents" directory
(Windows), or home directory (Unix).
"""
# OS X and Windows code from:
# http://www.blueskyonmars.com/2005/08/05
# /finding-a-users-my-documents-folder-on-windows/
# Alternate Windows code from:
# http://bugs.python.org/issue1763
if sys.platform.startswith('win'):
if sys.platform.startswith('win32'):
# Try the primary method on 32-bit windows
try:
from win32com.shell import shell
alt = False
except ImportError:
try:
import ctypes
dll = ctypes.windll.shell32
alt = True
except:
raise Exception("Could not find 'My Documents'")
else:
# Use the alternate method on 64-bit Windows
alt = True
if not alt:
# Primary method using win32com
df = shell.SHGetDesktopFolder()
pidl = df.ParseDisplayName(0, None,
"::{450d8fba-ad25-11d0-98a8-0800361b1103}")[1]
path = shell.SHGetPathFromIDList(pidl)
else:
# Alternate method using ctypes rather than win32com
buf = ctypes.create_string_buffer(300)
dll.SHGetSpecialFolderPathA(None, buf, 0x0005, False)
path = buf.value
elif sys.platform.startswith('darwin'):
from Carbon import Folder, Folders
folderref = Folder.FSFindFolder(Folders.kUserDomain,
Folders.kDocumentsFolderType,
False)
path = folderref.as_pathname()
else:
path = os.getenv('HOME')
return path
#############################################################################
def getArgumentDescriptions(f):
"""
Get the arguments, default values, and argument descriptions for a function.
Returns a list of tuples: (argName, argDescription, defaultValue). If an
argument has no default value, the tuple is only two elements long (as None
cannot be used, since it could be a default value itself).
Parses the argument descriptions out of the function docstring, using a
  format something like this:
[junk]
argument_name: description...
description...
description...
[junk]
[more arguments]
It will find an argument as long as the exact argument name starts the line.
It will then strip a trailing colon, if present, then strip the rest of the
line and use it to start the description. It will then strip and append any
subsequent lines with a greater indent level than the original argument name.
"""
# Get the argument names and default values
argspec = inspect.getargspec(f)
# Scan through the docstring to extract documentation for each argument as
# follows:
# Check the first word of the line, stripping a colon if one is present.
# If it matches an argument name:
  # Take the rest of the line, stripping leading whitespace
# Take each subsequent line if its indentation level is greater than the
# initial indentation level
# Once the indentation level is back to the original level, look for
# another argument
docstring = f.__doc__
descriptions = {}
if docstring:
lines = docstring.split('\n')
i = 0
while i < len(lines):
stripped = lines[i].lstrip()
if not stripped:
i += 1
continue
# Indentation level is index of the first character
indentLevel = lines[i].index(stripped[0])
# Get the first word and remove the colon, if present
firstWord = stripped.split()[0]
if firstWord.endswith(':'):
firstWord = firstWord[:-1]
if firstWord in argspec.args:
# Found an argument
argName = firstWord
restOfLine = stripped[len(firstWord)+1:].strip()
argLines = [restOfLine]
# Take the next lines as long as they are indented more
i += 1
while i < len(lines):
stripped = lines[i].lstrip()
if not stripped:
# Empty line - stop
break
if lines[i].index(stripped[0]) <= indentLevel:
# No longer indented far enough - stop
break
# This line counts too
argLines.append(lines[i].strip())
i += 1
# Store this description
descriptions[argName] = ' '.join(argLines)
else:
# Not an argument
i += 1
# Build the list of (argName, description, defaultValue)
args = []
if argspec.defaults:
defaultCount = len(argspec.defaults)
else:
defaultCount = 0
nonDefaultArgCount = len(argspec.args) - defaultCount
for i, argName in enumerate(argspec.args):
if i >= nonDefaultArgCount:
defaultValue = argspec.defaults[i - nonDefaultArgCount]
args.append((argName, descriptions.get(argName, ""), defaultValue))
else:
args.append((argName, descriptions.get(argName, "")))
return args
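# Example usage (illustrative sketch; the sample function and its docstring
# below are hypothetical):
#
#   def sample(alpha, beta=10):
#     """
#     alpha: the first operand; must be
#            a positive integer
#     beta: the scale factor
#     """
#
#   getArgumentDescriptions(sample) would then be expected to return:
#     [('alpha', 'the first operand; must be a positive integer'),
#      ('beta', 'the scale factor', 10)]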
#############################################################################
# TODO queryNumInwardIters appears to be unused and should probably be deleted
# from here altogether; it's likely an artifact of the legacy vision support.
#def queryNumInwardIters(configPath, radialLength, numRepetitions=1):
# """
# Public utility API that accepts a config path and
# radial length, and determines the proper number of
# training iterations with which to invoke net.run()
# when running a PictureSensor in 'inward' mode.
# """
# numCats = queryNumCategories(configPath)
# sequenceLen = radialLength + 1
# numItersPerCat = (8 * radialLength) * sequenceLen
# numTrainingItersTP = numItersPerCat * numCats
# return numTrainingItersTP * numRepetitions
#############################################################################
gLoggingInitialized = False
def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'):
"""
  Initialize NuPic logging by reading in the logging configuration file. The
logging configuration file is named 'nupic-logging.conf' and is expected to be
in the format defined by the python logging module.
If the environment variable 'NTA_CONF_PATH' is defined, then the logging
configuration file is expected to be in the NTA_CONF_PATH directory. If
NTA_CONF_PATH is not defined, then it is found in the 'conf/default'
subdirectory of the NuPic installation directory (typically
~/nta/current/conf/default)
The logging configuration file can use the environment variable 'NTA_LOG_DIR'
to set the locations of log files. If this variable is not defined already in
the environment, this method will set it to the 'logs' subdirectory of the
NuPic install directory (typically ~/nta/eng/logs) before loading in the
configuration file.
console: Defines console output for the default "root" logging
configuration; this may be one of 'stdout', 'stderr', or None;
Use None to suppress console logging output
consoleLevel:
Logging-level filter string for console output corresponding to
logging levels in the logging module; may be one of:
'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.
  E.g., a value of 'WARNING' suppresses DEBUG and INFO level output
to console, but allows WARNING, ERROR, and CRITICAL
"""
# NOTE: If you call this twice from the same process there seems to be a
# bug - logged messages don't show up for loggers that you do another
# logging.getLogger() on.
global gLoggingInitialized
if gLoggingInitialized:
if verbose:
print >> sys.stderr, "Logging already initialized, doing nothing."
return
consoleStreamMappings = {
'stdout' : 'stdoutConsoleHandler',
'stderr' : 'stderrConsoleHandler',
}
consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL',
'FATAL']
assert console is None or console in consoleStreamMappings.keys(), (
'Unexpected console arg value: %r') % (console,)
assert consoleLevel in consoleLogLevels, (
'Unexpected consoleLevel arg value: %r') % (consoleLevel)
# -----------------------------------------------------------------------
# Setup logging. Look for the nupic-logging.conf file, first in the
  # NTA_CONF_PATH directory (if defined), then in a subdirectory of the nupic
# module
# TODO: move into nupic.support
configFilename = 'nupic-logging.conf'
try:
configFilePath = Configuration.findConfigFile(configFilename)
except:
configFilePath = None
# If NTA_LOG_DIR is not defined, set it now. This is used by the logging
# config file to set the path for the log files
if 'NTA_LOG_DIR' not in os.environ:
os.environ['NTA_LOG_DIR'] = os.path.join(os.environ['NUPIC'], 'logs')
if not os.path.exists(os.environ['NTA_LOG_DIR']):
makeDirectoryFromAbsolutePath(os.path.abspath(os.environ['NTA_LOG_DIR']))
# Load in the logging configuration file
if configFilePath is None:
print >> sys.stderr, (
"WARNING: Could not find the logging configuration file " \
"(filename: '%s', expected to be in search path: %s). Logging is " \
" disabled.") % (configFilename, Configuration.getConfigPaths())
else:
if verbose:
print >> sys.stderr, (
"Using logging configuration file: %s") % (configFilePath)
# This dict will hold our replacement strings for logging configuration
replacements = dict()
def makeKey(name):
""" Makes replacement key """
return "$$%s$$" % (name)
platform = sys.platform.lower()
if platform.startswith('java'):
# Jython
import java.lang
platform = java.lang.System.getProperty("os.name").lower()
if platform.startswith('mac os x'):
platform = 'darwin'
if platform.startswith('darwin'):
replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"'
elif platform.startswith('linux'):
replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"'
else:
raise RuntimeError("This platform is neither darwin nor linux: %s" % (
sys.platform,))
if False: #os.path.isdir('/var/log/numenta/nupic'):
# NOTE: Not using syslogHandler for now because it either truncates or
# drops messages over ~1,400 bytes (depending on platform)
# Nupic logs go to syslog. Also, SysLogHandler raises an exception
# on jython (at least on 2.5.2): "AttributeError: 'module' object has no
      # attribute 'AF_UNIX'" (jython is used by a sub-module of
# ClientJobManager)
replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'syslogHandler'
else:
# Nupic logs go to file
replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler'
# Set up log file path for the default file handler
logFilePath = _genLoggingFilePath()
makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))
replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath)
# Set up root logger
replacements[makeKey('ROOT_LOGGER_HANDLERS')] = (
replacements[makeKey('PERSISTENT_LOG_HANDLER')])
if console is not None:
replacements[makeKey('ROOT_LOGGER_HANDLERS')] += (
',' + consoleStreamMappings[console])
# Set up log level for console handlers
replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel
customConfig = StringIO()
with open(configFilePath) as src:
for lineNum, line in enumerate(src):
if "$$" in line:
for (key, value) in replacements.items():
line = line.replace(key, value)
# If there is still a replacement string in the line, we're missing it
# from our replacements dict
if "$$" in line and "$$<key>$$" not in line:
raise RuntimeError(("The text %r, found at line #%d of file %r, "
"contains a string not found in our replacement "
"dict.") % (line, lineNum, configFilePath))
customConfig.write(line)
customConfig.seek(0)
if python_version()[:3] >= '2.6':
# NOTE: the disable_existing_loggers arg is new as of Python 2.6, so it's
      # not supported on our jython interpreter, which was v2.5.x as of this
# writing
logging.config.fileConfig(customConfig, disable_existing_loggers=False)
else:
logging.config.fileConfig(customConfig)
gLoggingInitialized = True
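# Example call (illustrative sketch; the argument values are hypothetical):
#
#   import nupic.support
#   nupic.support.initLogging(verbose=True, console='stderr',
#                             consoleLevel='WARNING')
#
# This reads nupic-logging.conf (from NTA_CONF_PATH or the default conf
# directory), routes console output to stderr, and filters DEBUG and INFO
# messages out of the console output.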
#############################################################################
def reinitLoggingDir():
""" (Re-)Initialize the loging directory for the calling application that
uses initLogging() for logging configuration
  NOTE: It's typically unnecessary to call this function directly since
initLogging takes care of it for you. This function is exposed primarily for
the benefit of nupic-services.py to allow it to restore its logging directory
after the hard-reset operation.
"""
if gLoggingInitialized:
makeDirectoryFromAbsolutePath(os.path.dirname(_genLoggingFilePath()))
#############################################################################
def _genLoggingFilePath():
""" Generate a filepath for the calling app """
appName = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp'
appLogDir = os.path.abspath(os.path.join(
os.environ['NTA_LOG_DIR'],
'numenta-logs-%s' % (os.environ['USER'],),
appName))
appLogFileName = '%s-%s-%s.log' % (
appName, long(time.mktime(time.gmtime())), os.getpid())
return os.path.join(appLogDir, appLogFileName)
#############################################################################
def enableLoggingErrorDebugging():
""" Overrides the python logging facility's Handler.handleError function to
  raise an exception instead of printing and suppressing it. This allows a deeper
stacktrace to be emitted that is very helpful for quickly finding the
file/line that initiated the invalidly-formatted logging operation.
NOTE: This is for debugging only - be sure to remove the call to this function
*before* checking in your changes to the source code repository, as it will
cause the application to fail if some invalidly-formatted logging statement
still exists in your code.
Example usage: enableLoggingErrorDebugging must be called *after*
initLogging()
import nupic.support
nupic.support.initLogging()
nupic.support.enableLoggingErrorDebugging()
"TypeError: not all arguments converted during string formatting" is an
example exception that might be output by the built-in handlers with the
following very shallow traceback that doesn't go deep enough to show the
source of the problem:
File ".../python2.6/logging/__init__.py", line 776, in emit
msg = self.format(record)
File ".../python2.6/logging/__init__.py", line 654, in format
return fmt.format(record)
File ".../python2.6/logging/__init__.py", line 436, in format
record.message = record.getMessage()
File ".../python2.6/logging/__init__.py", line 306, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
"""
print >> sys.stderr, ("WARNING")
print >> sys.stderr, ("WARNING: "
"nupic.support.enableLoggingErrorDebugging() was "
"called to install a debugging patch into all logging handlers that "
"will cause the program to fail if a logging exception occurrs; this "
"call is for debugging only and MUST be removed before checking in code "
"into production system. Caller: %s") % (
traceback.format_stack(),)
print >> sys.stderr, ("WARNING")
def handleErrorPatch(*args, **kwargs):
if logging.raiseExceptions:
raise
for handler in logging._handlerList:
handler.handleError = handleErrorPatch
return
#############################################################################
def clippedObj(obj, maxElementSize=64):
"""
  Return a clipped version of obj suitable for printing. This
  is useful when generating log messages by printing data structures when you
  don't want the message to be too long.
If passed in a dict, list, or namedtuple, each element of the structure's
string representation will be limited to 'maxElementSize' characters. This
will return a new object where the string representation of each element
has been truncated to fit within maxElementSize.
"""
# Is it a named tuple?
if hasattr(obj, '_asdict'):
obj = obj._asdict()
# Printing a dict?
if isinstance(obj, dict):
objOut = dict()
for key,val in obj.iteritems():
objOut[key] = clippedObj(val)
# Printing a list?
elif hasattr(obj, '__iter__'):
objOut = []
for val in obj:
objOut.append(clippedObj(val))
# Some other object
else:
objOut = str(obj)
if len(objOut) > maxElementSize:
objOut = objOut[0:maxElementSize] + '...'
return objOut
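# Example (illustrative sketch; the literal values are hypothetical):
#
#   clippedObj({'big': 'x' * 200, 'small': 3})
#
# returns a new dict in which the 200-character string is truncated to its
# first 64 characters plus '...', and the integer 3 comes back as the string
# '3'. Note that the recursive calls on dict/list elements use the default
# maxElementSize rather than any value passed in at the top level.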
###############################################################################
def intTo8ByteArray(inValue):
"""
  Converts an int to a packed byte array, with the most significant byte first
"""
values = (
(inValue >> 56 ) & 0xff,
(inValue >> 48 ) & 0xff,
(inValue >> 40 ) & 0xff,
(inValue >> 32 ) & 0xff,
(inValue >> 24 ) & 0xff,
(inValue >> 16 ) & 0xff,
(inValue >> 8 ) & 0xff,
inValue & 0xff
)
s = struct.Struct('B B B B B B B B')
packed_data = s.pack(*values)
return packed_data
###############################################################################
def byteArrayToInt(packed_data):
"""
Converts a byte array into an integer
"""
value = struct.unpack('B B B B B B B B', packed_data)
return value[0] << 56 | \
value[1] << 48 | \
value[2] << 40 | \
value[3] << 32 | \
value[4] << 24 | \
value[5] << 16 | \
value[6] << 8 | \
value[7]
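# Example round trip (illustrative sketch; the constant is hypothetical):
#
#   packed = intTo8ByteArray(0x0102030405060708)  # 8 bytes, most significant first
#   assert byteArrayToInt(packed) == 0x0102030405060708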
###############################################################################
def getSpecialRowID():
"""
Special row id is 0xFF FFFF FFFF FFFF FFFF (9 bytes of 0xFF)
"""
values = (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
s = struct.Struct('B B B B B B B B B')
packed_data = s.pack(*values)
return packed_data
################################################################################
_FLOAT_SECONDS_IN_A_DAY = 24.0 * 60.0 * 60.0
def floatSecondsFromTimedelta(td):
""" Convert datetime.timedelta to seconds in floating point """
sec = (td.days * _FLOAT_SECONDS_IN_A_DAY + td.seconds * 1.0 +
td.microseconds / 1E6)
return sec
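# Example (illustrative sketch):
#
#   from datetime import timedelta
#   floatSecondsFromTimedelta(timedelta(days=1, seconds=30, microseconds=500000))
#   # -> 86430.5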
#############################################################################
def aggregationToMonthsSeconds(interval):
"""
Return the number of months and seconds from an aggregation dict that
represents a date and time.
  Interval is a dict that contains one or more of the following keys: 'years',
  'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'milliseconds',
'microseconds'.
Parameters:
---------------------------------------------------------------------
interval: The aggregation interval, as a dict representing a date and time
retval: number of months and seconds in the interval, as a dict:
              {'months': XX, 'seconds': XX}. The seconds value is
a floating point that can represent resolutions down to a
microsecond.
For example:
      aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds':42}) ==
{'months':12, 'seconds':14400.000042}
"""
seconds = interval.get('microseconds', 0) * 0.000001
seconds += interval.get('milliseconds', 0) * 0.001
seconds += interval.get('seconds', 0)
seconds += interval.get('minutes', 0) * 60
seconds += interval.get('hours', 0) * 60 * 60
seconds += interval.get('days', 0) * 24 * 60 * 60
seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
months = interval.get('months', 0)
months += 12 * interval.get('years', 0)
return {'months': months, 'seconds': seconds}
#############################################################################
def aggregationDivide(dividend, divisor):
"""
Return the result from dividing two dicts that represent date and time.
Both dividend and divisor are dicts that contain one or more of the following
  keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'.
Parameters:
---------------------------------------------------------------------
dividend: The numerator, as a dict representing a date and time
divisor: the denominator, as a dict representing a date and time
retval: number of times divisor goes into dividend, as a floating point
number.
For example:
aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
"""
  # Convert each into months and seconds
dividendMonthSec = aggregationToMonthsSeconds(dividend)
divisorMonthSec = aggregationToMonthsSeconds(divisor)
# It is a usage error to mix both months and seconds in the same operation
if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
raise RuntimeError("Aggregation dicts with months/years can only be "
"inter-operated with other aggregation dicts that contain "
"months/years")
if dividendMonthSec['months'] > 0:
    return float(dividendMonthSec['months']) / divisorMonthSec['months']
else:
return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
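# Example with month-based aggregations (illustrative sketch):
#
#   aggregationDivide({'years': 2}, {'months': 3})  # -> 8.0
#
# Mixing a months/years-based dict with a seconds-based dict (for example
# {'years': 1} divided by {'minutes': 15}) raises a RuntimeError, as described
# above.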
|
spbguru/repo1
|
nupic/support/__init__.py
|
Python
|
gpl-3.0
| 27,136
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.plugins.callback import CallbackBase
from ansible.utils.unicode import to_unicode
from ansible.compat.six import string_types
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
    manager object with shared data structures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
self._result_prc = None
# make sure the module path (if specified) is parsed and
# added to the module_loader object
if options.module_path is not None:
for path in options.module_path.split(os.pathsep):
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
rslt_q = multiprocessing.Queue()
self._workers.append([None, rslt_q])
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
inventory hostnames for those hosts triggering the handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
def _process_block(b):
temp_list = []
for t in b.block:
if isinstance(t, Block):
temp_list.extend(_process_block(t))
else:
temp_list.append(t)
return temp_list
handler_list = []
for handler_block in handlers:
handler_list.extend(_process_block(handler_block))
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
        Loads all available callback plugins. Plugins may declare a
        CALLBACK_TYPE option; when CALLBACK_TYPE is set to 'stdout',
        only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
pass
elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
continue
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
# Fork # of forks, # of hosts or serial, whichever is lowest
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
contenders = [ v for v in contenders if v is not None and v > 0 ]
self._initialize_processes(min(contenders))
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done = self._start_at_done,
)
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
play_return = strategy.run(iterator, play_context)
self._cleanup_processes()
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
def _cleanup_processes(self):
if self._result_prc:
self._result_prc.terminate()
for (worker_prc, rslt_q) in self._workers:
rslt_q.close()
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
if gotit is not None:
methods.append(gotit)
for method in methods:
try:
# temporary hack, required due to a change in the callback API, so
# we don't break backwards compatibility with callbacks which were
# designed to use the original API
# FIXME: target for removal and revert to the original code here after a year (2017-01-14)
if method_name == 'v2_playbook_on_start':
import inspect
(f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
if 'playbook' in f_args:
method(*args, **kwargs)
else:
method()
else:
method(*args, **kwargs)
except Exception as e:
#TODO: add config toggle to make this fatal or not?
display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e)))
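# Example construction and run (illustrative sketch; the inventory,
# variable_manager, loader, options, passwords and play objects are assumed to
# have been built elsewhere, e.g. by a playbook executor):
#
#   tqm = None
#   try:
#       tqm = TaskQueueManager(
#           inventory=inventory,
#           variable_manager=variable_manager,
#           loader=loader,
#           options=options,
#           passwords=passwords,
#       )
#       result = tqm.run(play)
#   finally:
#       if tqm is not None:
#           tqm.cleanup()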
|
reedloden/ansible
|
lib/ansible/executor/task_queue_manager.py
|
Python
|
gpl-3.0
| 13,130
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun May 12 18:04:51 2013
# by: The Resource Compiler for PyQt (Qt v5.0.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x03\x54\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xe6\x49\x44\x41\x54\x58\xc3\xd5\
\x97\xcd\x4e\x13\x61\x14\x86\xeb\x35\x94\x95\x7b\x71\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb1\x30\xea\x05\x18\x96\
\x26\x62\x58\xb8\xb0\x91\x58\x20\xd1\x9d\xbf\x89\xa4\x14\xb1\x52\
\xa4\x48\x45\x94\xfe\xd0\x02\x43\xff\xa6\x9d\x19\xa6\x65\x80\xe3\
\x79\x7b\xfa\x85\x51\x4a\x82\xc9\x21\x86\x49\xde\x9c\x33\xa7\xf3\
\xcd\xfb\x9c\xf3\x4d\x9b\x4e\x84\x88\x22\xff\x53\x91\x73\x01\xc0\
\xc7\xd5\x90\x6e\xff\xa5\xfb\xac\xc7\x3d\x3d\x64\x0d\xa9\x02\xf0\
\x31\x32\x3c\x3c\xbc\x6a\x34\x3a\x3a\xba\x19\x56\x3c\x1e\xaf\x26\
\x93\xc9\x56\x3a\x9d\x76\x13\x89\x44\x6b\x60\x60\x20\xcd\x6b\x6e\
\x68\x02\xa4\x38\xd2\xe1\xe1\x71\x99\xba\xef\xb7\xc9\xb2\x2c\xda\
\xdf\xdf\x27\x86\xf1\x78\xcd\x18\xeb\x8a\x1a\x40\x3f\xf3\xb0\x1c\
\xc7\xa5\x4c\x66\xb9\x0b\x14\x04\x01\xc5\x62\xb1\x3a\xaf\x7b\x70\
\x1a\x88\x53\x01\x1c\x1c\x10\x77\x77\xb2\x6c\xdb\xa1\xf9\xf9\xcf\
\x64\x0e\xd7\x75\xe9\xf9\xc4\x44\x17\x42\x05\x00\x26\x7b\xc1\xc9\
\xaa\x37\x1c\x4a\xce\xcd\x53\xf8\x70\x5d\x0f\x8b\x17\x54\x00\x82\
\x10\x40\x67\x4f\x14\xce\xed\xa6\x47\x1f\x67\x66\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9\x69\x7a\xf7\xfe\x03\x45\xa3\xd1\x65\x5e\
\x7f\x41\x05\xc0\xef\x10\xed\xb6\x25\x86\x85\x9a\xe3\x05\x94\x5d\
\xcd\xd1\xe4\xf4\x2b\x7a\x32\xfe\x94\x9e\xc5\x5e\xd0\x4c\x62\x0e\
\x8b\x17\x55\x00\xda\x81\x18\xf5\x13\x20\x3c\xff\x90\x6a\xcd\x36\
\x15\x37\xab\x94\x2f\x6e\x53\x89\x63\x8d\xb7\x85\xd7\x7e\x51\x01\
\xf0\x79\xcc\xcd\x5d\x1e\xb5\xc7\x7b\xdb\xee\x9f\x3b\xbe\xe4\x88\
\x5d\xb8\xbd\xee\xe2\x94\xca\x33\xe0\x75\xe4\xc6\x75\x57\x62\xd8\
\x10\x39\xea\xe6\x33\x44\xd4\x01\xa7\x06\xe0\xf4\x3a\xad\x39\x22\
\x98\x98\x68\x72\x80\x98\x6b\x50\x53\x9d\x00\x00\x2a\x2d\xb9\x31\
\xe2\x4e\x53\x8c\x10\x0d\x04\xf2\x6d\xfb\x28\xb6\x7c\x45\x00\x9b\
\x3b\xdb\x6a\xfc\x69\x8e\x3c\x6c\x88\x1a\xae\x39\x13\x80\x3a\x8f\
\xb7\x54\x23\x2a\xd7\xc5\x04\x06\x06\x00\x35\x28\x9c\x17\xab\xbc\
\x25\xbb\xca\x13\xc0\x4d\x61\x0e\x15\x2a\x72\x6e\xcc\x7e\x5a\x02\
\x68\x6a\xdd\xad\xf1\x94\x27\x00\x53\xdc\x1c\x71\x6d\x5b\x40\x60\
\x9a\xab\x1c\x75\x9e\xeb\x81\x41\x15\x47\x11\xc0\x6a\x89\x31\x0c\
\xd6\x77\x04\x20\x0c\x64\x26\x62\xb6\x69\x75\x8b\xa8\xaa\x09\x50\
\xb6\xc5\xbc\xd0\x03\xf8\xbe\x29\x63\x87\x29\x60\x0c\x18\x84\x1c\
\x00\x5b\x4d\x45\x00\x74\x03\x53\x98\xad\x94\xc5\x1c\xe7\x46\xe6\
\x1c\x00\xc8\x71\x5d\xa9\xa1\x08\x80\xfd\xfc\x56\x12\x73\x33\x01\
\x08\x35\x18\x42\xe8\xda\x7c\x8e\x29\xa8\x4e\x00\x5b\x00\x03\xc8\
\x98\x67\x36\x04\x00\x32\xe6\x85\xde\xf8\x17\x0b\xfc\x2c\xd8\x8a\
\x00\x18\x67\x3a\x4f\xb4\x54\x14\x23\x98\x02\x00\x02\x0c\x3e\xfb\
\xc5\x53\x28\xf0\x43\xb8\x66\x49\xf7\x6b\xf9\x52\x87\xd7\xbe\x54\
\x01\xc8\x55\x8f\xba\x4e\xad\x4b\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
\x3d\x4f\xa6\xb3\xde\xa3\xb1\x71\xeb\xda\xd0\xf5\x15\x98\xb3\x6e\
\xa9\x00\x6c\x34\xa4\x6b\x18\xff\xe0\x11\x7f\x5a\x17\x53\xd4\x13\
\x0b\x59\x6f\xe4\xee\xbd\xe2\xa5\xc1\xcb\x4b\x7c\x6d\x8c\x75\x87\
\x35\xa8\xfa\xb7\x1c\xdd\x65\xd9\x3c\x8f\x1f\x19\xfe\x9e\xcf\x1e\
\x37\xbd\xc9\xba\x78\x26\x6f\x46\x00\x68\xf2\xff\x81\x99\x94\x9e\
\xe9\x3f\xbf\x19\x01\x42\xd3\xf4\xfc\xbd\x9c\x9e\xa5\x7e\x03\x51\
\x6c\x25\xa1\x92\x95\x0a\x77\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\x6d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x06\x34\x49\x44\x41\x54\x78\x5e\xad\x97\x5b\x6c\x54\xc7\
\x1d\xc6\x7f\x73\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
\x69\x63\x24\x42\x4a\x21\x22\xa1\x2d\x95\x62\xa5\x2f\xee\x4b\x68\
\x2b\x95\xa6\x55\xa5\xc6\x60\x55\xaa\xda\xb4\xaa\xfa\x56\x09\x55\
\xca\x03\x94\x27\xda\x07\x84\x14\x29\xad\xc4\x8b\xa5\x52\x83\x79\
\x08\xc5\x18\x39\x0e\x69\xd3\x84\x9a\x9b\x63\x6a\xec\xb2\x04\x1b\
\x3b\xbb\xf6\x7a\x8f\xbd\xbb\xde\xb3\x67\xa6\xc3\x68\x85\xe5\x72\
\x6c\x88\xc9\x27\x7d\xfa\x9f\x9d\x87\xfd\x7e\xf3\x9f\x99\x73\x11\
\x4a\x29\x82\x24\x84\x78\x05\x78\x9e\xc7\x6b\x48\x29\xf5\x77\xd6\
\x28\x27\x20\xb8\x43\xbb\x01\x68\x97\x52\xbe\xc6\x63\x64\x59\xd6\
\x07\x1a\xf6\xbb\x40\xb7\x06\x39\xff\x14\x00\x26\xfc\xb7\xed\xf5\
\xe2\x60\x5d\x44\x44\x6e\xce\x89\x8a\x2b\x57\xae\x50\x5d\x53\x8d\
\x40\x00\xa0\x50\x08\x65\x28\x41\x29\x66\xd3\x69\x5e\xa9\x17\x2f\
\xbc\xb4\x4e\x6c\x3b\xf1\x1f\xb9\x47\x83\x7c\x5b\x43\x4c\x3c\x4d\
\x07\xf6\xff\x60\x8b\xdd\x2c\x25\xf8\x4a\x32\x3c\x3c\x4c\x65\x65\
\x25\x2b\xc9\x75\x5d\x1e\xc0\x6e\xa9\xb0\x22\x1b\xa2\x2a\x72\x3f\
\xa7\xea\x81\xb5\x03\x08\x2d\x05\x48\xa1\x0d\xf4\x5d\xbc\x48\x2e\
\x97\xc3\x2f\x16\x51\x4a\x91\xcf\xe7\x59\x5c\x5c\xa4\x50\x28\x50\
\xd4\x63\xb5\xb5\xb5\x94\x01\x58\x80\xf8\x82\xf6\x80\x01\x00\x36\
\x44\x05\x1f\x0f\xbc\x4b\x3e\x3b\x8f\x85\x44\x95\x32\xe2\xb6\xc4\
\xb6\x04\x21\x21\x70\x3e\x53\x6c\x8c\x3b\x80\x44\x2a\x04\xf0\x9c\
\x10\x02\xe0\xcb\x40\x05\x50\x0f\x34\x60\xc4\x48\x69\x9f\x24\x02\
\x01\x4e\x9c\x38\x21\x00\x81\x05\xd2\x87\x96\x96\x67\x09\x65\x6d\
\x14\xe5\x28\xa5\xb4\x41\x08\x58\x57\x19\x25\xe2\xd8\x44\x42\x16\
\xc3\x13\x73\x5c\xbc\x3d\x41\xf7\x58\x8e\x5c\x24\xbe\xa9\xbd\x7d\
\xf7\xef\x2d\xcb\x5a\xdc\xb1\x63\x47\x59\x55\x55\x95\xd3\xd8\xd8\
\x18\x7e\xe0\x86\x86\x86\xd0\xa5\x4b\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13\x44\x55\x2c\xc2\x73\xd5\x31\xf2\x9e\x4f\xa1\
\x28\x91\x4a\x61\x09\x41\xd8\xb1\x88\x86\x6c\xe6\x72\x05\x12\xa2\
\x8e\x3f\x9f\xff\x2b\x0d\x4d\x1b\x01\x22\xc0\x66\x96\x84\xef\xfb\
\x78\x9e\x47\x75\x75\xb5\x9e\x50\x4b\xf4\xea\xd5\xab\x87\x84\x10\
\x28\xa5\xde\x5a\x11\xc0\xb2\x41\x00\xb6\x2d\x90\xda\xb6\x14\x38\
\x08\xa4\x12\x58\xc2\x8c\x1b\x8f\x4c\xb9\xec\x7b\xf5\x3b\xd4\x37\
\x36\x11\x7c\x2f\xc1\x84\x67\x32\x19\xca\xcb\xcb\xcd\x66\x3e\x76\
\xec\xd8\x26\xbd\x7f\x0e\x2e\x41\x2c\x01\xd0\xd9\xd9\xa9\x0e\x1d\
\x3a\xa4\x6c\x21\x08\x59\x10\xb6\x2d\x1c\xc7\xc6\x42\x50\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x88\x38\x96\xae\x02\x60\x59\x78\x10\xc0\xdc\
\xdc\x1c\x35\x35\x35\x06\x20\x1a\x8d\x72\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7\x62\xb9\xb0\x6d\x74\x30\x66\x8d\xcb\x23\x36\
\xb1\xa8\xa3\xc7\x2c\x32\x8b\x1e\x93\x99\x1c\x63\xa9\x79\xee\xcc\
\x2e\xe8\xdf\x45\x72\xf9\x3c\xab\xc8\x2c\x41\x36\x9b\x35\xa7\x66\
\xe9\xff\x6d\x0e\x1c\x38\xb0\x1e\xe8\x00\x58\x06\xa0\xb4\x74\x16\
\x8e\x0d\xe1\x90\xc0\x53\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6\x7d\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb\x3a\x0d\x2f\xb4\x73\xfb\
\xce\x24\xfd\xfd\xfd\x24\x93\x49\x94\x52\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac\x33\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c\x3e\x7c\x98\xde\xde\
\x5e\x12\x89\x84\x04\x2c\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94\x4f\x6b\xc7\xcf\xf8\xe6\x2f\xdf\x26\xf6\xf5\x37\x99\x7c\xa6\
\x83\x6b\xfe\x2e\xae\xf1\x2d\x64\x6b\x17\xad\x7b\x7f\x4e\x5e\x56\
\x73\xfa\x6f\x67\xd1\x77\x4d\xee\xdc\x9d\xe2\x1b\xaf\x76\x72\xfd\
\xfa\x75\x03\xa0\x67\x6b\xd6\x3f\x16\x8b\x99\xeb\x78\x3c\x8e\xe3\
\x38\x25\x38\x04\xc0\x23\x00\x96\x25\x98\xca\x41\x3a\xde\xca\xfe\
\xdf\xbd\x4d\xd5\xae\xd7\x28\x84\x62\x08\xdb\x42\x59\x82\x6c\x41\
\x72\x7f\x66\x91\x4f\xee\x66\x18\xb8\xea\x72\xfa\x1f\x61\x64\xd5\
\x5e\xae\x8f\xdc\x67\x32\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19\x37\x35\x35\xa1\xa1\x14\x20\x83\x1f\
\x46\x16\xdc\x71\x15\xdf\xff\xe9\x6f\xa8\x6c\xd8\x48\xe2\xec\x3b\
\x4c\x8f\x5e\xc3\x89\x94\xb1\xb5\x79\x07\x9b\x5b\xb6\xf3\x49\x79\
\x25\x63\x09\x97\xcf\x66\xf2\xdc\x9d\xce\x32\xa1\xed\x88\x0d\x4c\
\x27\xe7\xd8\xb7\x2b\xca\xfa\x25\x00\x33\x7b\x3d\x6b\xea\xea\xea\
\x00\xcc\x75\x2a\x95\x32\x00\x4a\x2b\x10\xa0\xb9\x5a\x70\xe1\x9d\
\x63\x28\x2c\xca\xe6\xc6\xd9\x10\x8f\x52\x94\x92\x7b\xc3\x7d\x24\
\x65\x05\xdb\xda\x7f\x4c\x4d\xdb\xcb\x7c\x3c\x9c\x66\xd2\x5f\xc0\
\xcd\x78\x2c\xcc\x6b\x2f\x78\x20\x00\xb5\x74\x3a\x42\xa1\x90\x09\
\x2d\xdd\xea\x1f\x8e\x01\x2a\xf8\x3e\x60\xc1\xc6\xb8\xa0\x50\x1c\
\x23\x1c\x8b\x53\xb7\xa5\x96\x92\x78\x76\x7d\x05\xe9\xac\xc7\x68\
\xff\x9f\x98\xae\xbc\x4c\xcb\xf6\x83\xb8\x0b\x61\xbc\x82\xa4\x58\
\x94\x78\xda\x21\xc7\x42\x2d\xaa\x80\xe3\x69\xa0\x96\xd5\x15\x01\
\x00\xd6\xc7\x43\x84\xca\x23\xfc\xbf\x6a\x63\x21\x9e\xa9\x0c\x73\
\xe1\xdf\x83\xec\xd9\xf9\x13\xca\xa3\x0e\xb9\x32\x47\x03\x28\x03\
\x61\x6b\x00\x16\x4b\x21\xa5\x1c\x25\x30\x2a\x15\xa4\x5c\x05\x40\
\x58\xa5\x2a\xcc\xf5\x23\xfa\x70\x6c\x86\xf1\x59\x8f\xef\xfd\xfa\
\x8f\xdc\xca\xd4\xe0\x44\x5c\xa2\x11\x1b\xcf\x93\x14\x3d\x07\xd3\
\x01\xa5\x90\x52\xf2\x50\x6a\x59\x01\x56\x05\x10\x08\x4c\x0d\x04\
\x18\x9d\x76\xf9\xd5\x5f\x86\x18\xbd\xb7\x80\x3d\x93\x67\xd3\xba\
\x32\xf2\x79\x5f\xbb\x68\xea\xce\xaf\xd4\x70\xf9\xdd\xe0\x25\x00\
\x9e\x78\x09\x4c\xb8\x10\x3c\xa2\xd6\x2f\x55\xf2\x87\x1f\x3e\xcf\
\xf5\x4f\x33\x44\x1b\xb7\xb1\xf3\xc5\x97\x59\x12\x5c\x4e\x60\x8e\
\xdb\x53\x01\x28\xc0\x12\x25\x00\x6d\xd4\x52\x7d\xb1\xb5\x96\xdd\
\x5b\xe2\x74\xbf\x97\xa5\x6a\xf7\x57\xf9\xd1\x1b\x6f\x10\xa0\xb5\
\x03\x98\xb5\x37\xd5\xd8\x08\x01\xd2\xcb\x53\x70\x53\x78\xf3\x33\
\x14\xb3\x69\x0a\x19\x1f\x25\xfd\xd5\x82\xd6\x08\xf0\xf0\x29\xe7\
\xe3\xe7\x33\x14\xe6\x75\xa8\x0e\xd6\x00\xcb\xf7\x89\x10\xc1\x33\
\x7d\xfa\xd7\x72\x8c\xb2\x13\x37\x03\xc7\x01\xb2\x1e\xfe\xad\x94\
\xcc\x6f\xf7\x44\x54\x03\xd8\x5f\x70\x07\x08\x92\x09\xfd\xd7\x3d\
\x3f\xfd\x7e\x42\xa6\xcf\xdf\xf6\xef\x02\xee\x76\x3b\xfc\x92\x06\
\xa8\xe3\x73\xca\x75\x5d\x1f\x70\x57\xed\x00\x40\x32\xab\x0a\x1f\
\x7e\x2a\xd3\xbd\xb7\xfc\xd4\xcd\x69\x39\x05\xf4\x03\x97\x74\x68\
\xbf\x10\xa2\xd3\xb6\xed\xaf\x7d\x9e\x25\x58\x58\x58\xf0\x07\x06\
\x06\xd2\x27\x4f\x9e\x9c\x06\xba\x83\x00\x3e\x1a\x49\xca\xad\xe3\
\xb3\x2a\xd7\x3b\xe2\xa7\x6e\x4c\xcb\xd1\x52\xe8\x59\x1d\x74\x8b\
\x00\x3d\x09\xc0\xd0\xd0\x90\xdb\xd3\xd3\x93\xd2\x4e\xcf\xce\xce\
\x9e\x2e\xbd\x1d\xdf\x08\x02\xe8\xee\xea\x29\x00\x8c\x04\x84\x06\
\x85\xaf\x08\x30\x35\x35\x55\xd0\x2f\x22\xa9\x53\xa7\x4e\x25\xc7\
\xc7\xc7\x2f\x03\x67\x81\x7e\x1d\xec\xae\xb8\x09\x4b\xdf\x76\xda\
\x4f\x26\x85\x01\x40\x08\x40\x61\x5a\xfc\xde\xe0\x60\xba\xbb\xbb\
\x3b\xa5\xdf\x8a\xcc\x24\xd0\x5e\xed\x73\xcd\x61\xed\x9a\x77\x33\
\x6e\x11\x60\x70\xf0\xfd\x74\x5f\x5f\x5f\xfa\xcc\x99\x33\xa6\xc5\
\xa5\xd0\x8f\x78\x02\x89\xb5\x9e\x63\x21\x44\x18\x78\x13\xd8\x4f\
\x69\x73\x06\xb4\xf8\xb1\xfa\x1f\xbd\xfa\x2a\x5f\xf2\xd8\x15\x9d\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xa3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\x35\x49\x44\x41\x54\x58\xc3\xe5\
\x97\xcd\x8f\x54\x45\x14\xc5\x7f\xb7\xea\xd6\x7b\xaf\xdb\x6e\xc7\
\xf9\x40\x9d\x89\x46\x4d\x34\x99\x44\x8d\x1a\x48\x98\xc4\x8c\x1f\
\x1b\xfe\x02\x4c\x5c\xf1\x07\x18\x16\x2e\x4d\x5c\x6b\x58\xc3\x8e\
\xc4\x8d\x1b\x17\xce\x82\x68\x74\x41\x5c\x18\x0d\xe2\xc4\xc6\x00\
\x3d\x60\x50\x51\x19\x60\x02\xa2\x0e\x0c\x83\xd3\xfd\x5e\xf7\x94\
\x8b\xaa\xee\xf9\x60\xe6\x0d\x84\x51\x16\x56\x52\xa9\xce\x7b\xb7\
\xeb\x9e\x3a\xf7\xd4\xa9\x7a\xea\xbd\xe7\x7e\x36\xe5\x3e\xb7\x3e\
\x80\x5d\xbb\x76\xbd\x03\xec\xfd\x8f\xf2\x4e\x35\x1a\x8d\x03\xeb\
\x19\xd8\xbb\xef\xbd\xa3\x3b\x1f\x1f\x76\x00\x9c\x3c\x3a\xcf\xcc\
\x97\x37\x58\x9c\xef\xdc\x53\xa6\xda\xa0\xf2\xdc\x6b\x03\xbc\xb8\
\x67\x10\x80\x8b\x7f\x16\x7c\xf8\xee\x1e\x80\xdb\x00\x70\xfc\xec\
\x1c\xdf\x3f\x30\x04\x78\x2e\xfd\xb8\xc0\xfe\xb7\xce\x6f\xcb\x72\
\x0f\x1d\x79\x9a\x0b\x23\x96\xd3\x9f\x1f\x64\xfc\xd5\x7d\x9b\x6b\
\x40\x45\xb0\x16\x40\x78\x70\x2c\x23\xcb\xb2\x6d\x01\x30\x30\x96\
\x61\x8d\x50\x1b\x7c\x14\x23\x25\x22\x14\x2b\xd8\x18\x91\xd5\x95\
\x73\xe7\xce\x83\x2a\xb8\x04\xd2\x14\xb2\x0c\xd2\x2c\x8c\x49\x0a\
\x49\x12\xde\x77\x3a\x90\xe7\x90\xb7\xa1\xd5\x82\x76\x2b\x8e\x6d\
\x28\x72\xb2\xfa\x38\xd6\x0a\xe3\xaf\xbc\x49\x6b\xf1\xfa\xe6\x00\
\xac\x15\xac\x15\x04\xb0\x46\xd8\xbd\x7b\xe7\x16\x6b\xeb\x86\xae\
\x80\x5a\xa8\x56\x81\xea\x6d\x51\x8d\xaf\x04\xb5\x82\xf7\xa0\xa6\
\x84\x01\x67\x05\x35\x82\x08\xa8\x0a\x95\x2c\xc3\x23\x20\x1e\x08\
\xc0\xf0\x1e\x2f\x02\xde\x23\x12\x26\x15\x7c\x88\x23\xc4\x21\x1e\
\x3c\x21\x5e\x40\x4d\x58\x18\x40\xd7\x4a\x89\x06\xac\xa0\xda\x63\
\x00\x9a\x33\xbf\x05\x8a\x53\x07\x69\x02\x95\x04\xb2\x34\xf6\x04\
\x12\x07\x4e\xa1\xe8\x40\x5e\x40\x2b\x8f\xbd\x05\x4b\x39\xb4\x73\
\xc8\x0b\x54\x87\x71\x3d\x00\x2a\xe5\x25\x70\x31\x40\xd5\x30\x39\
\xf9\xd2\xd6\x0a\xf3\x3e\xd0\xaf\x16\xaa\x1b\x8b\xf6\xd8\x27\x61\
\x61\xbd\x1c\x25\x25\x20\x00\xf0\x81\x8d\x34\x4d\xa3\x3a\xc3\xb3\
\x98\x11\x89\x6c\x07\xda\x63\x09\x56\x98\x5f\x29\x46\xfc\x61\xcd\
\x72\x7f\x61\x1d\x2d\xd1\x80\x3a\x09\x54\x49\x18\x4f\x34\x2f\xe0\
\x9d\x85\xc4\x21\x89\xc3\x67\x09\x92\x69\xd8\x11\x89\xe2\x13\x87\
\x58\x8b\xef\x76\x91\xbc\x80\xbc\x03\xed\x02\xdf\x6a\x23\xed\x02\
\xf2\x02\x9f\x77\x50\x1d\x45\xd5\x20\x78\x3a\xeb\x54\x78\x9b\x06\
\x9c\x33\x78\x0f\x03\x8f\x24\xbc\xfe\xf2\xf3\x77\x68\xe8\x36\x68\
\xa4\xbe\xf1\xeb\xc6\xfc\xdf\xb1\x04\x52\x5e\x82\x44\x4d\x5f\x84\
\x8f\x0d\xa5\x38\xe7\xb6\xc5\x88\x9e\x18\x4b\xb9\x76\xb3\x03\x08\
\x9d\x52\x11\xaa\x90\xb8\x50\xef\x5a\xc5\x30\x7d\xb1\xcb\x40\xc5\
\xb0\x0e\xf4\x26\xad\x57\xf9\x55\x2e\xe1\xe1\xc6\xd2\x32\xf5\xcc\
\x70\x7d\xc9\x84\x2d\xe9\x4a\x19\x10\x9c\x1a\xc0\x73\xe5\x66\x97\
\x2b\x37\xbb\xac\x51\x57\x3f\xd7\xaa\x64\x7e\xc5\x27\xa2\x29\xac\
\x05\x15\xc3\x9c\x0b\xb5\x77\xa6\x6c\x17\xa8\xc1\xa9\x20\xc8\x1a\
\x35\xaf\x9b\x35\x1a\x8f\x59\x31\x9e\xfe\x7b\xe9\xef\x14\x00\xf1\
\x82\xef\x9b\x58\x30\x2b\x57\x56\x02\x55\x21\xd1\x90\xfc\xe7\x53\
\xdf\xf2\xeb\x99\x13\x2c\x2d\xde\xb8\xa7\xfa\x57\x6a\x03\x3c\xf5\
\xec\x4e\x9e\x79\x61\x02\x0f\xa8\x33\x5b\x31\x10\x03\x7c\x87\xf7\
\xf7\xbf\xc1\xc2\xc2\x02\xb7\x6e\xdd\xa2\x28\x0a\x44\x04\x6b\x2d\
\xd6\x5a\x54\x15\x55\xc5\x39\x87\xaa\x62\xad\xc5\x98\xf0\xdf\xe5\
\xe5\x65\xf2\x3c\xef\xf7\x23\xcd\xf9\xb8\xf2\x2d\x18\x70\x56\x50\
\x17\x18\xdc\x31\x3a\xb6\x72\x4f\x38\x7e\x9c\xe9\xe9\x69\x8c\x31\
\x78\xef\x99\x98\x98\x60\x72\x72\xf2\x8e\x59\xd8\x31\x3a\xd6\xdf\
\x86\xae\xd4\x09\x55\x70\x36\xac\xa2\x56\xaf\xf7\x6b\x39\x33\x33\
\xc3\xd0\xd0\x10\xd6\x5a\xbc\xf7\x34\x9b\xcd\xbb\x02\x50\xab\xd7\
\x70\xd1\x88\xb4\xd4\x88\x14\x9c\x0b\x27\x5c\xa0\x2a\x00\xa8\x56\
\xab\x64\x59\xd6\xa7\xb8\x37\xde\x69\x73\x1a\xa9\x17\x41\x4b\xad\
\x38\x1e\xc7\xbd\x23\xb4\xd7\x8c\x31\x88\x44\xdf\x8f\x3a\xb8\xab\
\x9b\xaf\x35\xa8\x0d\xf3\xf6\x18\x2e\x3d\x8e\x83\x29\x6d\xe3\xd5\
\xdb\x12\xa9\xf7\xe5\x56\x6c\xad\xf4\x91\x0e\x8e\x0c\xc3\xf2\xef\
\xdb\x02\xe0\xa1\x91\x61\xd4\xc2\xb5\x2b\x97\x59\x9c\xbf\xbe\x05\
\x03\x36\xf8\xc0\x60\xad\x02\x0b\xdb\xc3\xc0\x50\xad\xc2\xec\xc5\
\x4b\x9c\xfd\xee\x1b\xce\x9f\x9c\x9e\x03\xa6\x36\x04\x60\x24\x5e\
\x4a\x05\x12\x0b\xed\x91\x27\xa9\x3d\x0c\x6f\x1f\x38\xc8\x66\xc7\
\x81\x27\x3a\xf1\x2a\xe7\x35\x1e\x32\x81\x14\x28\xba\x70\xf9\xea\
\x55\xce\x34\x8e\xd1\xfc\xfa\x8b\xb9\xd9\x1f\x4e\x1d\x02\x0e\x6f\
\x08\xe0\xb3\x8f\x3e\xe0\xa7\xd3\x27\x57\x99\xe9\xda\xa3\x86\x55\
\xe6\xbb\x1e\x04\x1b\x3c\x5f\x1d\x6f\x7c\x77\xee\x8f\xd9\x5f\x0e\
\x01\x87\x1b\x8d\xc6\x5f\x1b\x01\x98\x9a\xfe\xf4\xe3\x7f\xf5\x73\
\x6c\x7d\xf2\x35\x00\xe2\xb7\xda\x81\xff\xdd\xd7\xf1\x3f\x4d\xf0\
\x4b\xb9\xe8\x46\x89\xaf\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x08\x19\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\xab\x49\x44\x41\x54\x58\xc3\xad\
\x57\x5b\x50\x93\x67\x1a\xf6\xca\xce\xec\xcc\xf6\x62\x2f\xbc\xd9\
\xe9\xce\xec\x6e\xbd\xda\xd9\x9b\xb5\xce\xba\x3b\x7b\xb0\xad\xcc\
\x7a\xb1\xce\xce\x3a\xb3\x76\x54\x70\x75\xdb\xe2\x81\xd6\xb6\x54\
\x04\xbb\xa5\x20\x6d\xc1\x82\x06\x08\x07\x51\x42\x80\x80\x80\x02\
\x21\x81\x10\x92\x40\x48\x10\x73\x24\x21\x67\x72\x80\x04\x42\x20\
\x9c\x09\x47\xb5\x54\x78\xf6\xfb\x7e\x13\x16\x30\x58\x8b\x7d\x67\
\x9e\xf9\x2f\x92\xfc\xcf\xfb\x3e\xcf\xfb\xbe\xdf\x97\x5d\x00\x76\
\xfd\x98\x20\xf1\x0b\x82\x14\x02\x03\xc1\x75\x82\x03\xcf\xfd\xfe\
\x8f\x48\xbc\x9b\x20\xe1\x57\xaf\xef\xb5\x2a\x8c\xd6\x65\xdb\x02\
\x60\x19\x1e\x5b\x09\x27\xf1\x33\xfa\x19\x81\x22\xfc\xdc\x3e\x76\
\x48\x7e\x8a\xa0\xb9\xb6\x59\x1c\x32\xcf\xad\x42\x39\xfe\x1d\x44\
\xf6\x51\xd8\xc7\xe6\xe8\x87\x86\x3d\x7b\xf6\x58\x53\x52\xae\x2c\
\xca\x3a\x3a\x10\x4e\xe2\xe5\x49\xc3\xc4\x31\x04\xb7\x3e\x49\xf9\
\x2c\x60\x9b\x5d\x59\x53\x4d\x03\x4d\xb6\x11\x34\xeb\xfb\x20\x31\
\x79\x60\x19\x9d\xc5\xbb\xef\xbe\x3f\xc5\xab\xbe\x83\xf1\x89\x29\
\x4c\x4f\xcf\xae\x92\xef\xd7\xbc\x74\x02\x11\x9f\x0f\xbe\x1d\xe3\
\xb2\x04\x43\x4f\xb4\x33\x40\x8b\x7b\x06\xcd\x3d\x2e\x34\xeb\xec\
\xa8\x57\xf6\x20\x87\x53\x85\x32\x5e\x35\x43\xbc\xb0\xf4\x90\x81\
\xc1\x60\x5c\x26\xbf\x4b\x7c\xe1\x04\x48\x1c\x24\x38\x41\xfd\xdd\
\xea\x73\x27\xf1\xb9\x27\x04\x48\x87\x97\xc1\xd7\xbb\x20\x22\x55\
\x37\xdc\x37\xa2\xb8\x4e\x88\x2c\x56\x3e\xcc\x56\xdb\x3a\x71\x04\
\x2c\x16\x6b\x2c\xfc\xce\xe7\x27\x10\x91\x36\x93\x95\x3f\x46\x7d\
\xa5\xfe\x12\xc4\x6f\xf4\x59\x31\xb6\x02\x7e\xef\x20\x5a\x7b\x9c\
\xe0\x3f\x30\xa1\x4c\x28\x43\x46\x0e\x1b\xb2\x0e\xf9\x26\xd2\xf9\
\xc5\x65\xcc\x2d\x2c\x21\x34\xbf\x88\xbd\x7b\xf7\x5a\xc9\x3b\x7e\
\xba\x6d\x02\x24\x7e\x43\x90\x46\x3d\x35\x13\x69\x75\xb3\x80\xd2\
\x3f\x0f\xcb\xc4\xe2\x9a\x50\xa1\x5a\xb4\x6c\xf1\x59\xa0\xb6\xa0\
\xa6\x5d\x8d\x2f\xb2\x73\x71\xb7\x9e\xff\x0c\x31\x25\x9d\x09\xcd\
\x63\x62\x6a\x06\x83\x43\x81\x27\xe4\xdd\xbc\x2d\xd3\xb0\x3b\x92\
\x03\x33\x26\xd4\x53\xb5\xd3\xfb\x58\x4f\x88\xc5\x03\x21\x88\x2c\
\x43\x50\xba\x46\xd0\xed\x09\x42\xe5\x9b\x42\x9b\x73\xfc\xa9\xcf\
\x5a\x1b\xee\x2a\x74\xc8\xbc\xc9\x45\x09\xa7\x6c\x93\xcf\x9b\x88\
\x27\xa7\x11\x18\x1d\xc3\x80\x6f\x08\xa2\xd6\xd6\x25\xc2\x51\xdb\
\x28\x12\x87\xc6\x1f\xaf\x82\x2f\x62\x94\x4d\x89\x24\x90\x22\xea\
\x52\x2d\x9a\x42\xab\xe8\x18\x79\x04\xa1\xc5\xcf\x10\x53\x74\xf6\
\x0d\xa3\xd3\xe1\x87\xd4\x3c\x80\x16\xbd\x03\x0d\x5d\x06\x14\xd5\
\x0a\x90\x91\x95\x0d\x2f\x79\xf1\xc6\xaa\xa9\xd4\xb3\x73\x0b\x4c\
\xc5\x94\xd8\xdd\xef\x85\xc9\x62\x05\xb7\xbc\x12\xa5\xe5\x95\x4b\
\x13\xf3\xcb\xab\x23\x0f\x01\x37\xd9\x11\xe6\xd9\x15\x84\x97\x15\
\x13\x06\xcb\x3c\xd0\x68\xf2\xa3\xdd\xee\x5f\x27\x96\x3b\x86\x20\
\xb3\x78\xd7\x7d\xe6\x08\xa4\xf8\x3c\x33\x1b\x2a\x8d\x36\xaa\xdc\
\x53\x33\x21\x8c\x8e\x8d\x33\x15\xd3\x26\xe4\x37\x09\xf1\xc1\xc5\
\x8f\x51\x73\xaf\x01\xbe\x65\x60\xfc\x11\xa0\x23\x13\x23\xf2\xce\
\xa1\xbe\x5d\xb9\xb8\x51\x01\x83\x81\x74\x74\x4d\xa7\x1e\x0a\x67\
\x80\xa9\xb8\xdd\xea\x83\xd8\xe8\x42\x93\xca\xcc\xf8\x7c\xe5\xcb\
\x2c\x88\xda\x24\x51\x89\xa7\x67\xe7\x18\x1b\x86\x86\x47\x60\x77\
\x38\x49\x82\x3a\x24\x7c\xf8\x21\xae\xb3\x0b\xe1\x99\x5c\x80\x6f\
\x09\xd0\x90\xde\xe1\x0f\x2c\x81\xab\x1f\xc4\x7d\xef\x04\xdd\x07\
\x1d\x61\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6\x21\x48\xcc\xfd\x4f\
\x7d\xee\xd4\x22\x9d\x55\x84\xaa\x9a\xba\x4d\x3e\x47\xe4\x8e\xf8\
\x3c\x3c\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2\x62\x9c\x7e\x2f\
\x1e\x3d\x03\x01\xf4\x2f\x02\x83\x84\xbc\xc5\xff\x2d\xee\x3a\x43\
\x28\x51\x91\xf7\xf6\x05\xf1\x4e\xdc\xbf\x7d\x84\x33\x69\xe3\x20\
\x18\xf4\x33\xab\xe0\xc9\x54\x68\x35\x38\xd1\xd8\xdd\x0b\x9e\x58\
\x89\xac\x5c\xf6\x33\x3e\x47\xaa\x9e\x9c\x9e\x65\xe4\xee\xf7\x0e\
\xa2\xd7\x6c\x41\x43\x03\x1f\x27\x62\xe3\x20\xe9\xd6\xc0\x45\xcf\
\x01\x52\x90\x24\xb8\x86\xb2\x9e\x00\x6e\xb4\xdb\x50\xd1\x1b\x44\
\x85\xce\x8b\x4a\x7e\x0b\x6d\xbe\x9b\x5b\x27\xd1\xa0\x99\xf8\x16\
\x65\x22\x05\xee\x29\xf4\x28\x13\xc8\x90\x78\x35\x0b\x1a\xad\x3e\
\xaa\xdc\x63\x13\x93\xf0\x0d\x0d\xc3\x66\xef\x83\xb4\x5d\x8e\xc4\
\x4b\x97\x90\xc3\xca\xc3\xd4\x63\xc0\x4e\x7a\x49\x31\x4e\xfa\x89\
\x94\x7f\x5b\x3b\x84\x7c\x85\x13\x25\x6a\x1f\x4a\xd5\x03\xe8\xf2\
\x30\xa3\x28\x22\xf8\xf9\x33\x09\x74\x8f\x2e\xa1\xa8\xbe\x15\xa5\
\x7c\x09\xb2\x4a\x2a\xf0\xcf\xe3\x71\x51\xe5\xf6\x07\x46\xd1\xe7\
\xf2\x40\xab\x37\x20\xfd\x6a\x06\x92\xbf\x48\x83\xcd\x37\x02\x27\
\xa9\xda\x40\x1a\x4c\xe0\x7b\x88\x52\x9d\x1f\x45\xdd\xfd\x0c\x71\
\x41\x97\x1b\xc5\xdd\x1e\x88\x9c\x41\xfc\xf9\xcd\xb7\x5d\x84\xeb\
\x6c\xb4\x43\xd0\x28\xf7\x4e\x23\xa7\xfc\x1e\xb2\x4b\xab\xf1\x51\
\xea\x57\x48\xfe\x6f\xea\xfa\x58\x51\xb9\x47\x82\xe3\xf0\x0c\xf8\
\x60\x34\x99\x51\xc9\xab\xc2\xfb\x67\xcf\x41\xfe\x40\x03\x3f\xe9\
\x6e\xb2\x8d\x19\xb9\x6f\x69\x06\x19\xd2\x9b\x2a\x2f\x72\xe5\x0e\
\xe4\x75\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca\x29\xc2\x53\
\xb8\xdd\x29\xdc\x2b\x76\x04\x90\x51\xc8\xc5\x95\x6b\x79\x38\x11\
\x9f\x80\x9b\xb7\x6e\x33\x63\x15\x91\xdb\x6a\x73\x40\x22\x6d\xc7\
\x85\x84\x0f\x50\x74\xbb\x0c\xf3\x2b\x80\x9f\x34\x58\xf7\x24\x20\
\x1c\x7c\x84\x4a\xd3\x18\x38\xfa\x61\x86\x9c\x56\xfd\x55\xb3\x1e\
\xac\x0e\x3b\xb8\x3a\x1f\xd9\x21\x1e\x7a\x2f\xe0\x13\xbc\xba\x5d\
\x02\x26\xbe\xc1\x83\x94\x6f\xd8\x38\x9f\x9c\x8a\x03\x7f\x3d\x04\
\x63\xaf\x99\xe9\x6e\x2a\xb7\x46\xd7\x83\xa4\xcb\xc9\x48\xff\x3a\
\x8b\x8c\xd5\x3c\x53\xb5\x71\xf6\xa9\xdc\x35\xf6\x69\x5c\x97\x59\
\x19\xd9\xbf\x6e\x21\xa7\xa0\xd4\x82\x74\xbe\x1a\x57\x9b\x34\x60\
\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf\x5f\xa7\x67\xc0\x3b\xe1\x75\x1f\
\x35\xcc\x35\xdd\x66\x7c\x94\x96\x85\xb8\x73\x17\xf1\x97\x43\x31\
\x4c\xd5\x74\x99\xf0\xaa\xaa\x71\xfa\xf4\x19\x68\xcc\x0e\x8c\x92\
\x2d\x36\x14\x1e\xab\x5a\xc7\x0c\x78\xe6\x71\x70\x0d\x23\x4c\xa3\
\x65\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6\x5e\x94\x74\x39\xd0\x66\xf7\
\xaf\x1e\x3d\x11\x4b\x47\x2e\x6f\xc3\x79\x13\x35\x2c\x5c\x99\x1a\
\xf1\x97\x3e\xc7\xd1\xd8\x33\xf8\x38\x31\x09\x86\x5e\x13\x1a\x9b\
\x04\xf8\xdd\x1b\xfb\x51\x4f\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\
\x93\x60\x2b\x5d\x0c\x39\xf5\xbc\xf0\xbe\x67\xbd\xea\xcc\x16\x3d\
\x4a\x55\x1e\x08\x6d\x01\x94\xd4\xf1\x43\xe1\x65\x53\x40\xf0\xca\
\xf7\x25\x60\x2b\x6e\x6a\xc7\xa9\x84\x44\xc4\x1c\x39\x8a\xdc\x7c\
\x36\x5a\x5a\xc5\x38\x14\x13\x83\x2f\x39\x35\xc8\x14\x6a\x98\xe6\
\xa2\xd5\xd2\x27\xf5\x9a\x7a\x4c\x13\xa1\x49\x64\xb7\x99\x90\xdb\
\x6e\x46\xb9\xda\x8d\x06\xa5\x76\x39\x2c\x39\x3d\xf9\x4e\x13\xec\
\xd9\x72\xd4\x47\x0d\x3b\xab\x46\x88\x63\xff\x39\x8f\xdf\xee\xfb\
\x3d\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17\x70\xa3\xad\x07\x19\
\xc4\x4f\x4a\x14\xe9\x6e\xba\x58\xa8\xef\x2c\xfa\x94\x98\x50\x28\
\xb7\x40\xe9\x0e\x3c\xf9\x57\xec\x29\x2a\x77\x2d\xc1\x67\x04\xfb\
\xb6\xb9\xe4\x44\x8d\xbe\xcc\xb2\x5a\xfc\xe3\xe4\x19\x1c\x3c\xf4\
\x37\xb0\x72\xf3\xb0\xef\xc0\x1f\x50\x20\xd1\x21\x89\x27\x65\x2a\
\xa6\x4b\x85\x3e\xbf\x21\xd5\x46\xe4\x2e\x90\x5b\x21\xb0\x0c\xae\
\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87\x6f\x3c\xaf\x3c\xe7\x96\x15\
\x35\x9c\x69\x45\xe5\xf8\xfb\xb1\x58\x1c\x3f\x19\x87\x37\xf6\xef\
\xc7\x8d\x3a\x11\x92\xab\xa4\x0c\x21\xed\x70\xea\x35\x55\x21\x8b\
\x34\x5b\xc9\x03\x37\x2a\x34\x6e\xd4\x49\x3a\x17\xc3\x72\x73\x08\
\x8e\x6d\x95\xfb\x87\x24\xe0\x4a\x65\x73\x70\xe4\xf8\x29\x1c\x3e\
\x7c\x98\x8c\x63\x2e\x32\x05\x2a\x5c\x22\xd5\xd3\x5d\x7e\x4d\xdc\
\x0b\x36\xe9\x74\x76\xa7\x1d\x77\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\
\x1a\x95\xfb\x22\xbd\x49\xfd\x80\x0b\x6d\xf4\x04\x32\x4a\x78\x4c\
\x0f\x9c\x4b\x49\xc3\xb5\xa6\x2e\x7c\xc2\x6d\x65\x36\x59\xf1\x83\
\x01\x5c\x97\x9a\xc1\x51\x7b\x20\xf3\x04\xd7\xce\x25\x26\x05\x36\
\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce\x5e\x4e\x45\x81\
\x58\x85\x78\xf6\x5d\x5c\xa9\x55\x90\xaa\xfb\xc0\x96\xdb\x50\xad\
\x75\xe3\xae\x54\x41\x2f\x10\xca\x0d\x72\xbf\xba\xd3\x6a\xa3\x05\
\xb7\xa2\x51\xf8\x1d\xaf\x43\x8d\x4f\xb9\x2d\x88\xcb\xe6\xe1\x9a\
\x48\x8f\xaa\x1e\x2f\x9a\x35\xe6\xc7\x7f\x7a\xf3\x2d\x57\x78\xac\
\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5c\x75\x1f\xde\xcb\
\xaf\x45\xb9\x76\x00\x32\x67\x60\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\
\xd2\xa9\xdc\x3b\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9d\x50\
\x0b\x39\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3\x2f\x22\xf7\x0e\
\xff\xda\x6d\x8a\xdd\x61\x99\xd5\x1b\xb6\xd8\x6b\xbb\x5e\x32\xbe\
\x2f\x89\xff\x01\x66\xb9\x5f\xfc\x11\x80\x3d\xcf\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x2b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\xbd\x49\x44\x41\x54\x58\xc3\xed\
\x57\x6b\x4c\x93\x57\x18\x3e\x23\x71\xc9\x32\xe9\x16\x97\xa8\x54\
\x65\x38\x9d\x02\x15\xf6\x03\x87\x32\x93\x01\x66\x2c\x5b\x70\xc4\
\x30\xff\x60\xa2\x2e\x1a\x3a\x1d\x4e\x03\xba\x31\x89\x5b\xb3\x80\
\xd9\x0c\x84\x02\x19\x58\x1c\x14\x8b\x85\xb2\x82\x95\x5e\xe4\x66\
\x0b\x8e\x31\xf8\xc3\x46\xcb\x2d\x81\x15\xdc\xa8\xc2\x1c\x1b\xb7\
\x6a\x69\x91\xf2\xee\xbc\x87\xaf\x0c\xdc\xb8\x0d\x61\xd9\xb2\x93\
\x3c\xed\x97\xf3\x7d\xfd\xde\xe7\xbc\xef\xf3\x5e\x4a\x00\x80\xfc\
\x93\x20\xff\x0a\x02\x74\x09\x28\x44\x14\xd9\x14\x71\x14\x01\x2b\
\x46\x80\xae\xdd\x64\xdd\xc6\x66\x22\x4c\xf8\x95\xc4\x8b\x47\xc8\
\xa1\xd3\xf7\xc8\x8e\x97\x3b\x38\x32\x61\x2b\x41\x20\x85\x9c\xbe\
\x30\x48\x2e\xdd\x80\x19\x40\x32\xab\x79\x4d\xf4\xbe\xfb\x72\x13\
\x68\x64\x06\x91\x04\x5e\xa3\x51\xf4\x06\xee\x85\x47\xf5\xd0\xbd\
\x83\xcb\x4d\x20\x9b\x9d\xf6\x40\x74\x2f\xbd\x16\x32\x3d\x20\x89\
\x3f\x48\xa5\x2c\x1b\x01\x8c\x31\x79\xc1\xbb\x9d\x88\x4b\xc6\xd7\
\xc6\x26\x0e\xa0\x10\xb9\xfd\x42\xfe\xc5\x2b\x36\x46\x8c\x12\x5c\
\x4e\x02\x93\xa7\xa7\xa7\x0d\xcc\xd3\x39\xb9\x98\x63\x36\x14\x0a\
\xd2\xe4\xa3\x2b\x41\x20\x8c\x29\x9e\x2a\xdf\x37\x47\xeb\xdc\x7b\
\xb5\xcc\x89\x9e\x40\x44\x96\x54\x83\x2b\x2c\x0b\x36\x46\x48\x08\
\x13\xf5\x64\x2a\x7b\x2e\x54\x03\x01\xf8\x03\x37\xbf\xc0\x0e\x34\
\x2a\x54\xdf\x62\x88\x52\xd5\x2c\x58\x03\x74\x1d\x16\x08\x04\x7a\
\x45\x55\xf5\xc8\xa0\x6d\x74\xc2\xd4\x73\xf7\x21\xbe\x73\x51\x95\
\x90\xae\x8f\xd0\x13\xcf\xe5\x94\x83\x87\xb4\x02\x9e\xcc\x2e\x03\
\xd4\x06\xdd\xaf\x99\xcb\xb0\xaf\xaf\xaf\x3e\xbf\xd2\x60\xb5\xdb\
\xed\x80\xf8\x79\xe4\x3e\xc4\x5e\xab\xb4\xb9\x88\x2f\x86\x80\x27\
\xd3\xc0\x67\xf9\x8e\x19\xf5\x60\xd7\x5e\x33\xba\x76\xda\x73\xee\
\x68\xd8\xc7\xc7\x47\x9f\xab\xab\xb0\x0e\x0f\x0d\xc1\x10\x87\xb2\
\xf6\x2e\xe7\x96\x37\xf7\x77\x73\x61\xd8\xbd\xe8\x5e\x80\x2f\x66\
\x9a\xa0\x86\xdf\xa9\x36\x42\xf7\xf0\x03\xd8\x19\x9f\xd4\xcf\xa5\
\xe7\x1a\x8a\x98\x2d\x7e\xfe\x6d\x97\x54\x1a\x6b\x5f\x5f\x1f\xb8\
\xd0\xd1\x73\x07\x62\x72\x15\x56\x4e\xc4\x87\x97\xd4\x8c\x30\x14\
\xe9\x15\xb7\x1e\x38\x1c\x0e\x40\xa4\xd6\x19\x31\x9e\x85\x9b\x05\
\x7e\x6d\xa9\x25\x1a\x5b\x97\xd9\x0c\xe6\x2e\x0a\xf3\x24\x14\xdf\
\x36\x8e\x7b\xbd\x1e\xd1\xcd\x42\xc8\x09\x6f\xa9\x04\x3c\xd1\xbd\
\x56\xab\x15\x10\x77\x7f\x1b\x84\xf3\x92\x5c\xbb\x52\xa9\x84\xfa\
\xfa\x7a\x30\x99\x4c\x0c\x75\xdf\x35\xc1\x51\xb1\x64\x18\xc9\x51\
\x44\x3e\xb6\x76\xcc\xb4\x40\x4f\x93\x5f\x7e\xd3\xd6\xdf\xdf\x0f\
\x32\x99\x0c\x44\x22\x11\xa8\x54\x2a\x90\x4a\xa5\xa0\xd1\x68\x20\
\x4b\x5b\x39\xbe\xe9\x95\xe0\x1f\xb8\x53\xaf\x79\x2c\xf3\x00\x97\
\x8e\x22\x9e\xc7\x86\xe6\x53\x29\x19\xf6\x82\x82\x02\xe6\xe2\xa0\
\xa0\x20\xe0\xf1\x78\x60\xb1\x58\x40\x5b\x5e\x01\xfb\xcf\x26\x0c\
\x2d\xa6\x53\xce\x67\x94\xcf\x09\x4c\x83\xe2\x5b\x7b\xe6\xc2\x60\
\x9a\xb2\x14\x14\x0a\x05\x88\xc5\x62\xc8\xcc\xcc\x84\xa2\xa2\x22\
\x50\xab\xd5\xd0\xd9\xd9\xc9\x60\xec\xfe\xc9\xb9\xc9\xdb\xa7\x75\
\x2e\xb7\xcf\x4b\x80\xae\xb7\xd8\x29\x70\x0e\xc0\x6a\x97\xac\x78\
\x88\xca\x7f\x82\xe2\x29\x89\x0e\x3e\x97\x2b\x21\x5b\x96\x0f\x07\
\x63\xe3\x47\x84\x1f\x26\xd8\x92\x72\x64\x8e\x6f\x1a\xbf\x07\xa3\
\xd1\x08\x2d\xad\x2d\xf0\xcb\xc0\x20\x1c\x38\xf1\xbe\x05\xb3\x62\
\xc1\x04\x5c\x69\x84\x85\x85\x84\x46\xdc\x26\xe7\x32\xac\x2c\xcf\
\x33\xb5\x13\xec\x3b\xe3\xba\xd3\x33\xaf\x82\xe5\xfe\x7a\x89\x06\
\x9e\xde\xfc\x62\x1b\xf7\x3c\x92\x8d\x7b\x66\xab\x4f\x5b\xca\x35\
\xed\x58\x43\x43\x3d\x34\x34\x34\x80\xa5\xb7\x17\x32\x14\xc5\xc3\
\xf3\xe9\xc0\x65\x3c\x92\xe5\x28\x9e\x36\x5d\xe5\x9c\x2a\x32\x78\
\x7d\xf4\x83\x2e\x5a\x6c\x12\x31\x0c\x1b\x25\xea\x71\xf7\x2f\xcb\
\x27\xef\x05\x87\x5f\xfe\xd3\xe4\x44\x0b\x4c\x68\xf4\xc9\x3e\x75\
\x95\x1e\x0c\x06\x03\xb4\xb7\xb7\xc3\xd7\xc6\x96\x31\xae\x81\x09\
\x66\xf1\x36\x6d\x38\x68\x3c\x49\x3a\x3a\x65\xf8\x62\x81\x83\x44\
\xbd\x57\x43\xb6\x0a\x5e\x9b\x2a\xc3\x94\x5c\xb0\x42\x0f\xab\x24\
\xb4\x04\x9f\x4a\xaa\x9b\x43\x37\x31\x28\xd4\x4f\xf2\x0a\xc7\x74\
\x3a\x1d\xd4\xd6\xd6\x82\xc9\x7c\xdb\xb9\x61\x9b\xf7\x5f\xea\x62\
\xb2\xe5\x7e\x9c\x75\x1f\x0d\xf3\xb2\xd4\x4e\xf2\xf6\xb1\xeb\x2e\
\xb6\xae\x94\xc3\x90\x6c\x97\x55\xc1\x4b\x57\xab\x80\x9c\x4d\x6e\
\x5a\xd0\x1c\x49\xbd\xb1\xe7\x88\xb0\xef\xca\x57\xc5\x50\x5a\x5a\
\x0a\x1d\x3f\xf6\x4c\x04\x06\x87\x74\x3c\xaa\x0b\xc2\x84\x46\x8d\
\x07\xc8\x6f\x02\xd9\xf9\xaa\x7e\x9a\xf1\x30\x46\x8e\x36\x20\xaf\
\xbc\x4a\x78\x43\x69\x00\x92\x28\x1d\x98\xcd\x95\xb3\x79\xc3\x7d\
\x3d\xbf\xf9\x44\x6a\xa6\x5d\x2e\x97\x43\x53\x4b\x2b\x44\x1c\x7b\
\xf7\xce\xf4\x14\x25\xae\xf1\x8a\xf5\x77\x9c\xf5\x70\x02\xc2\xd9\
\x0f\x89\xd1\x81\x03\x4f\x8e\xf7\xdc\xd2\x69\xe7\xf3\xdf\x75\xfc\
\x6f\x14\x2e\x36\xd2\xef\xd8\x17\x69\x49\xbe\x2c\x9d\xc8\xd3\x96\
\x3b\xa7\x0f\x31\x8c\x25\xc6\xdf\x9f\xba\x77\x5f\x71\x35\xa0\x41\
\x6c\xb5\x08\x8c\xf9\x94\xf1\xe0\xf0\x33\x4b\x9a\x7c\x68\x13\x5a\
\xbd\xce\xa3\xd9\x6b\x4f\x48\xf7\x0c\x0f\xb0\x0f\xfe\xf3\x87\xc8\
\xf9\x2f\xee\xb9\x49\x6e\x00\xf6\x7b\x3e\xed\xf7\x08\x1e\x2a\x3e\
\x5d\xe5\x58\xaa\xf1\x47\x5a\xf5\xb6\x59\x0b\x11\x1d\xb3\x43\xc9\
\x91\x38\x09\x39\xf9\xa9\x96\x21\xfa\x5c\x1a\x0d\xcf\xb3\xff\xff\
\x37\xfc\x4f\x13\xf8\x1d\xe7\x87\x19\xb9\x44\xc3\x01\xcf\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x3a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\xcc\x49\x44\x41\x54\x58\xc3\xb5\
\x97\x5d\x4c\x5b\x65\x1c\xc6\x77\x6f\xbc\xd9\xe5\x12\x49\x20\x71\
\xd7\x26\xe3\x4e\x13\xb8\x70\xd1\x85\x44\xbd\x50\xe3\x10\x18\xe5\
\x2b\x2e\x26\x4a\x04\x27\x86\xaa\x8b\x99\xe0\xd0\xa2\x6c\x19\x86\
\x39\x17\xdc\x1a\x16\x98\x80\x40\x6c\xa6\x43\xca\x20\x2b\x83\x1e\
\x28\xcc\xda\xd1\x96\xd2\xd2\x4a\x7b\xfa\x01\xa5\xd0\xef\x16\x1e\
\xdf\xff\xdb\x1d\xc7\xcc\x04\x2a\x87\x93\x3c\x39\x6f\x21\x9c\xe7\
\xf7\x3c\xef\x47\x0f\x87\x00\x1c\xca\x46\xcf\xbd\xfa\xe9\xbb\x4c\
\x5a\x26\x61\x0f\x6a\x60\xca\xd9\xe9\x79\xd9\x9a\x3f\x5d\x50\xf2\
\xa5\xc1\xe9\x8f\xa7\x57\xc3\x40\x30\x02\x84\xa2\x19\xad\xc7\x32\
\x8a\x27\x81\x58\x22\x73\xbf\x79\x6b\xda\x4b\x10\x72\x02\x1c\x7b\
\xe7\xac\xda\x1c\xd8\xc8\x98\x12\x40\x84\x99\x85\xe3\x19\x91\x31\
\x29\x1a\x4b\x61\x25\x94\x44\x38\x9a\x42\x73\x87\xc6\xbe\x13\xc4\
\xff\x02\x90\x12\x93\x79\x24\xf1\xc8\x58\x92\xcf\x1f\x84\x5d\x8c\
\xc2\xe5\x09\x22\x12\x4b\xa3\xf4\xc3\xef\x4d\x34\x75\x59\x01\xb0\
\xeb\xd8\x36\xd5\x90\x9e\x3a\xfc\xcc\xb9\xe7\x5f\x2e\x11\x3f\x56\
\x9e\x45\x45\x55\x0d\x2a\x99\xde\xaf\xad\xc3\x9d\xb1\x89\xc7\x00\
\xac\xb6\x25\xfc\xb9\xe8\x87\x6b\x15\x58\xf6\x04\x10\x08\xc6\xd2\
\xaf\x9c\xbe\x70\x9f\x41\x1c\xd9\x15\x80\x5d\x87\x99\x1a\x8a\x8a\
\x8a\xcc\x92\x5a\x5b\x5b\xdd\xa4\xaf\x55\xad\xfe\xaf\x54\xdf\xa6\
\x06\x06\x06\x31\x39\x35\x85\xd9\xb9\x39\xe8\x26\x26\x50\x50\x50\
\x80\x21\xcd\x6f\x7c\xde\x49\xa6\xf9\x05\xcc\x98\x5c\x1c\xc0\xe1\
\x4f\x41\xf4\x85\xf0\x43\xaf\xce\xcd\x00\x6a\xf6\x02\x50\x43\x66\
\xd8\xe5\x8a\xc7\xe3\xf0\x7a\xbd\x48\xa7\xd3\x98\x9c\x9c\x44\x65\
\x65\x35\x66\x67\x8d\xbc\x81\x07\x66\x1b\x74\xd3\x16\x0e\x40\x32\
\x2d\x78\xf0\xdd\x8d\x51\x8f\xac\x00\xe1\x70\x18\x46\xa3\x91\x8f\
\x53\xa9\x14\x7e\xea\xed\x45\xe3\x27\x9f\x61\x86\x41\x38\x96\xdc\
\x50\x77\x75\xe3\x4c\x43\x23\xce\x35\x9d\xc7\xed\x91\x71\x5c\xbc\
\x3e\x2c\x2f\xc0\xc6\xc6\x06\xf4\x7a\xfd\x63\x40\x7d\x7d\xfd\x50\
\x32\x88\xd0\x46\x1c\x66\x9b\x0b\x82\xc1\x88\xa9\x19\x13\xac\x0e\
\x11\x97\xba\x64\x6e\x80\x00\xa6\xd8\x3a\xd8\x7e\x45\x22\x11\x94\
\x2b\x2a\x30\xae\x13\x40\xe7\x04\x6d\x57\xda\xaa\x34\xbe\x7c\x53\
\xe6\x35\x40\x66\x3a\x9d\x0e\xc3\xc3\xc3\xe8\x65\xf5\xf7\xf7\xf7\
\x43\xab\xd5\xa2\xaa\xba\x06\x63\x77\xf5\x90\x0e\x2a\x77\x90\xed\
\x04\xb6\x0e\xda\xbb\x65\x06\xa0\x79\xb7\xdb\xed\x18\x1a\x1a\x42\
\x67\x67\x27\x7a\x7a\x7a\x38\x50\x49\x69\x19\x6e\x69\xf5\x10\xd7\
\x00\x6f\x08\xb0\xf9\x00\x67\x00\xb8\xd0\x25\x33\xc0\xd6\xd6\x16\
\xdf\x09\x81\x40\x00\xa2\x28\xc2\xef\xf7\x63\x6d\x6d\x0d\xa7\x14\
\x95\xd0\xfc\xae\xe7\xa9\xc9\x7c\xc1\x0b\x98\x3d\x40\x9b\xdc\x00\
\xdb\x41\x36\x37\x37\xf9\x76\xa4\x56\x14\x15\xd5\xe8\xfb\x55\xe0\
\xa9\x1d\x81\x47\x00\xe7\x3b\x0f\x00\x80\xcc\x25\x80\x24\x33\x4f\
\x24\x12\x28\x2b\xaf\xe2\x00\x7f\xb8\x00\x8b\x98\x01\xa0\x36\x5a\
\xd5\x07\x30\x05\xff\x98\x27\x93\x3c\x3d\x4d\x49\xc9\xa9\x4a\x0e\
\xa0\xb7\xb3\x03\x89\x3d\xc5\xf8\x17\x30\xb1\x00\x7c\x71\xf5\x00\
\x00\xa4\xea\xc9\x98\x14\x8b\xc5\x50\xa6\xa8\x82\x7a\x48\xc0\x98\
\x19\xb8\x6b\x05\xe6\x9c\x99\xfb\xe7\x57\x64\x04\x90\xd2\x53\x6a\
\x02\x88\x46\xa3\xdc\x3c\x14\x0a\xa1\xb8\xb4\x02\xd7\x06\x05\xdc\
\x66\x87\xe4\xa0\x01\x1c\x64\xc4\x04\x28\x3b\x64\x06\x48\x3d\x9c\
\x73\x12\x99\xd3\xb9\x40\x20\xc5\x65\x55\xb8\xd8\x2d\xa0\x7f\x3a\
\x63\xae\x7d\x90\x69\xe0\xa3\x76\x99\x00\xfe\x5d\x3d\xa5\x26\xad\
\xae\xae\x72\x88\xb7\x4a\x2a\x70\xb9\x57\xc0\x3d\x1b\xb8\x7e\x9e\
\x01\xee\xcc\x03\x67\x2e\xed\x13\x40\xaa\x9d\x44\x8b\x8e\x92\xd3\
\x71\x4c\xdf\x01\x2b\x2b\x2b\x58\x5f\x5f\xe7\x10\x27\x59\x03\xdf\
\x74\x09\x50\x4f\x00\xbf\xcc\x65\x1a\xb8\x32\x06\x34\xec\xa7\x01\
\xc9\x58\xda\xeb\x64\x4e\x69\x29\x39\x1d\x44\x04\x40\xf5\xd3\xcf\
\xde\x7c\x5b\x81\x96\xeb\x02\x4f\x7e\x75\x1c\xb8\x71\x0f\xf8\x71\
\x2c\x9e\x7e\xbd\x4e\x6d\xa6\x37\xaa\xac\x00\x9e\x64\x2c\x6d\x37\
\x32\x25\x00\xd1\x23\xf2\xe4\x12\xcc\x1b\x27\x15\x68\xef\x11\xa0\
\xbc\x66\x5b\x7f\x4f\x35\xe2\x3c\x71\x9a\xbf\x8e\x69\xf7\xfc\x4a\
\x26\x01\x90\xa9\x24\x69\xb5\x53\x42\x32\x0f\x06\x83\x70\xb9\x5c\
\xdc\x90\x5e\x4a\xe8\xb3\xc7\xe3\x81\xdb\xed\xc6\xf1\x13\xaf\x25\
\x9f\x7d\xa1\x9c\x4c\x3b\x98\x8a\x99\x8e\x3e\xc9\x78\x47\x00\x95\
\x4a\xc5\x01\xa4\x15\x2e\xcd\x37\x19\x52\x52\x3a\xf7\x29\xb5\xc3\
\xe1\xe0\x22\xe3\xc5\xc5\x45\x0e\xf5\xe2\xf1\x97\x5c\xf4\x1e\xb9\
\x93\xe9\xae\x00\x2d\x2d\x2d\x6e\xe9\x60\xa1\xd4\xd2\x97\x0d\x8d\
\x97\x97\x97\xe1\xf3\xf9\x60\xb3\xd9\xf8\x7d\x69\x69\x89\x43\x10\
\x00\x8d\x0b\x0b\x0b\xcd\xb2\x00\xd0\xa2\x92\x52\x93\x11\x8d\xe9\
\x4e\xdf\x78\x54\x3b\x35\x60\xb5\x5a\x79\xf5\xd4\x0a\xfd\xce\x60\
\x30\x24\xf2\xf2\xf2\xee\xb3\x67\x1c\xd9\x17\x40\x53\x53\x93\x5b\
\x9a\x67\x4a\x4f\x22\x13\xaa\x9a\xc6\x16\x8b\x99\x37\x40\x9f\x47\
\x47\x47\x23\x6d\x6d\x6d\xde\xfc\xfc\x7c\x13\xfb\xdb\x41\xa6\xb2\
\xbd\x9a\xff\x27\x40\x73\x73\x33\x9f\x02\x4a\x47\x10\x54\x3f\x55\
\x3f\x3f\x3f\xcf\xeb\xd6\x68\x34\x91\xba\xba\x3a\xe7\xc3\xb4\x5d\
\x4c\x1f\x30\x1d\xcd\xc6\x78\x47\x00\xa5\x52\xe9\x76\x3a\x9d\xbc\
\x62\x4a\x4a\x6f\x3e\x94\xb4\xbe\xbe\xde\x99\x93\x93\x23\x99\x16\
\x67\x53\x75\x56\x00\x8d\x8d\x8d\x6e\x8b\xc5\x82\x81\x81\x81\x48\
\x6d\x6d\xad\x33\x37\x37\x57\x56\xd3\xdd\x00\xf8\x7f\x46\x4c\xc2\
\x41\x99\x6e\xd7\xdf\x43\x39\x56\x18\x85\x70\xc8\x04\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x07\
\x04\xca\x57\xa7\
\x00\x6e\
\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0a\xa8\xba\x47\
\x00\x70\
\x00\x61\x00\x73\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x08\xc8\x58\x67\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\xc1\x59\x87\
\x00\x6f\
\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x0a\xc7\x57\x87\
\x00\x63\
\x00\x75\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\x7c\x5a\x07\
\x00\x63\
\x00\x6f\x00\x70\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xbc\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x0e\x70\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x09\xc9\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x03\x58\
\x00\x00\x00\x6a\x00\x00\x00\x00\x00\x01\x00\x00\x16\x8d\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
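# Usage sketch (an assumption, not part of the generated resource module): importing
# this module registers the resources above, after which Qt code can refer to them by
# the ':/images/...' prefix, for example:
#   from PyQt5 import QtGui
#   import sdi_rc  # importing runs qInitResources()
#   icon = QtGui.QIcon(':/images/new.png')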
|
DevinDewitt/pyqt5
|
examples/mainwindows/sdi/sdi_rc.py
|
Python
|
gpl-3.0
| 36,524
|
# -*- coding: utf-8 -*-
__all__ = ["cc"]
|
MarieVdS/ComboCode
|
__init__.py
|
Python
|
gpl-3.0
| 42
|
################################################
#
# file moved to own repository:
# https://github.com/mozilla/Mozilla-GitHub-Standards
#
################################################
|
mozilla/github-org-scripts
|
check_CoC.py
|
Python
|
mpl-2.0
| 195
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Partner Fix',
'version': '8.0.1.0.0',
'category': '',
'sequence': 14,
'summary': '',
'description': """
Portal Partner Fix
==================
Let user read his commercial partner
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'portal',
],
'data': [
'security/portal_security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dvitme/odoo-addons
|
portal_partner_fix/__openerp__.py
|
Python
|
agpl-3.0
| 1,565
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, six, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None
if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('RequestId', None)
if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException) as pe:
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(SQSError, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return super(SQSError, self).endElement(name, value, connection)
def _cleanupParsedProperties(self):
super(SQSError, self)._cleanupParsedProperties()
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
super(SQSDecodeError, self).__init__(reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
super(StorageResponseError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(StorageResponseError, self).startElement(name, attrs,
connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return super(StorageResponseError, self).endElement(name, value,
connection)
def _cleanupParsedProperties(self):
super(StorageResponseError, self)._cleanupParsedProperties()
        for p in ('resource',):
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
super(EC2ResponseError, self).__init__(status, reason, body)
self.errors = [ (e.error_code, e.error_message) \
for e in self._errorResultSet ]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
super(EC2ResponseError, self)._cleanupParsedProperties()
self._errorResultSet = []
        for p in ('errors',):
setattr(self, p, None)
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
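# Illustrative sketch (assumption): services that return JSON errors surface the parsed
# body through this class, so callers can branch on the short error code, e.g.
#   try:
#       table.put_item(item)                       # hypothetical DynamoDB call
#   except DynamoDBResponseError as e:
#       if e.error_code == 'ConditionalCheckFailedException':
#           pass                                   # condition failed; not a hard error
#       else:
#           raise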
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
super(InvalidAclError, self).__init__(message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
super(InvalidCorsError, self).__init__(message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
super(InvalidLifecycleConfigError, self).__init__(message)
self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
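# Illustrative sketch (assumption, not boto API): callers typically branch on the
# disposition carried by the resumable-transfer exceptions defined below, e.g.
#   try:
#       do_resumable_upload()                      # hypothetical helper
#   except ResumableUploadException as e:
#       if e.disposition == ResumableTransferDisposition.WAIT_BEFORE_RETRY:
#           time.sleep(delay)                      # retry later in this process
#       elif e.disposition == ResumableTransferDisposition.START_OVER:
#           restart_upload()                       # new attempt, no delay
#       else:
#           raise                                  # ABORT / ABORT_CUR_PROCESS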
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableUploadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableDownloadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
super(TooManyRecordsException, self).__init__(message)
self.message = message
class PleaseRetryException(Exception):
"""
Indicates a request should be retried.
"""
def __init__(self, message, response=None):
self.message = message
self.response = response
def __repr__(self):
return 'PleaseRetryException("%s", %s)' % (
self.message,
self.response
)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/boto/exception.py
|
Python
|
agpl-3.0
| 17,106
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.odoo-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class AccountConfigSettings(models.TransientModel):
_inherit = 'account.config.settings'
due_cost_service_id = fields.Many2one(
related='company_id.due_cost_service_id',
help='Default Service for RiBa Due Cost (collection fees) on invoice',
domain=[('type', '=', 'service')])
def default_get(self, cr, uid, fields, context=None):
res = super(AccountConfigSettings, self).default_get(
cr, uid, fields, context)
if res:
user = self.pool['res.users'].browse(cr, uid, uid, context)
res['due_cost_service_id'] = user.company_id.due_cost_service_id.id
return res
class ResCompany(models.Model):
_inherit = 'res.company'
due_cost_service_id = fields.Many2one('product.product')
|
abstract-open-solutions/l10n-italy
|
l10n_it_ricevute_bancarie/models/account_config.py
|
Python
|
agpl-3.0
| 2,038
|
from __future__ import unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^upload/election/(?P<election>[^/]+)/post/(?P<post_id>[^/]+)/$',
views.CreateDocumentView.as_view(),
name='upload_document_view'),
url(r'^(?P<pk>\d+)/$',
views.DocumentView.as_view(),
name='uploaded_document_view'),
]
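# Usage sketch (assumption): with these names the views are typically resolved via
# reverse(), e.g.
#   reverse('upload_document_view', kwargs={'election': '2015', 'post_id': '65808'})
#   reverse('uploaded_document_view', kwargs={'pk': 3})
# where the election/post_id/pk values are placeholders, not real data.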
|
mysociety/yournextmp-popit
|
official_documents/urls.py
|
Python
|
agpl-3.0
| 376
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import requests
from odoo.addons.microsoft_calendar.models.microsoft_sync import microsoft_calendar_token
from datetime import timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.loglevels import exception_to_unicode
from odoo.addons.microsoft_account.models.microsoft_service import MICROSOFT_TOKEN_ENDPOINT
from odoo.addons.microsoft_calendar.utils.microsoft_calendar import MicrosoftCalendarService, InvalidSyncToken
_logger = logging.getLogger(__name__)
class User(models.Model):
_inherit = 'res.users'
microsoft_calendar_sync_token = fields.Char('Microsoft Next Sync Token', copy=False)
def _microsoft_calendar_authenticated(self):
return bool(self.sudo().microsoft_calendar_rtoken)
def _get_microsoft_calendar_token(self):
self.ensure_one()
if self._is_microsoft_calendar_valid():
self._refresh_microsoft_calendar_token()
return self.microsoft_calendar_token
def _is_microsoft_calendar_valid(self):
return self.microsoft_calendar_token_validity and self.microsoft_calendar_token_validity < (fields.Datetime.now() + timedelta(minutes=1))
def _refresh_microsoft_calendar_token(self):
self.ensure_one()
get_param = self.env['ir.config_parameter'].sudo().get_param
client_id = get_param('microsoft_calendar_client_id')
client_secret = get_param('microsoft_calendar_client_secret')
if not client_id or not client_secret:
raise UserError(_("The account for the Outlook Calendar service is not configured."))
headers = {"content-type": "application/x-www-form-urlencoded"}
data = {
'refresh_token': self.microsoft_calendar_rtoken,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
}
try:
dummy, response, dummy = self.env['microsoft.service']._do_request(MICROSOFT_TOKEN_ENDPOINT, params=data, headers=headers, method='POST', preuri='')
ttl = response.get('expires_in')
self.write({
'microsoft_calendar_token': response.get('access_token'),
'microsoft_calendar_token_validity': fields.Datetime.now() + timedelta(seconds=ttl),
})
except requests.HTTPError as error:
if error.response.status_code == 400: # invalid grant
                # Delete refresh token and make sure it's committed
with self.pool.cursor() as cr:
self.env.user.with_env(self.env(cr=cr)).write({'microsoft_calendar_rtoken': False})
error_key = error.response.json().get("error", "nc")
error_msg = _("Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired [%s]", error_key)
raise UserError(error_msg)
def _sync_microsoft_calendar(self, calendar_service: MicrosoftCalendarService):
self.ensure_one()
full_sync = not bool(self.microsoft_calendar_sync_token)
with microsoft_calendar_token(self) as token:
try:
events, next_sync_token, default_reminders = calendar_service.get_events(self.microsoft_calendar_sync_token, token=token)
except InvalidSyncToken:
events, next_sync_token, default_reminders = calendar_service.get_events(token=token)
full_sync = True
self.microsoft_calendar_sync_token = next_sync_token
# Microsoft -> Odoo
recurrences = events.filter(lambda e: e.is_recurrent())
synced_events, synced_recurrences = self.env['calendar.event']._sync_microsoft2odoo(events, default_reminders=default_reminders) if events else (self.env['calendar.event'], self.env['calendar.recurrence'])
# Odoo -> Microsoft
recurrences = self.env['calendar.recurrence']._get_microsoft_records_to_sync(full_sync=full_sync)
recurrences -= synced_recurrences
recurrences._sync_odoo2microsoft(calendar_service)
synced_events |= recurrences.calendar_event_ids
events = self.env['calendar.event']._get_microsoft_records_to_sync(full_sync=full_sync)
(events - synced_events)._sync_odoo2microsoft(calendar_service)
return bool(events | synced_events) or bool(recurrences | synced_recurrences)
@api.model
def _sync_all_microsoft_calendar(self):
""" Cron job """
users = self.env['res.users'].search([('microsoft_calendar_rtoken', '!=', False)])
microsoft = MicrosoftCalendarService(self.env['microsoft.service'])
for user in users:
_logger.info("Calendar Synchro - Starting synchronization for %s", user)
try:
user.with_user(user).sudo()._sync_microsoft_calendar(microsoft)
except Exception as e:
_logger.exception("[%s] Calendar Synchro - Exception : %s !", user, exception_to_unicode(e))
|
rven/odoo
|
addons/microsoft_calendar/models/res_users.py
|
Python
|
agpl-3.0
| 5,093
|
from StringIO import StringIO
from sympy.core import symbols, Eq, pi, Catalan, Lambda, Dummy
from sympy.utilities.codegen import CCodeGen, Routine, InputArgument, Result, \
CodeGenError, FCodeGen, codegen, CodeGenArgumentListError, OutputArgument, \
InOutArgument
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.lambdify import implemented_function
# import test:
#FIXME: Fails due to a circular import with core
# from sympy import codegen
#FIXME-py3k: Many AssertionErrors here, perhaps related to unicode;
#FIXME-py3k: some are just due to an extra space at the end of the string
def get_string(dump_fn, routines, prefix="file", header=False, empty=False):
"""Wrapper for dump_fn. dump_fn writes its results to a stream object and
this wrapper returns the contents of that stream as a string. This
auxiliary function is used by many tests below.
    The header and the empty lines are not generated to facilitate the
testing of the output.
"""
output = StringIO()
dump_fn(routines, output, prefix, header, empty)
source = output.getvalue()
output.close()
return source
def test_Routine_argument_order():
a, x, y, z = symbols('a x y z')
expr = (x+y)*z
raises(CodeGenArgumentListError, 'Routine("test", expr, argument_sequence=[z, x])')
raises(CodeGenArgumentListError, 'Routine("test", Eq(a, expr), argument_sequence=[z, x, y])')
r = Routine('test', Eq(a, expr), argument_sequence=[z, x, a, y])
assert [ arg.name for arg in r.arguments ] == [z, x, a, y]
assert [ type(arg) for arg in r.arguments ] == [
InputArgument, InputArgument, OutputArgument, InputArgument ]
r = Routine('test', Eq(z, expr), argument_sequence=[z, x, y])
assert [ type(arg) for arg in r.arguments ] == [
InOutArgument, InputArgument, InputArgument ]
from sympy.tensor import IndexedBase, Idx
A, B = map(IndexedBase, ['A', 'B'])
m = symbols('m', integer=True)
i = Idx('i', m)
r = Routine('test', Eq(A[i], B[i]), argument_sequence=[B, A, m])
assert [ arg.name for arg in r.arguments ] == [B.label, A.label, m]
def test_empty_c_code():
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [])
assert source == "#include \"file.h\"\n#include <math.h>\n"
def test_empty_c_code_with_comment():
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [], header=True)
assert source[:82] == (
"/******************************************************************************\n *"
)
# " Code generated with sympy 0.7.1 "
assert source[158:] == ( "*\n"
" * *\n"
" * See http://www.sympy.org/ for more information. *\n"
" * *\n"
" * This file is part of 'project' *\n"
" ******************************************************************************/\n"
"#include \"file.h\"\n"
"#include <math.h>\n"
)
def test_empty_c_header():
code_gen = CCodeGen()
source = get_string(code_gen.dump_h, [])
assert source == "#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n#endif\n"
def test_simple_c_code():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double x, double y, double z) {\n"
" return z*(x + y);\n"
"}\n"
)
assert source == expected
def test_numbersymbol_c_code():
routine = Routine("test", pi**Catalan)
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test() {\n"
" double const Catalan = 0.915965594177219;\n"
" return pow(M_PI, Catalan);\n"
"}\n"
)
assert source == expected
def test_c_code_argument_order():
x,y,z = symbols('x,y,z')
expr = x + y
routine = Routine("test", expr, argument_sequence=[z, x, y])
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double z, double x, double y) {\n"
" return x + y;\n"
"}\n"
)
assert source == expected
def test_simple_c_header():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = CCodeGen()
source = get_string(code_gen.dump_h, [routine])
expected = (
"#ifndef PROJECT__FILE__H\n"
"#define PROJECT__FILE__H\n"
"double test(double x, double y, double z);\n"
"#endif\n"
)
assert source == expected
def test_simple_c_codegen():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
result = codegen(("test", (x+y)*z), "C", "file", header=False, empty=False)
expected = [
("file.c",
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double x, double y, double z) {\n"
" return z*(x + y);\n"
"}\n"),
("file.h",
"#ifndef PROJECT__FILE__H\n"
"#define PROJECT__FILE__H\n"
"double test(double x, double y, double z);\n"
"#endif\n")
]
assert result == expected
def test_multiple_results_c():
x,y,z = symbols('x,y,z')
expr1 = (x+y)*z
expr2 = (x-y)*z
routine = Routine(
"test",
[expr1,expr2]
)
code_gen = CCodeGen()
raises(CodeGenError, 'get_string(code_gen.dump_h, [routine])')
def test_no_results_c():
raises(ValueError, 'Routine("test", [])')
def test_ansi_math1_codegen():
# not included: log10
from sympy import (acos, asin, atan, ceiling, cos, cosh, floor, log, ln,
sin, sinh, sqrt, tan, tanh, N, Abs)
x = symbols('x')
name_expr = [
("test_fabs", Abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
("test_ceil", ceiling(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
("test_floor", floor(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test_fabs(double x) {\n return fabs(x);\n}\n'
'double test_acos(double x) {\n return acos(x);\n}\n'
'double test_asin(double x) {\n return asin(x);\n}\n'
'double test_atan(double x) {\n return atan(x);\n}\n'
'double test_ceil(double x) {\n return ceil(x);\n}\n'
'double test_cos(double x) {\n return cos(x);\n}\n'
'double test_cosh(double x) {\n return cosh(x);\n}\n'
'double test_floor(double x) {\n return floor(x);\n}\n'
'double test_log(double x) {\n return log(x);\n}\n'
'double test_ln(double x) {\n return log(x);\n}\n'
'double test_sin(double x) {\n return sin(x);\n}\n'
'double test_sinh(double x) {\n return sinh(x);\n}\n'
'double test_sqrt(double x) {\n return sqrt(x);\n}\n'
'double test_tan(double x) {\n return tan(x);\n}\n'
'double test_tanh(double x) {\n return tanh(x);\n}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n'
'double test_fabs(double x);\ndouble test_acos(double x);\n'
'double test_asin(double x);\ndouble test_atan(double x);\n'
'double test_ceil(double x);\ndouble test_cos(double x);\n'
'double test_cosh(double x);\ndouble test_floor(double x);\n'
'double test_log(double x);\ndouble test_ln(double x);\n'
'double test_sin(double x);\ndouble test_sinh(double x);\n'
'double test_sqrt(double x);\ndouble test_tan(double x);\n'
'double test_tanh(double x);\n#endif\n'
)
def test_ansi_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
x, y = symbols('x,y')
name_expr = [
("test_atan2", atan2(x,y)),
("test_pow", x**y),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test_atan2(double x, double y) {\n return atan2(x, y);\n}\n'
'double test_pow(double x, double y) {\n return pow(x, y);\n}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n'
'double test_atan2(double x, double y);\n'
'double test_pow(double x, double y);\n'
'#endif\n'
)
def test_complicated_codegen():
from sympy import sin, cos, tan, N
x,y,z = symbols('x,y,z')
name_expr = [
("test1", ((sin(x)+cos(y)+tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x+y+z))))))))),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test1(double x, double y, double z) {\n'
' return '
'pow(sin(x), 7) + '
'7*pow(sin(x), 6)*cos(y) + '
'7*pow(sin(x), 6)*tan(z) + '
'21*pow(sin(x), 5)*pow(cos(y), 2) + '
'42*pow(sin(x), 5)*cos(y)*tan(z) + '
'21*pow(sin(x), 5)*pow(tan(z), 2) + '
'35*pow(sin(x), 4)*pow(cos(y), 3) + '
'105*pow(sin(x), 4)*pow(cos(y), 2)*tan(z) + '
'105*pow(sin(x), 4)*cos(y)*pow(tan(z), 2) + '
'35*pow(sin(x), 4)*pow(tan(z), 3) + '
'35*pow(sin(x), 3)*pow(cos(y), 4) + '
'140*pow(sin(x), 3)*pow(cos(y), 3)*tan(z) + '
'210*pow(sin(x), 3)*pow(cos(y), 2)*pow(tan(z), 2) + '
'140*pow(sin(x), 3)*cos(y)*pow(tan(z), 3) + '
'35*pow(sin(x), 3)*pow(tan(z), 4) + '
'21*pow(sin(x), 2)*pow(cos(y), 5) + '
'105*pow(sin(x), 2)*pow(cos(y), 4)*tan(z) + '
'210*pow(sin(x), 2)*pow(cos(y), 3)*pow(tan(z), 2) + '
'210*pow(sin(x), 2)*pow(cos(y), 2)*pow(tan(z), 3) + '
'105*pow(sin(x), 2)*cos(y)*pow(tan(z), 4) + '
'21*pow(sin(x), 2)*pow(tan(z), 5) + '
'7*sin(x)*pow(cos(y), 6) + '
'42*sin(x)*pow(cos(y), 5)*tan(z) + '
'105*sin(x)*pow(cos(y), 4)*pow(tan(z), 2) + '
'140*sin(x)*pow(cos(y), 3)*pow(tan(z), 3) + '
'105*sin(x)*pow(cos(y), 2)*pow(tan(z), 4) + '
'42*sin(x)*cos(y)*pow(tan(z), 5) + '
'7*sin(x)*pow(tan(z), 6) + '
'pow(cos(y), 7) + '
'7*pow(cos(y), 6)*tan(z) + '
'21*pow(cos(y), 5)*pow(tan(z), 2) + '
'35*pow(cos(y), 4)*pow(tan(z), 3) + '
'35*pow(cos(y), 3)*pow(tan(z), 4) + '
'21*pow(cos(y), 2)*pow(tan(z), 5) + '
'7*cos(y)*pow(tan(z), 6) + '
'pow(tan(z), 7);\n'
'}\n'
'double test2(double x, double y, double z) {\n'
' return cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n'
'}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'double test1(double x, double y, double z);\n'
'double test2(double x, double y, double z);\n'
'#endif\n'
)
def test_loops_c():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C", "file", header=False, empty=False)
assert f1 == 'file.c'
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void matrix_vector(double *A, int m, int n, double *x, double *y) {\n'
' for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
' }\n'
' for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = y[i] + %(rhs)s;\n'
' }\n'
' }\n'
'}\n'
)
assert (code == expected %{'rhs': 'A[i*n + j]*x[j]'} or
code == expected %{'rhs': 'A[j + i*n]*x[j]'} or
code == expected %{'rhs': 'x[j]*A[i*n + j]'} or
code == expected %{'rhs': 'x[j]*A[j + i*n]'})
assert f2 == 'file.h'
assert interface == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'void matrix_vector(double *A, int m, int n, double *x, double *y);\n'
'#endif\n'
)
def test_dummy_loops_c():
from sympy.tensor import IndexedBase, Idx
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void test_dummies(int m_%(mno)i, double *x, double *y) {\n'
' for (int i_%(ino)i=0; i_%(ino)i<m_%(mno)i; i_%(ino)i++){\n'
' y[i_%(ino)i] = x[i_%(ino)i];\n'
' }\n'
'}\n'
) % {'ino': i.label.dummy_index, 'mno': m.dummy_index}
r = Routine('test_dummies', Eq(y[i], x[i]))
c = CCodeGen()
code = get_string(c.dump_c, [r])
assert code == expected
def test_partial_loops_c():
# check that loop boundaries are determined by Idx, and array strides
# determined by shape of IndexedBase object.
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m,o,p = symbols('n m o p', integer=True)
A = IndexedBase('A', shape=(m, p))
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', (o, m - 5)) # Note: bounds are inclusive
j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C", "file", header=False, empty=False)
assert f1 == 'file.c'
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y) {\n'
' for (int i=o; i<%(upperi)s; i++){\n'
' y[i] = 0;\n'
' }\n'
' for (int i=o; i<%(upperi)s; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = y[i] + %(rhs)s;\n'
' }\n'
' }\n'
'}\n'
) % {'upperi': m - 4, 'rhs': '%(rhs)s'}
assert (code == expected %{'rhs': 'A[i*p + j]*x[j]'} or
code == expected %{'rhs': 'A[j + i*p]*x[j]'} or
code == expected %{'rhs': 'x[j]*A[i*p + j]'} or
code == expected %{'rhs': 'x[j]*A[j + i*p]'})
assert f2 == 'file.h'
assert interface == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y);\n'
'#endif\n'
)
def test_output_arg_c():
from sympy import sin, cos, Equality
x, y, z = symbols("x,y,z")
r = Routine("foo", [Equality(y, sin(x)), cos(x)])
c = CCodeGen()
result = c.write([r], "test", header=False, empty=False)
assert result[0][0] == "test.c"
expected = (
'#include "test.h"\n'
'#include <math.h>\n'
'double foo(double x, double &y) {\n'
' y = sin(x);\n'
' return cos(x);\n'
'}\n'
)
assert result[0][1] == expected
def test_empty_f_code():
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [])
assert source == ""
def test_empty_f_code_with_header():
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [], header=True)
assert source[:82] == (
"!******************************************************************************\n!*"
)
# " Code generated with sympy 0.7.1 "
assert source[158:] == ( "*\n"
"!* *\n"
"!* See http://www.sympy.org/ for more information. *\n"
"!* *\n"
"!* This file is part of 'project' *\n"
"!******************************************************************************\n"
)
def test_empty_f_header():
code_gen = FCodeGen()
source = get_string(code_gen.dump_h, [])
assert source == ""
def test_simple_f_code():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"test = z*(x + y)\n"
"end function\n"
)
assert source == expected
def test_numbersymbol_f_code():
routine = Routine("test", pi**Catalan)
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test()\n"
"implicit none\n"
"REAL*8, parameter :: Catalan = 0.915965594177219d0\n"
"REAL*8, parameter :: pi = 3.14159265358979d0\n"
"test = pi**Catalan\n"
"end function\n"
)
assert source == expected
def test_f_code_argument_order():
x,y,z = symbols('x,y,z')
expr = x + y
routine = Routine("test", expr, argument_sequence=[z, x, y])
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test(z, x, y)\n"
"implicit none\n"
"REAL*8, intent(in) :: z\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"test = x + y\n"
"end function\n"
)
assert source == expected
def test_simple_f_header():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = FCodeGen()
source = get_string(code_gen.dump_h, [routine])
expected = (
"interface\n"
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"end function\n"
"end interface\n"
)
assert source == expected
def test_simple_f_codegen():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
result = codegen(("test", (x+y)*z), "F95", "file", header=False, empty=False)
expected = [
("file.f90",
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"test = z*(x + y)\n"
"end function\n"),
("file.h",
"interface\n"
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"end function\n"
"end interface\n")
]
assert result == expected
def test_multiple_results_f():
x,y,z = symbols('x,y,z')
expr1 = (x+y)*z
expr2 = (x-y)*z
routine = Routine(
"test",
[expr1,expr2]
)
code_gen = FCodeGen()
raises(CodeGenError, 'get_string(code_gen.dump_h, [routine])')
def test_no_results_f():
raises(ValueError, 'Routine("test", [])')
def test_intrinsic_math_codegen():
# not included: log10
from sympy import (acos, asin, atan, ceiling, cos, cosh, floor, log, ln,
sin, sinh, sqrt, tan, tanh, N, Abs)
x = symbols('x')
name_expr = [
("test_abs", Abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
# ("test_ceil", ceiling(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
# ("test_floor", floor(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test_abs(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_abs = Abs(x)\n'
'end function\n'
'REAL*8 function test_acos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_acos = acos(x)\n'
'end function\n'
'REAL*8 function test_asin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_asin = asin(x)\n'
'end function\n'
'REAL*8 function test_atan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_atan = atan(x)\n'
'end function\n'
'REAL*8 function test_cos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_cos = cos(x)\n'
'end function\n'
'REAL*8 function test_cosh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_cosh = cosh(x)\n'
'end function\n'
'REAL*8 function test_log(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_log = log(x)\n'
'end function\n'
'REAL*8 function test_ln(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_ln = log(x)\n'
'end function\n'
'REAL*8 function test_sin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sin = sin(x)\n'
'end function\n'
'REAL*8 function test_sinh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sinh = sinh(x)\n'
'end function\n'
'REAL*8 function test_sqrt(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sqrt = sqrt(x)\n'
'end function\n'
'REAL*8 function test_tan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_tan = tan(x)\n'
'end function\n'
'REAL*8 function test_tanh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_tanh = tanh(x)\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test_abs(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_acos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_asin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_atan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_cos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_cosh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_log(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_ln(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sinh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sqrt(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_tan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_tanh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_intrinsic_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
x, y = symbols('x,y')
name_expr = [
("test_atan2", atan2(x,y)),
("test_pow", x**y),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test_atan2(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'test_atan2 = atan2(x, y)\n'
'end function\n'
'REAL*8 function test_pow(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'test_pow = x**y\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test_atan2(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_pow(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_complicated_codegen_f95():
from sympy import sin, cos, tan, N
x,y,z = symbols('x,y,z')
name_expr = [
("test1", ((sin(x)+cos(y)+tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x+y+z))))))))),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test1(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'test1 = sin(x)**7 + 7*sin(x)**6*cos(y) + 7*sin(x)**6*tan(z) + 21*sin(x) &\n'
' **5*cos(y)**2 + 42*sin(x)**5*cos(y)*tan(z) + 21*sin(x)**5*tan(z) &\n'
' **2 + 35*sin(x)**4*cos(y)**3 + 105*sin(x)**4*cos(y)**2*tan(z) + &\n'
' 105*sin(x)**4*cos(y)*tan(z)**2 + 35*sin(x)**4*tan(z)**3 + 35*sin( &\n'
' x)**3*cos(y)**4 + 140*sin(x)**3*cos(y)**3*tan(z) + 210*sin(x)**3* &\n'
' cos(y)**2*tan(z)**2 + 140*sin(x)**3*cos(y)*tan(z)**3 + 35*sin(x) &\n'
' **3*tan(z)**4 + 21*sin(x)**2*cos(y)**5 + 105*sin(x)**2*cos(y)**4* &\n'
' tan(z) + 210*sin(x)**2*cos(y)**3*tan(z)**2 + 210*sin(x)**2*cos(y) &\n'
' **2*tan(z)**3 + 105*sin(x)**2*cos(y)*tan(z)**4 + 21*sin(x)**2*tan &\n'
' (z)**5 + 7*sin(x)*cos(y)**6 + 42*sin(x)*cos(y)**5*tan(z) + 105* &\n'
' sin(x)*cos(y)**4*tan(z)**2 + 140*sin(x)*cos(y)**3*tan(z)**3 + 105 &\n'
' *sin(x)*cos(y)**2*tan(z)**4 + 42*sin(x)*cos(y)*tan(z)**5 + 7*sin( &\n'
' x)*tan(z)**6 + cos(y)**7 + 7*cos(y)**6*tan(z) + 21*cos(y)**5*tan( &\n'
' z)**2 + 35*cos(y)**4*tan(z)**3 + 35*cos(y)**3*tan(z)**4 + 21*cos( &\n'
' y)**2*tan(z)**5 + 7*cos(y)*tan(z)**6 + tan(z)**7\n'
'end function\n'
'REAL*8 function test2(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'test2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test1(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test2(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_loops():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n,m', integer=True)
A, x, y = map(IndexedBase, 'Axy')
i = Idx('i', m)
j = Idx('j', n)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False)
assert f1 == 'file.f90'
expected = (
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = 1, m\n'
' y(i) = 0\n'
'end do\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
) % {'rhs': 'A(i, j)*x(j)'}
assert expected == code
assert f2 == 'file.h'
assert interface == (
'interface\n'
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'end subroutine\n'
'end interface\n'
)
def test_dummy_loops_f95():
from sympy.tensor import IndexedBase, Idx
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'subroutine test_dummies(m_%(mcount)i, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m_%(mcount)i\n'
'REAL*8, intent(in), dimension(1:m_%(mcount)i) :: x\n'
'REAL*8, intent(out), dimension(1:m_%(mcount)i) :: y\n'
'INTEGER*4 :: i_%(icount)i\n'
'do i_%(icount)i = 1, m_%(mcount)i\n'
' y(i_%(icount)i) = x(i_%(icount)i)\n'
'end do\n'
'end subroutine\n'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
r = Routine('test_dummies', Eq(y[i], x[i]))
c = FCodeGen()
code = get_string(c.dump_f95, [r])
assert code == expected
def test_loops_InOut():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
i,j,n,m = symbols('i,j,n,m', integer=True)
A,x,y = symbols('A,x,y')
A = IndexedBase(A)[Idx(i, m), Idx(j, n)]
x = IndexedBase(x)[Idx(j, n)]
y = IndexedBase(y)[Idx(i, m)]
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y, y + A*x)), "F95", "file", header=False, empty=False)
assert f1 == 'file.f90'
expected = (
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(inout), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
)
assert (code == expected % {'rhs': 'A(i, j)*x(j)'} or
code == expected % {'rhs': 'x(j)*A(i, j)'})
assert f2 == 'file.h'
assert interface == (
'interface\n'
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(inout), dimension(1:m) :: y\n'
'end subroutine\n'
'end interface\n'
)
def test_partial_loops_f():
# check that loop boundaries are determined by Idx, and array strides
# determined by shape of IndexedBase object.
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m,o,p = symbols('n m o p', integer=True)
A = IndexedBase('A', shape=(m, p))
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', (o, m - 5)) # Note: bounds are inclusive
j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False)
expected = (
'subroutine matrix_vector(A, m, n, o, p, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'INTEGER*4, intent(in) :: o\n'
'INTEGER*4, intent(in) :: p\n'
'REAL*8, intent(in), dimension(1:m, 1:p) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:%(iup-ilow)s) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = %(ilow)s, %(iup)s\n'
' y(i) = 0\n'
'end do\n'
'do i = %(ilow)s, %(iup)s\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
) % {
'rhs': 'A(i, j)*x(j)',
'iup': str(m - 4),
'ilow': str(1+o),
'iup-ilow': str(m - 4 -o)
}
assert expected == code
def test_output_arg_f():
from sympy import sin, cos, Equality
x, y, z = symbols("x,y,z")
r = Routine("foo", [Equality(y, sin(x)), cos(x)])
c = FCodeGen()
result = c.write([r], "test", header=False, empty=False)
assert result[0][0] == "test.f90"
assert result[0][1] == (
'REAL*8 function foo(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(out) :: y\n'
'y = sin(x)\n'
'foo = cos(x)\n'
'end function\n'
)
def test_inline_function():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m = symbols('n m', integer=True)
A, x, y = map(IndexedBase, 'Axy')
i = Idx('i', m)
j = Idx('j', n)
p = FCodeGen()
func = implemented_function('func', Lambda(n, n*(n+1)))
routine = Routine('test_inline', Eq(y[i], func(x[i])))
code = get_string(p.dump_f95, [routine])
expected = (
'subroutine test_inline(m, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'REAL*8, intent(in), dimension(1:m) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'do i = 1, m\n'
' y(i) = (1 + x(i))*x(i)\n'
'end do\n'
'end subroutine\n'
)
assert code == expected
def test_check_case():
x, X = symbols('x,X')
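# Fortran is case-insensitive, so arguments named x and X would collapse into
# the same identifier; codegen is expected to refuse this with a CodeGenError.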
raises(CodeGenError, "codegen(('test', x*X), 'f95', 'prefix')")
def test_check_case_false_positive():
# The upper case/lower case exception should not be triggered by Sympy
# objects that differ only because of assumptions. (It may be useful to
# have a check for that as well, but here we only want to test against
# false positives with respect to case checking.)
x1 = symbols('x')
x2 = symbols('x', my_assumption=True)
try:
codegen(('test', x1*x2), 'f95', 'prefix')
except CodeGenError, e:
if e.args[0][0:21] == "Fortran ignores case.":
raise AssertionError("This exception should not be raised!")
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/utilities/tests/test_codegen.py
|
Python
|
agpl-3.0
| 39,167
|
import string
from django.utils.text import slugify
from django.utils.timezone import now
from lxml import html
from lxml.html import tostring
from lxml.html.clean import Cleaner
from cl.lib.string_utils import anonymize, trunc
from cl.search.models import OpinionCluster
from juriscraper.lib.string_utils import clean_string, harmonize, titlecase
import re
import subprocess
BROWSER = 'firefox'
def merge_cases_simple(new, target_id):
"""Add `new` to the database, merging with target_id
Merging is done by picking the best fields from each item.
"""
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !! THIS CODE IS OUT OF DATE AND UNMAINTAINED. FEEL FREE TO FIX IT, BUT !!
# !! DO NOT TRUST IT IN ITS CURRENT STATE. !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
target = OpinionCluster.objects.get(pk=target_id)
print "Merging %s with" % new.case_name
print " %s" % target.case_name
cached_source = target.source # Original value is needed below.
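# The 'L' added below appears to record Lawbox as an additional source for this
# cluster (the merged text is stored in html_lawbox further down).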
if target.source == 'C':
target.source = 'LC'
elif target.source == 'R':
target.source = 'LR'
elif target.source == 'CR':
target.source = 'LCR'
# Add the URL if it's not a court one, replacing public.resource.org's
# info in some cases.
if cached_source == 'R':
target.download_url = new.download_url
# Recreate the slug from the new case name (this changes the URL, but the
# old will continue working)
target.slug = slugify(trunc(new.case_name, 75))
# Take the case name from the new item; they tend to be pretty good
target.case_name = new.case_name
# Add the docket number if the old doesn't exist, but keep the old if one
# does.
if not target.docket.docket_number:
target.docket.docket_number = new.docket.docket_number
# Get the citations from the new item (ditch the old).
target.federal_cite_one = new.federal_cite_one
target.federal_cite_two = new.federal_cite_two
target.federal_cite_three = new.federal_cite_three
target.state_cite_one = new.state_cite_one
target.state_cite_two = new.state_cite_two
target.state_cite_three = new.state_cite_three
target.state_cite_regional = new.state_cite_regional
target.specialty_cite_one = new.specialty_cite_one
target.scotus_early_cite = new.scotus_early_cite
target.lexis_cite = new.lexis_cite
target.westlaw_cite = new.westlaw_cite
target.neutral_cite = new.neutral_cite
# Add judge information if lacking. New is dirty, but better than none.
if not target.judges:
target.judges = new.judges
# Add the text.
target.html_lawbox, blocked = anonymize(new.html)
if blocked:
target.blocked = True
target.date_blocked = now()
target.extracted_by_ocr = False # No longer true for any LB case.
# save_doc_and_cite(target, index=False)
def merge_cases_complex(case, target_ids):
"""Merge data from PRO with multiple cases that seem to be a match.
The process here is a conservative one. We take *only* the information
from PRO that is not already in CL in any form, and add only that.
"""
# THIS CODE ONLY UPDATED IN THE MOST CURSORY FASHION. DO NOT TRUST IT.
for target_id in target_ids:
simulate = False
oc = OpinionCluster.objects.get(pk=target_id)
print "Merging %s with" % case.case_name
print " %s" % oc.case_name
oc.source = 'CR'
oc.west_cite = case.west_cite
if not simulate:
oc.save()
def find_same_docket_numbers(doc, candidates):
"""Identify the candidates that have the same docket numbers as doc after
each has been cleaned.
"""
new_docket_number = re.sub('(\D|0)', '', doc.docket.docket_number)
same_docket_numbers = []
for candidate in candidates:
old_docket_number = re.sub('(\D|0)', '', candidate.get('docketNumber', ''))
if all([len(new_docket_number) > 3, len(old_docket_number) > 3]):
if old_docket_number in new_docket_number:
same_docket_numbers.append(candidate)
return same_docket_numbers
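# Illustrative example (not part of the original module): the cleaning above
# drops every non-digit and every zero, so "94-1234" and "No. 94-01234" both
# reduce to "941234" and would be treated as the same docket number.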
def case_name_in_candidate(case_name_new, case_name_candidate):
"""When there is one candidate match, this compares their case names to see
if one is contained in the other, in the right order.
Returns True if so, else False.
"""
regex = re.compile('[%s]' % re.escape(string.punctuation))
case_name_new_words = regex.sub('', case_name_new.lower()).split()
case_name_candidate_words = regex.sub('', case_name_candidate.lower()).split()
index = 0
for word in case_name_new_words:
if len(word) <= 2:
continue
try:
index = case_name_candidate_words[index:].index(word)
except ValueError:
# The items were out of order or the item wasn't in the candidate.
return False
return True
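# Illustrative example (not part of the original module): for
# case_name_in_candidate("Parker v. Brown", "parker et al. v. brown, governor")
# the words "parker" and "brown" are found in order in the candidate (short
# words such as "v" are skipped), so the function returns True; reversing the
# party order in the candidate would make the lookup fail and return False.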
def filter_by_stats(candidates, stats):
"""Looks at the candidates and their stats, and filters out obviously
different candidates.
"""
filtered_candidates = []
filtered_stats = {
'candidate_count': 0,
'case_name_similarities': [],
'length_diffs': [],
'gestalt_diffs': [],
'cos_sims': [],
}
for i in range(0, len(candidates)):
# Commented out because the casenames in public.resource.org can be so
# long this varies too much.
# if stats['case_name_similarities'][i] < 0.125:
# # The case name is wildly different
# continue
if stats['length_diffs'][i] > 400:
# The documents have wildly different lengths
continue
# Commented out because the headnotes sometimes included in Resource.org made this calculation vary too much.
#elif stats['gestalt_diffs'][i] < 0.4:
# # The contents are wildly different
# continue
elif stats['cos_sims'][i] < 0.90:
# Very different cosine similarities
continue
else:
# It's a reasonably close match.
filtered_candidates.append(candidates[i])
filtered_stats['case_name_similarities'].append(stats['case_name_similarities'][i])
filtered_stats['length_diffs'].append(stats['length_diffs'][i])
filtered_stats['gestalt_diffs'].append(stats['gestalt_diffs'][i])
filtered_stats['cos_sims'].append(stats['cos_sims'][i])
filtered_stats['candidate_count'] = len(filtered_candidates)
return filtered_candidates, filtered_stats
class Case(object):
def _get_case_name_and_status(self):
case_name = self.url_element.get('title').lower()
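# Each circuit-specific regex below matches the boilerplate "unpublished
# disposition" notice that the corresponding court of appeals prepends to a
# case title; when one matches, the notice is stripped from the case name and
# the opinion is marked Unpublished below.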
ca1regex = re.compile('(unpublished disposition )?notice: first circuit local rule 36.2\(b\)6 states unpublished opinions may be cited only in related cases.?')
ca2regex = re.compile('(unpublished disposition )?notice: second circuit local rule 0.23 states unreported opinions shall not be cited or otherwise used in unrelated cases.?')
ca2regex2 = re.compile('(unpublished disposition )?notice: this summary order may not be cited as precedential authority, but may be called to the attention of the court in a subsequent stage of this case, in a related case, or in any case for purposes of collateral estoppel or res judicata. see second circuit rule 0.23.?')
ca3regex = re.compile('(unpublished disposition )?notice: third circuit rule 21\(i\) states citations to federal decisions which have not been formally reported should identify the court, docket number and date.?')
ca4regex = re.compile('(unpublished disposition )?notice: fourth circuit (local rule 36\(c\)|i.o.p. 36.6) states that citation of unpublished dispositions is disfavored except for establishing res judicata, estoppel, or the law of the case and requires service of copies of cited unpublished dispositions of the fourth circuit.?')
ca5regex = re.compile('(unpublished disposition )?notice: fifth circuit local rule 47.5.3 states that unpublished opinions should normally be cited only when they establish the law of the case, are relied upon as a basis for res judicata or collateral estoppel, or involve related facts. if an unpublished opinion is cited, a copy shall be attached to each copy of the brief.?')
ca6regex = re.compile('(unpublished disposition )?notice: sixth circuit rule 24\(c\) states that citation of unpublished dispositions is disfavored except for establishing res judicata, estoppel, or the law of the case and requires service of copies of cited unpublished dispositions of the sixth circuit.?')
ca7regex = re.compile('(unpublished disposition )?notice: seventh circuit rule 53\(b\)\(2\) states unpublished orders shall not be cited or used as precedent except to support a claim of res judicata, collateral estoppel or law of the case in any federal court within the circuit.?')
ca8regex = re.compile('(unpublished disposition )?notice: eighth circuit rule 28a\(k\) governs citation of unpublished opinions and provides that (no party may cite an opinion not intended for publication unless the cases are related by identity between the parties or the causes of action|they are not precedent and generally should not be cited unless relevant to establishing the doctrines of res judicata, collateral estoppel, the law of the case, or if the opinion has persuasive value on a material issue and no published opinion would serve as well).?')
ca9regex = re.compile('(unpublished disposition )?notice: ninth circuit rule 36-3 provides that dispositions other than opinions or orders designated for publication are not precedential and should not be cited except when relevant under the doctrines of law of the case, res judicata, or collateral estoppel.?')
ca10regex = re.compile('(unpublished disposition )?notice: tenth circuit rule 36.3 states that unpublished opinions and orders and judgments have no precedential value and shall not be cited except for purposes of establishing the doctrines of the law of the case, res judicata, or collateral estoppel.?')
cadcregex = re.compile('(unpublished disposition )?notice: d.c. circuit local rule 11\(c\) states that unpublished orders, judgments, and explanatory memoranda may not be cited as precedents, but counsel may refer to unpublished dispositions when the binding or preclusive effect of the disposition, rather than its quality as precedent, is relevant.?')
cafcregex = re.compile('(unpublished disposition )?notice: federal circuit local rule 47.(6|8)\(b\) states that opinions and orders which are designated as not citable as precedent shall not be employed or cited as precedent. this does not preclude assertion of issues of claim preclusion, issue preclusion, judicial estoppel, law of the case or the like based on a decision of the court rendered in a nonprecedential opinion or order.?')
# Clean off special cases
if 'first circuit' in case_name:
case_name = re.sub(ca1regex, '', case_name)
status = 'Unpublished'
elif 'second circuit' in case_name:
case_name = re.sub(ca2regex, '', case_name)
case_name = re.sub(ca2regex2, '', case_name)
status = 'Unpublished'
elif 'third circuit' in case_name:
case_name = re.sub(ca3regex, '', case_name)
status = 'Unpublished'
elif 'fourth circuit' in case_name:
case_name = re.sub(ca4regex, '', case_name)
status = 'Unpublished'
elif 'fifth circuit' in case_name:
case_name = re.sub(ca5regex, '', case_name)
status = 'Unpublished'
elif 'sixth circuit' in case_name:
case_name = re.sub(ca6regex, '', case_name)
status = 'Unpublished'
elif 'seventh circuit' in case_name:
case_name = re.sub(ca7regex, '', case_name)
status = 'Unpublished'
elif 'eighth circuit' in case_name:
case_name = re.sub(ca8regex, '', case_name)
status = 'Unpublished'
elif 'ninth circuit' in case_name:
case_name = re.sub(ca9regex, '', case_name)
status = 'Unpublished'
elif 'tenth circuit' in case_name:
case_name = re.sub(ca10regex, '', case_name)
status = 'Unpublished'
elif 'd.c. circuit' in case_name:
case_name = re.sub(cadcregex, '', case_name)
status = 'Unpublished'
elif 'federal circuit' in case_name:
case_name = re.sub(cafcregex, '', case_name)
status = 'Unpublished'
else:
status = 'Published'
case_name = titlecase(harmonize(clean_string(case_name)))
if case_name == '' or case_name == 'unpublished disposition':
# No luck getting the case name
saved_case_name = self._check_fix_list(self.sha1_hash, self.case_name_dict)
if saved_case_name:
case_name = saved_case_name
else:
print self.url
if BROWSER:
subprocess.Popen([BROWSER, self.url], shell=False).communicate()
case_name = raw_input("Short case name: ")
self.case_name_fix_file.write("%s|%s\n" % (self.sha1_hash, case_name))
return case_name, status
def get_html_from_raw_text(raw_text):
"""Using the raw_text, creates four useful variables:
1. complete_html_tree: A tree of the complete HTML from the file, including <head> tags and whatever else.
2. clean_html_tree: A tree of the HTML after stripping bad stuff.
3. clean_html_str: A str of the HTML after stripping bad stuff.
4. body_text: A str of the text of the body of the document.
We require all of these because sometimes we need the complete HTML tree, other times we don't. We create them all
up front for performance reasons.
"""
complete_html_tree = html.fromstring(raw_text)
cleaner = Cleaner(style=True,
remove_tags=('a', 'body', 'font', 'noscript',),
kill_tags=('title',),)
clean_html_str = cleaner.clean_html(raw_text)
clean_html_tree = html.fromstring(clean_html_str)
body_text = tostring(clean_html_tree, method='text', encoding='unicode')
return clean_html_tree, complete_html_tree, clean_html_str, body_text
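# Illustrative usage (not part of the original module); note the return order,
# which differs from the numbering in the docstring above:
#     clean_tree, complete_tree, clean_str, body_text = get_html_from_raw_text(raw_text)
#     # body_text can then feed text-similarity checks, while complete_tree
#     # still exposes the <head> metadata.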
|
voutilad/courtlistener
|
cl/corpus_importer/dup_helpers.py
|
Python
|
agpl-3.0
| 14,568
|
# coding: utf-8
import datetime
from django.core.management.base import BaseCommand
# from django.template.loader import render_to_string
from django.db.models import Count
from scheduler.models import Need
from shiftmailer.models import Mailer
from shiftmailer.excelexport import GenerateExcelSheet
DATE_FORMAT = '%d.%m.%Y'
class Command(BaseCommand):
help = 'sends emails taken from addresses (.models.mailer) with a list of shifts for this day; ' \
'intended to be run from a cronjob'
def add_arguments(self, parser):
parser.add_argument('--date', dest='print_date', default=datetime.date.today().strftime(DATE_FORMAT),
help='The date to generate scheduler for')
def handle(self, *args, **options):
mailer = Mailer.objects.all()
t = datetime.datetime.strptime(options['print_date'], DATE_FORMAT)
for mail in mailer:
needs = Need.objects.filter(location=mail.location).filter(
ending_time__year=t.strftime("%Y"),
ending_time__month=t.strftime("%m"),
ending_time__day=t.strftime("%d")) \
.order_by('topic', 'ending_time') \
.annotate(volunteer_count=Count('registrationprofile')) \
.select_related('topic', 'location') \
.prefetch_related('registrationprofile_set', 'registrationprofile_set__user')
# if it's not used anyway, we maybe shouldn't even render it? #
# message = render_to_string('shifts_today.html', locals())
iua = GenerateExcelSheet(shifts=needs, mailer=mail)
iua.send_file()
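# Illustrative invocation (assuming the command is registered under this
# module's name, "mailer", and using the DD.MM.YYYY format expected above):
#     python manage.py mailer --date 24.12.2015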
|
FriedrichK/volunteer_planner
|
shiftmailer/management/commands/mailer.py
|
Python
|
agpl-3.0
| 1,636
|
''' TBD '''
|
Phantasus/intelmq
|
intelmq/bots/collectors/xmpp/collector.py
|
Python
|
agpl-3.0
| 11
|
from __future__ import unicode_literals
from django.apps import AppConfig
class SystemConfig(AppConfig):
name = 'system'
|
inteos/IBAdmin
|
system/apps.py
|
Python
|
agpl-3.0
| 128
|
import numpy as np
import unittest
from ..aux_readers import NASA_2DVD_reader
class TestNasa2DvdReaderMc3eSubcase(unittest.TestCase):
"Test module for the NASA_2DVD_reader class in pydsd.aux_io.NASA_2DVD_reader for mc3e files"
def setUp(self):
filename = "testdata/nasa_gv_mc3e_2dvd_test.txt"
self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename)
def test_can_read_sample_file(self):
self.assertIsNotNone(self.dsd, "File did not read in correctly, returned None")
def test_dsd_nd_exists(self):
self.assertIsNotNone(self.dsd.fields["Nd"], "DSD Object has no Nd field")
def test_dsd_nd_is_dict(self):
self.assertIsInstance(self.dsd.fields["Nd"], dict, "Nd was not a dictionary.")
def test_RR_works(self):
self.dsd.calculate_RR()
self.assertIsNotNone(
self.dsd.fields["rain_rate"],
"Rain Rate is not in fields after calculate_RR()",
)
self.assertEqual(
len(self.dsd.fields["rain_rate"]["data"]),
5,
"Wrong number of time samples in rain rate",
)
def test_can_run_calc_dsd_params(self):
self.dsd.calculate_dsd_parameterization()
self.assertIsNotNone(
self.dsd.fields["D0"],
"The Field D0 did not exist after dsd_parameterization check",
)
self.assertEqual(
len(self.dsd.fields["D0"]["data"]), 5, "Wrong number of samples in D0"
)
def test_time_same_length_as_Nd(self):
self.assertEqual(
len(self.dsd.time["data"]),
self.dsd.fields["Nd"]["data"].shape[0],
"Different number of samples for time and Nd",
)
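# Note (not part of the original module): the relative import above means this
# file is meant to be collected by a test runner from the package root, e.g.
# `pytest pydsd/tests`, rather than executed directly.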
|
josephhardinee/PyDisdrometer
|
pydsd/tests/testNasa2DVDReader_mc3e.py
|
Python
|
lgpl-2.1
| 1,716
|