| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
| shownomercy/django | refs/heads/master | tests/expressions_case/__init__.py | 12133432 | |
| mammique/django | refs/heads/tp_alpha | tests/regressiontests/templates/templatetags/subpackage/__init__.py | 12133432 | |
| pygeek/django | refs/heads/master | django/contrib/sessions/backends/__init__.py | 12133432 | |
| pygeek/django | refs/heads/master | django/conf/locale/km/__init__.py | 12133432 | |
| nemunaire/nemubot | refs/heads/master | nemubot/tools/xmlparser/basic.py | 2 | (content below) |
# Nemubot is a smart and modular IM bot.
# Copyright (C) 2012-2016 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class ListNode:
"""XML node representing a Python dictionnnary
"""
def __init__(self, **kwargs):
self.items = list()
def addChild(self, name, child):
self.items.append(child)
return True
def __len__(self):
return len(self.items)
def __getitem__(self, item):
return self.items[item]
def __setitem__(self, item, v):
self.items[item] = v
def __contains__(self, item):
return item in self.items
def __repr__(self):
return self.items.__repr__()
def saveElement(self, store, tag="list"):
store.startElement(tag, {})
for i in self.items:
i.saveElement(store)
store.endElement(tag)
class DictNode:
"""XML node representing a Python dictionnnary
"""
def __init__(self, **kwargs):
self.items = dict()
self._cur = None
def startElement(self, name, attrs):
if self._cur is None and "key" in attrs:
self._cur = (attrs["key"], "")
return True
return False
def characters(self, content):
if self._cur is not None:
key, cnt = self._cur
if isinstance(cnt, str):
cnt += content
self._cur = key, cnt
def endElement(self, name):
if name is not None or self._cur is None:
return
key, cnt = self._cur
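        # A single-child list collapses to the child itself; accumulated
        # character data or a multi-child list is stored as-is.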
if isinstance(cnt, list) and len(cnt) == 1:
self.items[key] = cnt[0]
else:
self.items[key] = cnt
self._cur = None
return True
def addChild(self, name, child):
if self._cur is None:
return False
key, cnt = self._cur
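        # On the first child, promote the accumulated value to a list,
        # discarding any character data gathered so far.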
if not isinstance(cnt, list):
cnt = []
cnt.append(child)
self._cur = key, cnt
return True
def __getitem__(self, item):
return self.items[item]
def __setitem__(self, item, v):
self.items[item] = v
def __contains__(self, item):
return item in self.items
def __repr__(self):
return self.items.__repr__()
def saveElement(self, store, tag="dict"):
store.startElement(tag, {})
for k, v in self.items.items():
store.startElement("item", {"key": k})
if isinstance(v, str):
store.characters(v)
else:
if hasattr(v, "__iter__"):
for i in v:
i.saveElement(store)
else:
v.saveElement(store)
store.endElement("item")
store.endElement(tag)
def __delitem__(self, k):
del self.items[k]
def __iter__(self):
return self.items.__iter__()
def keys(self):
return self.items.keys()
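    # NOTE: the ``items`` method below is shadowed by the instance attribute
    # ``self.items`` assigned in __init__, so it is unreachable in practice.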
def items(self):
return self.items.items()
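# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of nemubot): the
# ``store`` object passed to saveElement() only needs startElement(),
# characters() and endElement(), which is exactly the interface of the
# stdlib SAX XMLGenerator, so it can stand in as a serializer. ``StrNode``
# is a hypothetical leaf node written for this demo.
if __name__ == "__main__":
    import io
    from xml.sax.saxutils import XMLGenerator

    class StrNode:
        """Hypothetical leaf node holding a plain string (demo only)."""
        def __init__(self, text):
            self.text = text

        def saveElement(self, store, tag="str"):
            store.startElement(tag, {})
            store.characters(self.text)
            store.endElement(tag)

    d = DictNode()
    d["greeting"] = "hello"            # plain string value
    letters = ListNode()
    letters.addChild("str", StrNode("a"))
    letters.addChild("str", StrNode("b"))
    d["letters"] = letters             # nested node value
    buf = io.StringIO()
    d.saveElement(XMLGenerator(buf, encoding="utf-8"))
    print(buf.getvalue())
    # -> <dict><item key="greeting">hello</item>
    #    <item key="letters"><list><str>a</str><str>b</str></list></item></dict>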
| ilay09/keystone | refs/heads/master | keystone/tests/unit/test_v3_federation.py | 1 | (content below) |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import random
import subprocess
from testtools import matchers
import uuid
import fixtures
from lxml import etree
import mock
from oslo_serialization import jsonutils
from oslo_utils import importutils
import saml2
from saml2 import saml
from saml2 import sigver
from six.moves import http_client
from six.moves import range, urllib, zip
xmldsig = importutils.try_import("saml2.xmldsig")
if not xmldsig:
xmldsig = importutils.try_import("xmldsig")
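# NOTE: importutils.try_import returns None instead of raising ImportError
# when the module is missing, which is why the fallback lookup above is
# attempted.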
from keystone.auth import controllers as auth_controllers
import keystone.conf
from keystone import exception
from keystone.federation import controllers as federation_controllers
from keystone.federation import idp as keystone_idp
from keystone import notifications
from keystone.tests import unit
from keystone.tests.unit import core
from keystone.tests.unit import federation_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3
from keystone.tests.unit import utils
from keystone.token import controllers as token_controller
from keystone.token.providers import common as token_common
CONF = keystone.conf.CONF
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
XMLDIR = os.path.join(ROOTDIR, 'saml2/')
def dummy_validator(*args, **kwargs):
pass
class FederatedSetupMixin(object):
ACTION = 'authenticate'
IDP = 'ORG_IDP'
PROTOCOL = 'saml2'
AUTH_METHOD = 'saml2'
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2']
REMOTE_ID_ATTR = uuid.uuid4().hex
UNSCOPED_V3_SAML2_REQ = {
"identity": {
"methods": [AUTH_METHOD],
AUTH_METHOD: {
"identity_provider": IDP,
"protocol": PROTOCOL
}
}
}
def _check_domains_are_valid(self, token):
self.assertEqual('Federated', token['user']['domain']['id'])
self.assertEqual('Federated', token['user']['domain']['name'])
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
os_federation = token['user']['OS-FEDERATION']
self.assertIn('groups', os_federation)
self.assertIn('identity_provider', os_federation)
self.assertIn('protocol', os_federation)
self.assertThat(os_federation, matchers.HasLength(3))
self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
def _check_project_scoped_token_attributes(self, token, project_id):
self.assertEqual(project_id, token['project']['id'])
self._check_scoped_token_attributes(token)
def _check_domain_scoped_token_attributes(self, token, domain_id):
self.assertEqual(domain_id, token['domain']['id'])
self._check_scoped_token_attributes(token)
def assertValidMappedUser(self, token):
"""Check if user object meets all the criteria."""
user = token['user']
self.assertIn('id', user)
self.assertIn('name', user)
self.assertIn('domain', user)
self.assertIn('groups', user['OS-FEDERATION'])
self.assertIn('identity_provider', user['OS-FEDERATION'])
self.assertIn('protocol', user['OS-FEDERATION'])
# Make sure user_name is url safe
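        # For illustration: urllib.parse.quote('jsmith') == 'jsmith', so a
        # plain name passes, while 'j smith' would fail because
        # urllib.parse.quote('j smith') == 'j%20smith'.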
self.assertEqual(urllib.parse.quote(user['name']), user['name'])
def _issue_unscoped_token(self,
idp=None,
assertion='EMPLOYEE_ASSERTION',
environment=None):
api = federation_controllers.Auth()
environment = environment or {}
environment.update(getattr(mapping_fixtures, assertion))
request = self.make_request(environ=environment)
if idp is None:
idp = self.IDP
r = api.federated_authentication(request, idp, self.PROTOCOL)
return r
def idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def mapping_ref(self, rules=None):
return {
'id': uuid.uuid4().hex,
'rules': rules or self.rules['rules']
}
def _scope_request(self, unscoped_token_id, scope, scope_id):
return {
'auth': {
'identity': {
'methods': [
'token'
],
'token': {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
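    # For illustration (not executed): _scope_request('abc', 'project', 'p1')
    # returns the body
    #     {'auth': {'identity': {'methods': ['token'],
    #                            'token': {'id': 'abc'}},
    #               'scope': {'project': {'id': 'p1'}}}}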
def _inject_assertion(self, request, variant):
assertion = getattr(mapping_fixtures, variant)
request.context_dict['environment'].update(assertion)
def load_federation_sample_data(self):
"""Inject additional data."""
# Create and add domains
self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = unit.new_domain_ref()
self.resource_api.create_domain(self.domainC['id'],
self.domainC)
self.domainD = unit.new_domain_ref()
self.resource_api.create_domain(self.domainD['id'],
self.domainD)
# Create and add projects
self.proj_employees = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_employees['id'],
self.proj_employees)
self.proj_customers = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_customers['id'],
self.proj_customers)
self.project_all = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.project_all['id'],
self.project_all)
self.project_inherited = unit.new_project_ref(
domain_id=self.domainD['id'])
self.resource_api.create_project(self.project_inherited['id'],
self.project_inherited)
# Create and add groups
self.group_employees = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_employees = (
self.identity_api.create_group(self.group_employees))
self.group_customers = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_customers = (
self.identity_api.create_group(self.group_customers))
self.group_admins = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_admins = self.identity_api.create_group(self.group_admins)
# Create and add roles
self.role_employee = unit.new_role_ref()
self.role_api.create_role(self.role_employee['id'], self.role_employee)
self.role_customer = unit.new_role_ref()
self.role_api.create_role(self.role_customer['id'], self.role_customer)
self.role_admin = unit.new_role_ref()
self.role_api.create_role(self.role_admin['id'], self.role_admin)
# Employees can access
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_all['id'])
# Customers can access
# * proj_customers
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
project_id=self.proj_customers['id'])
# Admins can access:
# * proj_customers
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_customers['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.project_all['id'])
# Customers can access:
# * domain A
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access projects via inheritance:
# * domain D
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainD['id'],
inherited_to_projects=True)
# Employees can access:
# * domain A
# * domain B
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainB['id'])
# Admins can access:
# * domain A
# * domain B
# * domain C
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainC['id'])
self.rules = {
'rules': [
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email',
},
{
'type': 'orgPersonType',
'any_one_of': [
'Employee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
'type': self.ASSERTION_PREFIX + 'Email',
},
{
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Customer'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_admins['id']
}
},
{
'group': {
'id': self.group_employees['id']
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'Email'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Admin',
'Chief'
]
}
]
},
{
'local': [
{
'group': {
'id': uuid.uuid4().hex
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'FirstName',
'any_one_of': [
'Jill'
]
},
{
'type': 'LastName',
'any_one_of': [
'Smith'
]
}
]
},
{
'local': [
{
'group': {
'id': 'this_group_no_longer_exists'
}
},
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'Email',
'any_one_of': [
'testacct@example.com'
]
},
{
'type': 'orgPersonType',
'any_one_of': [
'Tester'
]
}
]
},
# rules with local group names
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
},
{
"group": {
"name": self.group_customers['name'],
"domain": {
"name": self.domainA['name']
}
}
}
],
"remote": [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
"type": "orgPersonType",
"any_one_of": [
"CEO",
"CTO"
],
}
]
},
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
},
{
"group": {
"name": self.group_admins['name'],
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "Email",
},
{
"type": "orgPersonType",
"any_one_of": [
"Managers"
]
}
]
},
{
"local": [
{
"user": {
"name": "{0}",
"id": "{1}"
}
},
{
"group": {
"name": "NON_EXISTING",
"domain": {
"id": self.domainA['id']
}
}
}
],
"remote": [
{
"type": "UserName",
},
{
"type": "Email",
},
{
"type": "UserName",
"any_one_of": [
"IamTester"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": self.user['domain_id']
}
}
},
{
"group": {
"id": self.group_customers['id']
}
}
],
"remote": [
{
"type": "UserType",
"any_one_of": [
"random"
]
}
]
},
{
"local": [
{
"user": {
"type": "local",
"name": self.user['name'],
"domain": {
"id": uuid.uuid4().hex
}
}
}
],
"remote": [
{
"type": "Position",
"any_one_of": [
"DirectorGeneral"
]
}
]
},
# rules for users with no groups
{
"local": [
{
'user': {
'name': '{0}',
'id': '{1}'
}
}
],
"remote": [
{
'type': 'UserName',
},
{
'type': 'Email',
},
{
'type': 'orgPersonType',
'any_one_of': [
'NoGroupsOrg'
]
}
]
}
]
}
# Add IDP
self.idp = self.idp_ref(id=self.IDP)
self.federation_api.create_idp(self.idp['id'],
self.idp)
# Add IDP with remote
self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
self.idp_with_remote['remote_ids'] = self.REMOTE_IDS
self.federation_api.create_idp(self.idp_with_remote['id'],
self.idp_with_remote)
# Add a mapping
self.mapping = self.mapping_ref()
self.federation_api.create_mapping(self.mapping['id'],
self.mapping)
# Add protocols
self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
self.proto_saml['id'] = self.PROTOCOL
self.federation_api.create_protocol(self.idp['id'],
self.proto_saml['id'],
self.proto_saml)
# Add protocols IDP with remote
self.federation_api.create_protocol(self.idp_with_remote['id'],
self.proto_saml['id'],
self.proto_saml)
# Generate fake tokens
request = self.make_request()
self.tokens = {}
VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
'ADMIN_ASSERTION')
api = auth_controllers.Auth()
for variant in VARIANTS:
self._inject_assertion(request, variant)
r = api.authenticate_for_token(request, self.UNSCOPED_V3_SAML2_REQ)
self.tokens[variant] = r.headers.get('X-Subject-Token')
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
uuid.uuid4().hex, 'project', self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
self.tokens['EMPLOYEE_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.project_inherited['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain',
self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain',
self.domainC['id'])
class FederatedIdentityProviderTests(test_v3.RestfulTestCase):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
default_body = {'description': None, 'enabled': True}
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/identity_providers/' + str(suffix)
return '/OS-FEDERATION/identity_providers'
def _fetch_attribute_from_response(self, resp, parameter,
assert_is_not_none=True):
"""Fetch single attribute from TestResponse object."""
result = resp.result.get(parameter)
if assert_is_not_none:
self.assertIsNotNone(result)
return result
def _create_and_decapsulate_response(self, body=None):
"""Create IdP and fetch it's random id along with entity."""
default_resp = self._create_default_idp(body=body)
idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
self.assertIsNotNone(idp)
idp_id = idp.get('id')
return (idp_id, idp)
def _get_idp(self, idp_id):
"""Fetch IdP entity based on its id."""
url = self.base_url(suffix=idp_id)
resp = self.get(url)
return resp
def _create_default_idp(self, body=None,
expected_status=http_client.CREATED):
"""Create default IdP."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
expected_status=expected_status)
return resp
def _http_idp_input(self):
"""Create default input dictionary for IdP data."""
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
return body
def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
mapping_id=None, validate=True, **kwargs):
if url is None:
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
if idp_id is None:
idp_id, _ = self._create_and_decapsulate_response()
if proto is None:
proto = uuid.uuid4().hex
if mapping_id is None:
mapping_id = uuid.uuid4().hex
self._create_mapping(mapping_id)
body = {'mapping_id': mapping_id}
url = url % {'idp_id': idp_id, 'protocol_id': proto}
resp = self.put(url, body={'protocol': body}, **kwargs)
if validate:
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': mapping_id})
return (resp, idp_id, proto)
def _get_protocol(self, idp_id, protocol_id):
url = '%s/protocols/%s' % (idp_id, protocol_id)
url = self.base_url(suffix=url)
r = self.get(url)
return r
def _create_mapping(self, mapping_id):
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
mapping['id'] = mapping_id
url = '/OS-FEDERATION/mappings/%s' % mapping_id
self.put(url,
body={'mapping': mapping},
expected_status=http_client.CREATED)
def assertIdpDomainCreated(self, idp_id, domain_id):
domain = self.resource_api.get_domain(domain_id)
self.assertEqual(domain_id, domain['name'])
self.assertIn(idp_id, domain['description'])
def test_create_idp_without_domain_id(self):
"""Create the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
attr = self._fetch_attribute_from_response(resp, 'identity_provider')
self.assertIdpDomainCreated(attr['id'], attr['domain_id'])
def test_create_idp_with_domain_id(self):
keys_to_check = list(self.idp_keys)
keys_to_check.append('domain_id')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
body['domain_id'] = domain['id']
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_domain_id_none(self):
keys_to_check = list(self.idp_keys)
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['domain_id'] = None
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
attr = self._fetch_attribute_from_response(resp, 'identity_provider')
self.assertIdpDomainCreated(attr['id'], attr['domain_id'])
def test_create_idp_domain_id_unique_constraint(self):
# create domain and add domain_id to keys to check
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
keys_to_check = list(self.idp_keys)
keys_to_check.append('domain_id')
# create idp with the domain_id
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['domain_id'] = domain['id']
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
# create a 2nd idp with the same domain_id
url = self.base_url(suffix=uuid.uuid4().hex)
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['domain_id'] = domain['id']
resp = self.put(url, body={'identity_provider': body},
expected_status=http_client.CONFLICT)
resp_data = jsonutils.loads(resp.body)
self.assertIn('Duplicate entry',
resp_data.get('error', {}).get('message'))
def test_cannot_update_idp_domain(self):
# create new idp
body = self.default_body.copy()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
# create domain and try to update the idp's domain
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
body['domain_id'] = domain['id']
body = {'identity_provider': body}
url = self.base_url(suffix=idp_id)
self.patch(url, body=body, expected_status=http_client.BAD_REQUEST)
def test_create_idp_with_nonexistent_domain_id_fails(self):
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['domain_id'] = uuid.uuid4().hex
self._create_default_idp(body=body,
expected_status=http_client.NOT_FOUND)
def test_create_idp_remote(self):
"""Create the IdentityProvider entity associated to remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex]
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
attr = self._fetch_attribute_from_response(resp, 'identity_provider')
self.assertIdpDomainCreated(attr['id'], attr['domain_id'])
def test_create_idp_remote_repeated(self):
"""Create two IdentityProvider entities with some remote_ids.
        A remote_id is the same for both, so the second IdP is not created
        because remote_ids must be unique.
Expect HTTP 409 Conflict code for the latter call.
"""
body = self.default_body.copy()
repeated_remote_id = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
repeated_remote_id]
self._create_default_idp(body=body)
url = self.base_url(suffix=uuid.uuid4().hex)
body['remote_ids'] = [uuid.uuid4().hex,
repeated_remote_id]
resp = self.put(url, body={'identity_provider': body},
expected_status=http_client.CONFLICT)
resp_data = jsonutils.loads(resp.body)
self.assertIn('Duplicate remote ID',
resp_data.get('error', {}).get('message'))
def test_create_idp_remote_empty(self):
"""Create an IdP with empty remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = []
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_create_idp_remote_none(self):
"""Create an IdP with a None remote_ids."""
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
body['remote_ids'] = None
resp = self._create_default_idp(body=body)
expected = body.copy()
expected['remote_ids'] = []
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=expected)
def test_update_idp_remote_ids(self):
"""Update IdP's remote_ids parameter."""
body = self.default_body.copy()
body['remote_ids'] = [uuid.uuid4().hex]
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex]
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
self.assertEqual(sorted(body['remote_ids']),
sorted(updated_idp.get('remote_ids')))
resp = self.get(url)
returned_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
def test_update_idp_clean_remote_ids(self):
"""Update IdP's remote_ids parameter with an empty list."""
body = self.default_body.copy()
body['remote_ids'] = [uuid.uuid4().hex]
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
body['remote_ids'] = []
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
self.assertEqual(sorted(body['remote_ids']),
sorted(updated_idp.get('remote_ids')))
resp = self.get(url)
returned_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
def test_update_idp_remote_repeated(self):
"""Update an IdentityProvider entity reusing a remote_id.
        The remote_id is the same for both, so the second IdP is not
        updated because remote_ids must be unique.
Expect HTTP 409 Conflict code for the latter call.
"""
# Create first identity provider
body = self.default_body.copy()
repeated_remote_id = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
repeated_remote_id]
self._create_default_idp(body=body)
# Create second identity provider (without remote_ids)
body = self.default_body.copy()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
body['remote_ids'] = [repeated_remote_id]
resp = self.patch(url, body={'identity_provider': body},
expected_status=http_client.CONFLICT)
resp_data = jsonutils.loads(resp.body)
self.assertIn('Duplicate remote ID',
resp_data['error']['message'])
def test_list_idps(self, iterations=5):
"""List all available IdentityProviders.
        This test collects the ids of created IdPs and intersects them with
        the list of all available IdPs.
        The list of all IdPs can be a superset of the IdPs created in this
        test, because other tests also create IdPs.
"""
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
ids = []
for _ in range(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
        keys_to_check = list(self.idp_keys)
        keys_to_check.append('domain_id')
url = self.base_url()
resp = self.get(url)
self.assertValidListResponse(resp, 'identity_providers',
dummy_validator,
keys_to_check=keys_to_check)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = set([e['id'] for e in entities])
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
def test_filter_list_idp_by_id(self):
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
idp1_id = get_id(self._create_default_idp())
idp2_id = get_id(self._create_default_idp())
        # list the IdPs; should get two IdPs.
url = self.base_url()
resp = self.get(url)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = [e['id'] for e in entities]
self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
# filter the IdP by ID.
url = self.base_url() + '?id=' + idp1_id
resp = self.get(url)
filtered_service_list = resp.json['identity_providers']
self.assertThat(filtered_service_list, matchers.HasLength(1))
self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
def test_filter_list_idp_by_enabled(self):
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
idp1_id = get_id(self._create_default_idp())
body = self.default_body.copy()
body['enabled'] = False
idp2_id = get_id(self._create_default_idp(body=body))
        # list the IdPs; should get two IdPs.
url = self.base_url()
resp = self.get(url)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = [e['id'] for e in entities]
self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
# filter the IdP by 'enabled'.
url = self.base_url() + '?enabled=True'
resp = self.get(url)
filtered_service_list = resp.json['identity_providers']
self.assertThat(filtered_service_list, matchers.HasLength(1))
self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
Expect HTTP 409 Conflict code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
body['domain_id'] = domain['id']
self.put(url, body={'identity_provider': body},
expected_status=http_client.CREATED)
resp = self.put(url, body={'identity_provider': body},
expected_status=http_client.CONFLICT)
resp_data = jsonutils.loads(resp.body)
self.assertIn('Duplicate entry',
resp_data.get('error', {}).get('message'))
def test_get_idp(self):
"""Create and later fetch IdP."""
body = self._http_idp_input()
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
body['domain_id'] = domain['id']
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
resp = self.get(url)
# Strip keys out of `body` dictionary. This is done
# to be python 3 compatible
body_keys = list(body)
self.assertValidResponse(resp, 'identity_provider',
dummy_validator, keys_to_check=body_keys,
ref=body)
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
        Expect HTTP 404 Not Found status code.
"""
idp_id = uuid.uuid4().hex
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_delete_existing_idp(self):
"""Create and later delete IdP.
Expect HTTP 404 Not Found for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_delete_idp_also_deletes_assigned_protocols(self):
"""Deleting an IdP will delete its assigned protocol."""
# create default IdP
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp['id']
protocol_id = uuid.uuid4().hex
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
idp_url = self.base_url(suffix=idp_id)
# assign protocol to IdP
kwargs = {'expected_status': http_client.CREATED}
resp, idp_id, proto = self._assign_protocol_to_idp(
url=url,
idp_id=idp_id,
proto=protocol_id,
**kwargs)
# removing IdP will remove the assigned protocol as well
self.assertEqual(1, len(self.federation_api.list_protocols(idp_id)))
self.delete(idp_url)
self.get(idp_url, expected_status=http_client.NOT_FOUND)
self.assertEqual(0, len(self.federation_api.list_protocols(idp_id)))
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
        Expect HTTP 404 Not Found for the DELETE call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_update_idp_mutable_attributes(self):
"""Update IdP's mutable parameters."""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
body = {'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex],
'description': uuid.uuid4().hex,
'enabled': _enabled}
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
for key in body.keys():
if isinstance(body[key], list):
self.assertEqual(sorted(body[key]),
sorted(updated_idp.get(key)))
else:
self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
if isinstance(body[key], list):
self.assertEqual(sorted(body[key]),
sorted(updated_idp.get(key)))
else:
self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
Expect HTTP BAD REQUEST.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
body = self._http_idp_input()
body['id'] = uuid.uuid4().hex
body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body},
expected_status=http_client.BAD_REQUEST)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP.
Expect HTTP 404 Not Found code.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
body = self._http_idp_input()
body['enabled'] = False
body = {'identity_provider': body}
self.patch(url, body=body, expected_status=http_client.NOT_FOUND)
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
self._assign_protocol_to_idp(expected_status=http_client.CREATED)
def test_protocol_composite_pk(self):
"""Test that Keystone can add two entities.
        The entities have identical names but are attached to different
        IdPs.
        1. Add an IdP and assign it a protocol with a predefined name.
        2. Add another IdP and assign it a protocol with the same name.
        Expect HTTP 201 code for both calls.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': http_client.CREATED}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
def test_protocol_idp_pk_uniqueness(self):
"""Test whether Keystone checks for unique idp/protocol values.
        Add the same protocol twice; expect Keystone to reject the latter
        call and return HTTP 409 Conflict code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': http_client.CREATED}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': http_client.CONFLICT}
self._assign_protocol_to_idp(
idp_id=idp_id, proto='saml2', validate=False, url=url, **kwargs
)
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
Expect HTTP 404 Not Found code.
"""
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': http_client.NOT_FOUND}
self._assign_protocol_to_idp(proto='saml2',
idp_id=idp_id,
validate=False,
**kwargs)
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
resp = self.get(url)
reference = {'id': proto_id}
# Strip keys out of `body` dictionary. This is done
# to be python 3 compatible
reference_keys = list(reference)
self.assertValidResponse(resp, 'protocol',
dummy_validator,
keys_to_check=reference_keys,
ref=reference)
def test_list_protocols(self):
"""Create set of protocols and later list them.
Compare input and output id sets.
"""
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in range(iterations):
resp, _, proto = self._assign_protocol_to_idp(
idp_id=idp_id,
expected_status=http_client.CREATED)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
url = "%s/protocols" % idp_id
url = self.base_url(suffix=url)
resp = self.get(url)
self.assertValidListResponse(resp, 'protocols',
dummy_validator,
keys_to_check=['id'])
entities = self._fetch_attribute_from_response(resp, 'protocols')
entities = set([entity['id'] for entity in entities])
protocols_intersection = entities.intersection(protocol_ids)
self.assertEqual(protocols_intersection, set(protocol_ids))
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
new_mapping_id = uuid.uuid4().hex
self._create_mapping(mapping_id=new_mapping_id)
url = "%s/protocols/%s" % (idp_id, proto)
url = self.base_url(suffix=url)
body = {'mapping_id': new_mapping_id}
resp = self.patch(url, body={'protocol': body})
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': new_mapping_id}
)
def test_delete_protocol(self):
"""Delete protocol.
Expect HTTP 404 Not Found code for the GET call after the protocol is
deleted.
"""
        url = self.base_url(suffix='%(idp_id)s/'
                            'protocols/%(protocol_id)s')
resp, idp_id, proto = self._assign_protocol_to_idp(
expected_status=http_client.CREATED)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
class MappingCRUDTests(test_v3.RestfulTestCase):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
def assertValidMappingListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'mappings',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMappingResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'mapping',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMapping(self, entity, ref=None):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
self.assertEqual(entity['rules'], ref['rules'])
return entity
def _create_default_mapping_entry(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
expected_status=http_client.CREATED)
return resp
def _get_id_from_response(self, resp):
r = resp.result.get('mapping')
return r.get('id')
def test_mapping_create(self):
resp = self._create_default_mapping_entry()
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_list(self):
url = self.MAPPING_URL
self._create_default_mapping_entry()
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
self.assertResponseStatus(resp, http_client.OK)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(1, len(entities))
def test_mapping_delete(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
self.assertResponseStatus(resp, http_client.NO_CONTENT)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_mapping_get(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_update(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.patch(url,
body={'mapping': mapping_fixtures.MAPPING_SMALL})
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
def test_delete_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_get_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.get(url, expected_status=http_client.NOT_FOUND)
def test_create_mapping_bad_requirements(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
def test_create_mapping_no_rules(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
def test_create_mapping_no_remote_objects(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
def test_create_mapping_bad_value(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
def test_create_mapping_missing_local(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
def test_create_mapping_missing_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
def test_create_mapping_wrong_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
def test_create_mapping_extra_remote_properties_not_any_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_any_one_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_just_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_empty_map(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': {}})
def test_create_mapping_extra_rules_properties(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
def test_create_mapping_with_blacklist_and_whitelist(self):
"""Test for adding whitelist and blacklist in the rule.
Server should respond with HTTP 400 Bad Request error upon discovering
both ``whitelist`` and ``blacklist`` keywords in the same rule.
"""
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
def test_create_mapping_with_local_user_and_local_domain(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(
url,
body={
'mapping': mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
},
expected_status=http_client.CREATED)
self.assertValidMappingResponse(
resp, mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN)
def test_create_mapping_with_ephemeral(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(
url,
body={'mapping': mapping_fixtures.MAPPING_EPHEMERAL_USER},
expected_status=http_client.CREATED)
self.assertValidMappingResponse(
resp, mapping_fixtures.MAPPING_EPHEMERAL_USER)
def test_create_mapping_with_bad_user_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
# get a copy of a known good map
bad_mapping = copy.deepcopy(mapping_fixtures.MAPPING_EPHEMERAL_USER)
# now sabotage the user type
bad_mapping['rules'][0]['local'][0]['user']['type'] = uuid.uuid4().hex
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': bad_mapping})
def test_create_shadow_mapping_without_roles_fails(self):
"""Validate that mappings with projects contain roles when created."""
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(
url,
body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_ROLES},
expected_status=http_client.BAD_REQUEST
)
def test_update_shadow_mapping_without_roles_fails(self):
"""Validate that mappings with projects contain roles when updated."""
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(
url,
body={'mapping': mapping_fixtures.MAPPING_PROJECTS},
expected_status=http_client.CREATED
)
self.assertValidMappingResponse(
resp, mapping_fixtures.MAPPING_PROJECTS
)
self.patch(
url,
body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_ROLES},
expected_status=http_client.BAD_REQUEST
)
def test_create_shadow_mapping_without_name_fails(self):
"""Validate project mappings contain the project name when created."""
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(
url,
body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_NAME},
expected_status=http_client.BAD_REQUEST
)
def test_update_shadow_mapping_without_name_fails(self):
"""Validate project mappings contain the project name when updated."""
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(
url,
body={'mapping': mapping_fixtures.MAPPING_PROJECTS},
expected_status=http_client.CREATED
)
self.assertValidMappingResponse(
resp, mapping_fixtures.MAPPING_PROJECTS
)
self.patch(
url,
body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_NAME},
expected_status=http_client.BAD_REQUEST
)
class FederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
super(FederatedTokenTests, self).auth_plugin_config_override(methods)
def setUp(self):
super(FederatedTokenTests, self).setUp()
self._notifications = []
def fake_saml_notify(action, request, user_id, group_ids,
identity_provider, protocol, token_id, outcome):
note = {
'action': action,
'user_id': user_id,
'identity_provider': identity_provider,
'protocol': protocol,
'send_notification_called': True}
self._notifications.append(note)
self.useFixture(fixtures.MockPatchObject(
notifications,
'send_saml_audit_notification',
fake_saml_notify))
def _assert_last_notify(self, action, identity_provider, protocol,
user_id=None):
self.assertTrue(self._notifications)
note = self._notifications[-1]
if user_id:
self.assertEqual(note['user_id'], user_id)
self.assertEqual(note['action'], action)
self.assertEqual(note['identity_provider'], identity_provider)
self.assertEqual(note['protocol'], protocol)
self.assertTrue(note['send_notification_called'])
def load_fixtures(self, fixtures):
super(FederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def test_issue_unscoped_token_notify(self):
self._issue_unscoped_token()
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL)
def test_issue_unscoped_token(self):
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.assertValidMappedUser(r.json['token'])
def test_issue_unscoped_token_disabled_idp(self):
"""Check if authentication works with disabled identity providers.
Test plan:
1) Disable default IdP
2) Try issuing unscoped token for that IdP
3) Expect server to forbid authentication
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token)
def test_issue_unscoped_token_group_names_in_mapping(self):
r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION')
ref_groups = set([self.group_customers['id'], self.group_admins['id']])
token_resp = r.json_body
token_groups = token_resp['token']['user']['OS-FEDERATION']['groups']
token_groups = set([group['id'] for group in token_groups])
self.assertEqual(ref_groups, token_groups)
def test_issue_unscoped_tokens_nonexisting_group(self):
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='ANOTHER_TESTER_ASSERTION')
def test_issue_unscoped_token_with_remote_no_attribute(self):
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_saml2_remote(self):
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_different(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Forbidden,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_default_overwritten(self):
"""Test that protocol remote_id_attribute has higher priority.
        Make sure the parameter stored under the ``protocol`` section takes
        priority over the parameter from the default ``federation``
        configuration section.
"""
self.config_fixture.config(group='saml2',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.config_fixture.config(group='federation',
remote_id_attribute=uuid.uuid4().hex)
r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
environment={
self.REMOTE_ID_ATTR:
self.REMOTE_IDS[0]
})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_with_remote_unavailable(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
uuid.uuid4().hex: uuid.uuid4().hex
})
def test_issue_unscoped_token_with_remote_user_as_empty_string(self):
# make sure that REMOTE_USER set as the empty string won't interfere
r = self._issue_unscoped_token(environment={'REMOTE_USER': ''})
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_no_groups(self):
r = self._issue_unscoped_token(assertion='USER_NO_GROUPS_ASSERTION')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
token_resp = r.json_body
token_groups = token_resp['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(0, len(token_groups))
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
environ = {
'malformed_object': object(),
'another_bad_idea': tuple(range(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex, range(32)))
}
environ.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environ)
r = api.authenticate_for_token(request, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once_notify(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
user_id = r.json['token']['user']['id']
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id)
def test_scope_to_project_once(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self._check_project_scoped_token_attributes(token_resp, project_id)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
self.assertValidMappedUser(token_resp)
def test_scope_token_with_idp_disabled(self):
"""Scope token issued by disabled IdP.
        Try scoping a token issued by an IdP which has since been disabled.
        Expect the server to refuse the scoping operation.
        This test confirms correct behaviour when an IdP was enabled when
        the unscoped token was issued, but disabled before the user tries
        to scope it.
        Here we assume the unscoped token was already issued and start from
        the moment where the IdP is being disabled and the unscoped token
        is being used.
Test plan:
1) Disable IdP
2) Try scoping unscoped token
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.FORBIDDEN)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(token_resp,
project_id_ref)
def test_scope_to_project_with_only_inherited_roles(self):
"""Try to scope token whose only roles are inherited."""
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(
token_resp, self.project_inherited['id'])
roles_ref = [self.role_customer]
projects_ref = self.project_inherited
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
self.assertValidMappedUser(token_resp)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=http_client.NOT_FOUND)
def test_issue_token_from_rules_without_user(self):
api = auth_controllers.Auth()
environ = copy.deepcopy(mapping_fixtures.BAD_TESTER_ASSERTION)
request = self.make_request(environ=environ)
self.assertRaises(exception.Unauthorized,
api.authenticate_for_token,
request, self.UNSCOPED_V3_SAML2_REQ)
def test_issue_token_with_nonexistent_group(self):
"""Inject assertion that matches rule issuing bad group id.
Expect server to find out that some groups are missing in the
backend and raise exception.MappedGroupNotFound exception.
"""
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.v3_create_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
self.domainA['id'])
def test_scope_to_domain_multiple_tokens(self):
"""Issue multiple tokens scoping to different domains.
The new tokens should be scoped to:
* domainA
* domainB
* domainC
"""
bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
domain_ids = (self.domainA['id'],
self.domainB['id'],
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
domain_id_ref)
def test_scope_to_domain_with_only_inherited_roles_fails(self):
"""Try to scope to a domain that has no direct roles."""
self.v3_create_token(
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
def test_list_projects(self):
urls = ('/OS-FEDERATION/projects', '/auth/projects')
        tokens = (self.tokens['CUSTOMER_ASSERTION'],
                  self.tokens['EMPLOYEE_ASSERTION'],
                  self.tokens['ADMIN_ASSERTION'])
projects_refs = (set([self.proj_customers['id'],
self.project_inherited['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id'],
self.project_inherited['id']]))
        for token, projects_ref in zip(tokens, projects_refs):
for url in urls:
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects_ref, projects,
'match failed for url %s' % url)
# TODO(samueldmq): Create another test class for role inheritance tests.
# The advantage would be to reduce the complexity of this test class and
# have tests specific to this functionality grouped, easing readability and
    # maintainability.
def test_list_projects_for_inherited_project_assignment(self):
# Create a subproject
subproject_inherited = unit.new_project_ref(
domain_id=self.domainD['id'],
parent_id=self.project_inherited['id'])
self.resource_api.create_project(subproject_inherited['id'],
subproject_inherited)
# Create an inherited role assignment
self.assignment_api.create_grant(
role_id=self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_inherited['id'],
inherited_to_projects=True)
# Define expected projects from employee assertion, which contain
# the created subproject
expected_project_ids = [self.project_all['id'],
self.proj_employees['id'],
subproject_inherited['id']]
# Assert expected projects for both available URLs
for url in ('/OS-FEDERATION/projects', '/auth/projects'):
r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION'])
project_ids = [project['id'] for project in r.result['projects']]
self.assertEqual(len(expected_project_ids), len(project_ids))
for expected_project_id in expected_project_ids:
self.assertIn(expected_project_id, project_ids,
'Projects match failed for url %s' % url)
def test_list_domains(self):
urls = ('/OS-FEDERATION/domains', '/auth/domains')
tokens = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
# NOTE(henry-nash): domain D does not appear in the expected results
# since it only had inherited roles (which only apply to projects
# within the domain)
domain_refs = (set([self.domainA['id']]),
set([self.domainA['id'],
self.domainB['id']]),
set([self.domainA['id'],
self.domainB['id'],
self.domainC['id']]))
for token, domains_ref in zip(tokens, domain_refs):
for url in urls:
r = self.get(url, token=token)
domains_resp = r.result['domains']
domains = set(p['id'] for p in domains_resp)
self.assertEqual(domains_ref, domains,
'match failed for url %s' % url)
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
r = self._issue_unscoped_token()
token_resp = r.json_body['token']
# NOTE(lbragstad): Ensure only 'saml2' is in the method list.
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertValidMappedUser(token_resp)
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/auth/projects', token=employee_unscoped_token_id)
projects = r.result['projects']
        project = random.choice(projects)
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.v3_create_token(v3_scope_request)
token_resp = r.result['token']
self.assertIn('token', token_resp['methods'])
self.assertIn('saml2', token_resp['methods'])
self._check_project_scoped_token_attributes(token_resp, project['id'])
def test_workflow_with_groups_deletion(self):
"""Test full workflow with groups deletion before token scoping.
The test scenario is as follows:
- Create group ``group``
- Create and assign roles to ``group`` and ``project_all``
- Patch mapping rules for existing IdP so it issues group id
- Issue unscoped token with ``group``'s id
- Delete group ``group``
- Scope token to ``project_all``
- Expect HTTP 500 response
"""
# create group and role
group = unit.new_group_ref(domain_id=self.domainA['id'])
group = self.identity_api.create_group(group)
role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
        # assign the role to the group on project_all
self.assignment_api.create_grant(role['id'],
group_id=group['id'],
project_id=self.project_all['id'])
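        # Mapping rules (written inline rather than taken from the fixtures):
        # match assertions whose LastName is 'Account' (e.g. the
        # TESTER_ASSERTION used below), map UserName onto the ephemeral user
        # name and add the user to the group created above.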
rules = {
'rules': [
{
'local': [
{
'group': {
'id': group['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'LastName',
'any_one_of': [
'Account'
]
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
token_id = r.headers.get('X-Subject-Token')
# delete group
self.identity_api.delete_group(group['id'])
# scope token to project_all, expect HTTP 500
scoped_token = self._scope_request(
token_id, 'project',
self.project_all['id'])
self.v3_create_token(
scoped_token, expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_lists_with_missing_group_in_backend(self):
"""Test a mapping that points to a group that does not exist.
        For explicit mappings, we expect the group to exist in the backend,
        but for lists, specifically blacklists, a missing group is expected,
        as the IdP may specify many groups that are not Keystone groups.
The test scenario is as follows:
- Create group ``EXISTS``
- Set mapping rules for existing IdP with a blacklist
that passes through as REMOTE_USER_GROUPS
        - Issue an unscoped token with the ``EXISTS`` group id in it
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
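        # Two rules: REMOTE_USER populates the user name and id, and every
        # value of REMOTE_USER_GROUPS is mapped onto a group name within
        # domainA; names missing from the backend are expected to be skipped.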
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
        self.federation_api.update_mapping(self.mapping['id'], rules)
        r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
        assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
        # Only the group existing in the backend should have been mapped.
        self.assertEqual(1, len(assigned_group_ids))
        self.assertEqual(group['id'], assigned_group_ids[0]['id'])
    def test_empty_blacklist_passes_all_values(self):
"""Test a mapping with empty blacklist specified.
Not adding a ``blacklist`` keyword to the mapping rules has the same
effect as adding an empty ``blacklist``.
In both cases, the mapping engine will not discard any groups that are
associated with apache environment variables.
This test checks scenario where an empty blacklist was specified.
Expected result is to allow any value.
The test scenario is as follows:
- Create group ``EXISTS``
- Create group ``NO_EXISTS``
- Set mapping rules for existing IdP with a blacklist
that passes through as REMOTE_USER_GROUPS
- Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
assigned
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = unit.new_group_ref(domain_id=domain_id,
name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
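        # Same two-rule shape as in the other group-mapping tests, but with
        # an explicit empty blacklist on REMOTE_USER_GROUPS, so no group
        # value should be filtered out.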
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
"blacklist": []
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
    def test_not_adding_blacklist_passes_all_values(self):
"""Test a mapping without blacklist specified.
Not adding a ``blacklist`` keyword to the mapping rules has the same
effect as adding an empty ``blacklist``. In both cases all values will
be accepted and passed.
        This test checks the scenario where a blacklist was not specified.
        The expected result is that any value is allowed.
The test scenario is as follows:
- Create group ``EXISTS``
- Create group ``NO_EXISTS``
- Set mapping rules for existing IdP with a blacklist
that passes through as REMOTE_USER_GROUPS
        - Issue an unscoped token with groups ``EXISTS`` and ``NO_EXISTS``
          assigned
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = unit.new_group_ref(domain_id=domain_id,
name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = unit.new_group_ref(domain_id=domain_id,
name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
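        # Same mapping again, this time without any ``blacklist`` keyword;
        # every group value from the assertion should be passed through.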
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_empty_whitelist_discards_all_values(self):
"""Test that empty whitelist blocks all the values.
        Not adding a ``whitelist`` keyword to the mapping value is different
        from adding an empty whitelist. The former case will simply pass all
        the values, whereas the latter will discard all the values.
This test checks scenario where an empty whitelist was specified.
The expected result is that no groups are matched.
The test scenario is as follows:
- Create group ``EXISTS``
        - Set mapping rules for the existing IdP with an empty whitelist
          that would discard any values from the assertion
        - Try issuing an unscoped token, expect that no groups were matched
          and that the federated user does not have any group assigned.
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
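        # Mapping with an explicit empty whitelist on REMOTE_USER_GROUPS;
        # unlike an empty blacklist, this should discard every group value.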
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
"whitelist": []
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_groups = r.json['token']['user']['OS-FEDERATION']['groups']
        self.assertEqual(0, len(assigned_groups))
def test_not_setting_whitelist_accepts_all_values(self):
"""Test that not setting whitelist passes.
        Not adding a ``whitelist`` keyword to the mapping value is different
        from adding an empty whitelist. The former case will simply pass all
        the values, whereas the latter will discard all the values.
        This test checks a scenario where a ``whitelist`` was not specified.
        The expected result is that no groups are ignored.
The test scenario is as follows:
        - Create group ``EXISTS``
        - Create group ``NO_EXISTS``
        - Set mapping rules for the existing IdP without a whitelist,
          so that no values from the assertion are discarded
        - Issue an unscoped token and make sure the ephemeral user is a
          member of the two groups.
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
group_exists = unit.new_group_ref(domain_id=domain_id,
name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
group_no_exists = unit.new_group_ref(domain_id=domain_id,
name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
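        # Mapping without a ``whitelist`` keyword; every group value from
        # the assertion should be accepted.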
rules = {
'rules': [
{
"local": [
{
"user": {
"name": "{0}",
"id": "{0}"
}
}
],
"remote": [
{
"type": "REMOTE_USER"
}
]
},
{
"local": [
{
"groups": "{0}",
"domain": {"name": domain_name}
}
],
"remote": [
{
"type": "REMOTE_USER_GROUPS",
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
self.assertEqual(len(group_ids), len(assigned_group_ids))
for group in assigned_group_ids:
self.assertIn(group['id'], group_ids)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
        With ``assertion_prefix`` set to a fixed, non-default value,
        issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
        Expect the server to return an unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_assertion_prefix_parameter_expect_fail(self):
"""Test parameters filtering based on the prefix.
        With ``assertion_prefix`` left at its default value (the empty
        string), issue an unscoped token from assertion EMPLOYEE_ASSERTION.
        Next, configure ``assertion_prefix`` to the value ``UserName``.
        Try issuing an unscoped token with EMPLOYEE_ASSERTION.
        Expect the server to raise an exception.Unauthorized exception.
"""
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.config_fixture.config(group='federation',
assertion_prefix='UserName')
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token)
def test_v2_auth_with_federation_token_fails(self):
"""Test that using a federation token with v2 auth fails.
If an admin sets up a federated Keystone environment, and a user
incorrectly configures a service (like Nova) to only use v2 auth, the
returned message should be informative.
"""
r = self._issue_unscoped_token()
token_id = r.headers.get('X-Subject-Token')
v2_token_controller = token_controller.Auth()
self.assertRaises(exception.Unauthorized,
v2_token_controller.validate_token,
self.make_request(is_admin=True),
token_id)
def test_unscoped_token_has_user_domain(self):
r = self._issue_unscoped_token()
self._check_domains_are_valid(r.json_body['token'])
def test_scoped_token_has_user_domain(self):
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
self._check_domains_are_valid(r.result['token'])
def test_issue_unscoped_token_for_local_user(self):
r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION')
token_resp = r.json_body['token']
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertEqual(self.user['id'], token_resp['user']['id'])
self.assertEqual(self.user['name'], token_resp['user']['name'])
self.assertEqual(self.domain['id'], token_resp['user']['domain']['id'])
# Make sure the token is not scoped
self.assertNotIn('project', token_resp)
self.assertNotIn('domain', token_resp)
def test_issue_token_for_local_user_user_not_found(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='ANOTHER_LOCAL_USER_ASSERTION')
def test_user_name_and_id_in_federation_token(self):
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION')
token = r.json_body['token']
self.assertEqual(
mapping_fixtures.EMPLOYEE_ASSERTION['UserName'],
token['user']['name'])
self.assertNotEqual(token['user']['name'], token['user']['id'])
r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token = r.json_body['token']
self.assertEqual(
mapping_fixtures.EMPLOYEE_ASSERTION['UserName'],
token['user']['name'])
self.assertNotEqual(token['user']['name'], token['user']['id'])
class FernetFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
AUTH_METHOD = 'token'
def load_fixtures(self, fixtures):
super(FernetFederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def config_overrides(self):
super(FernetFederatedTokenTests, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def auth_plugin_config_override(self):
methods = ['saml2', 'token', 'password']
super(FernetFederatedTokenTests,
self).auth_plugin_config_override(methods)
def test_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
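        # Fernet tokens carry their (encrypted) payload inline, so the token
        # length is deterministic for a given payload; a federated unscoped
        # token with a single group comes out at 204 characters here.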
self.assertEqual(204, len(resp.headers['X-Subject-Token']))
self.assertValidMappedUser(resp.json_body['token'])
def test_federated_unscoped_token_with_multiple_groups(self):
assertion = 'ANOTHER_CUSTOMER_ASSERTION'
resp = self._issue_unscoped_token(assertion=assertion)
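        # The additional group ids enlarge the payload, hence a longer token.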
self.assertEqual(226, len(resp.headers['X-Subject-Token']))
self.assertValidMappedUser(resp.json_body['token'])
def test_validate_federated_unscoped_token(self):
resp = self._issue_unscoped_token()
unscoped_token = resp.headers.get('X-Subject-Token')
# assert that the token we received is valid
self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token})
def test_fernet_full_workflow(self):
"""Test 'standard' workflow for granting Fernet access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
resp = self._issue_unscoped_token()
self.assertValidMappedUser(resp.json_body['token'])
unscoped_token = resp.headers.get('X-Subject-Token')
resp = self.get('/auth/projects', token=unscoped_token)
projects = resp.result['projects']
        project = random.choice(projects)
v3_scope_request = self._scope_request(unscoped_token,
'project', project['id'])
resp = self.v3_create_token(v3_scope_request)
token_resp = resp.result['token']
self._check_project_scoped_token_attributes(token_resp, project['id'])
class FederatedTokenTestsMethodToken(FederatedTokenTests):
"""Test federation operation with unified scoping auth method.
    Test all the operations with the auth method set to ``token``, as a new,
    unified way of scoping all the tokens.
"""
AUTH_METHOD = 'token'
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
super(FederatedTokenTests,
self).auth_plugin_config_override(methods)
    @utils.wip('This will fail because of bug #1501032. The returned method '
'list should contain "saml2". This is documented in bug '
'1501032.')
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to one of available projects
"""
r = self._issue_unscoped_token()
token_resp = r.json_body['token']
# NOTE(lbragstad): Ensure only 'saml2' is in the method list.
self.assertListEqual(['saml2'], token_resp['methods'])
self.assertValidMappedUser(token_resp)
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/auth/projects', token=employee_unscoped_token_id)
projects = r.result['projects']
        project = random.choice(projects)
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.v3_authenticate_token(v3_scope_request)
token_resp = r.result['token']
self.assertIn('token', token_resp['methods'])
self.assertIn('saml2', token_resp['methods'])
self._check_project_scoped_token_attributes(token_resp, project['id'])
class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
"""Test for federated users.
    Tests the new shadow users functionality.
"""
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
super(FederatedUserTests, self).auth_plugin_config_override(methods)
def setUp(self):
super(FederatedUserTests, self).setUp()
def load_fixtures(self, fixtures):
super(FederatedUserTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
    def test_user_id_persistence(self):
        """Ensure user_id is persisted for multiple federated authn calls."""
r = self._issue_unscoped_token()
user_id = r.json_body['token']['user']['id']
self.assertNotEmpty(self.identity_api.get_user(user_id))
r = self._issue_unscoped_token()
user_id2 = r.json_body['token']['user']['id']
self.assertNotEmpty(self.identity_api.get_user(user_id2))
self.assertEqual(user_id, user_id2)
def test_user_role_assignment(self):
# create project and role
project_ref = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
        # authenticate via saml and get back a user id
user_id, unscoped_token = self._authenticate_via_saml()
        # exchange an unscoped token for a scoped token; this results in
        # unauthorized because the user doesn't have any role assignments
v3_scope_request = self._scope_request(unscoped_token, 'project',
project_ref['id'])
r = self.v3_create_token(v3_scope_request,
expected_status=http_client.UNAUTHORIZED)
# assign project role to federated user
self.assignment_api.add_role_to_user_and_project(
user_id, project_ref['id'], role_ref['id'])
# exchange an unscoped token for a scoped token
r = self.v3_create_token(v3_scope_request,
expected_status=http_client.CREATED)
scoped_token = r.headers['X-Subject-Token']
# ensure user can access resource based on role assignment
path = '/projects/%(project_id)s' % {'project_id': project_ref['id']}
r = self.v3_request(path=path, method='GET',
expected_status=http_client.OK,
token=scoped_token)
self.assertValidProjectResponse(r, project_ref)
# create a 2nd project
project_ref2 = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project_ref2['id'], project_ref2)
# ensure the user cannot access the 2nd resource (forbidden)
path = '/projects/%(project_id)s' % {'project_id': project_ref2['id']}
r = self.v3_request(path=path, method='GET',
expected_status=http_client.FORBIDDEN,
token=scoped_token)
def test_domain_scoped_user_role_assignment(self):
# create domain and role
domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
        # authenticate via saml and get back a user id
user_id, unscoped_token = self._authenticate_via_saml()
        # exchange an unscoped token for a scoped token; this results in
        # unauthorized because the user doesn't have any role assignments
v3_scope_request = self._scope_request(unscoped_token, 'domain',
domain_ref['id'])
r = self.v3_create_token(v3_scope_request,
expected_status=http_client.UNAUTHORIZED)
# assign domain role to user
self.assignment_api.create_grant(role_ref['id'],
user_id=user_id,
domain_id=domain_ref['id'])
# exchange an unscoped token for domain scoped token and test
r = self.v3_create_token(v3_scope_request,
expected_status=http_client.CREATED)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
token_resp = r.result['token']
self.assertIn('domain', token_resp)
def test_auth_projects_matches_federation_projects(self):
# create project and role
project_ref = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
        # authenticate via saml and get back a user id
user_id, unscoped_token = self._authenticate_via_saml()
# assign project role to federated user
self.assignment_api.add_role_to_user_and_project(
user_id, project_ref['id'], role_ref['id'])
# get auth projects
r = self.get('/auth/projects', token=unscoped_token)
auth_projects = r.result['projects']
# get federation projects
r = self.get('/OS-FEDERATION/projects', token=unscoped_token)
fed_projects = r.result['projects']
# compare
self.assertItemsEqual(auth_projects, fed_projects)
def test_auth_projects_matches_federation_projects_with_group_assign(self):
# create project, role, group
domain_id = CONF.identity.default_domain_id
project_ref = unit.new_project_ref(domain_id=domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
group_ref = unit.new_group_ref(domain_id=domain_id)
group_ref = self.identity_api.create_group(group_ref)
        # authenticate via saml and get back a user id
user_id, unscoped_token = self._authenticate_via_saml()
# assign role to group at project
self.assignment_api.create_grant(role_ref['id'],
group_id=group_ref['id'],
project_id=project_ref['id'],
domain_id=domain_id)
# add user to group
self.identity_api.add_user_to_group(user_id=user_id,
group_id=group_ref['id'])
# get auth projects
r = self.get('/auth/projects', token=unscoped_token)
auth_projects = r.result['projects']
# get federation projects
r = self.get('/OS-FEDERATION/projects', token=unscoped_token)
fed_projects = r.result['projects']
# compare
self.assertItemsEqual(auth_projects, fed_projects)
def test_auth_domains_matches_federation_domains(self):
# create domain and role
domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
        # authenticate via saml and get back a user id and token
user_id, unscoped_token = self._authenticate_via_saml()
# assign domain role to user
self.assignment_api.create_grant(role_ref['id'],
user_id=user_id,
domain_id=domain_ref['id'])
# get auth domains
r = self.get('/auth/domains', token=unscoped_token)
auth_domains = r.result['domains']
# get federation domains
r = self.get('/OS-FEDERATION/domains', token=unscoped_token)
fed_domains = r.result['domains']
# compare
self.assertItemsEqual(auth_domains, fed_domains)
def test_auth_domains_matches_federation_domains_with_group_assign(self):
# create role, group, and domain
domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
group_ref = unit.new_group_ref(domain_id=domain_ref['id'])
group_ref = self.identity_api.create_group(group_ref)
        # authenticate via saml and get back a user id and token
user_id, unscoped_token = self._authenticate_via_saml()
# assign domain role to group
self.assignment_api.create_grant(role_ref['id'],
group_id=group_ref['id'],
domain_id=domain_ref['id'])
# add user to group
self.identity_api.add_user_to_group(user_id=user_id,
group_id=group_ref['id'])
# get auth domains
r = self.get('/auth/domains', token=unscoped_token)
auth_domains = r.result['domains']
# get federation domains
r = self.get('/OS-FEDERATION/domains', token=unscoped_token)
fed_domains = r.result['domains']
# compare
self.assertItemsEqual(auth_domains, fed_domains)
def test_list_domains_for_user_duplicates(self):
# create role
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
        # authenticate via saml and get back a user id and token
user_id, unscoped_token = self._authenticate_via_saml()
# get federation group domains
r = self.get('/OS-FEDERATION/domains', token=unscoped_token)
group_domains = r.result['domains']
domain_from_group = group_domains[0]
        # assign group domain and role to user; this should create a
        # duplicate domain
self.assignment_api.create_grant(role_ref['id'],
user_id=user_id,
domain_id=domain_from_group['id'])
# get user domains and test for duplicates
r = self.get('/OS-FEDERATION/domains', token=unscoped_token)
user_domains = r.result['domains']
user_domain_ids = []
for domain in user_domains:
self.assertNotIn(domain['id'], user_domain_ids)
user_domain_ids.append(domain['id'])
def test_list_projects_for_user_duplicates(self):
# create role
role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
        # authenticate via saml and get back a user id and token
user_id, unscoped_token = self._authenticate_via_saml()
# get federation group projects
r = self.get('/OS-FEDERATION/projects', token=unscoped_token)
group_projects = r.result['projects']
project_from_group = group_projects[0]
        # assign group project and role to user; this should create a
        # duplicate project
self.assignment_api.add_role_to_user_and_project(
user_id, project_from_group['id'], role_ref['id'])
# get user projects and test for duplicates
r = self.get('/OS-FEDERATION/projects', token=unscoped_token)
user_projects = r.result['projects']
user_project_ids = []
for project in user_projects:
self.assertNotIn(project['id'], user_project_ids)
user_project_ids.append(project['id'])
def test_delete_protocol_after_federated_authentication(self):
# Create a protocol
protocol = self.proto_ref(mapping_id=self.mapping['id'])
self.federation_api.create_protocol(
self.IDP, protocol['id'], protocol)
# Authenticate to create a new federated_user entry with a foreign
# key pointing to the protocol
r = self._issue_unscoped_token()
user_id = r.json_body['token']['user']['id']
self.assertNotEmpty(self.identity_api.get_user(user_id))
# Now we should be able to delete the protocol
self.federation_api.delete_protocol(self.IDP, protocol['id'])
def _authenticate_via_saml(self):
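        # Helper: issue an unscoped federated token and return the mapped
        # user id together with the token id.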
r = self._issue_unscoped_token()
unscoped_token = r.headers['X-Subject-Token']
token_resp = r.json_body['token']
self.assertValidMappedUser(token_resp)
return token_resp['user']['id'], unscoped_token
class ShadowMappingTests(test_v3.RestfulTestCase, FederatedSetupMixin):
"""Test class dedicated to auto-provisioning resources at login.
A shadow mapping is a mapping that contains extra properties about that
specific federated user's situation based on attributes from the assertion.
For example, a shadow mapping can tell us that a user should have specific
role assignments on certain projects within a domain. When a federated user
authenticates, the shadow mapping will create these entities before
returning the authenticated response to the user. This test class is
dedicated to testing specific aspects of shadow mapping when performing
federated authentication.
"""
def setUp(self):
super(ShadowMappingTests, self).setUp()
        # update the mapping we have already set up to have specific projects
        # and roles.
self.federation_api.update_mapping(
self.mapping['id'],
mapping_fixtures.MAPPING_PROJECTS
)
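        # MAPPING_PROJECTS (see mapping_fixtures) maps the assertion's
        # UserName onto per-project role assignments, roughly: Production ->
        # observer, Staging -> member and a user-specific 'Project for
        # <name>' -> admin (compare self.expected_results below).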
        # The shadow mapping we're using in these tests contains roles named
        # `member` and `observer` for the sake of using something other than
        # `admin`. We'll need to create those beforehand, otherwise the
# mapping will fail during authentication because the roles defined in
# the mapping do not exist yet. The shadow mapping mechanism currently
# doesn't support creating roles on-the-fly, but this could change in
# the future after we get some feedback from shadow mapping being used
# in real deployments. We also want to make sure we are dealing with
# global roles and not domain-scoped roles. We have specific tests
# below that test that behavior and the setup is done in the test.
member_role_ref = unit.new_role_ref(name='member')
assert member_role_ref['domain_id'] is None
self.member_role = self.role_api.create_role(
member_role_ref['id'], member_role_ref
)
observer_role_ref = unit.new_role_ref(name='observer')
assert observer_role_ref['domain_id'] is None
self.observer_role = self.role_api.create_role(
observer_role_ref['id'], observer_role_ref
)
        # This maps each project name to the role that the shadow mapping is
        # supposed to assign to the user on that project.
self.expected_results = {
'Production': 'observer',
'Staging': 'member',
'Project for tbo': 'admin'
}
def auth_plugin_config_override(self):
methods = ['saml2', 'token']
super(ShadowMappingTests, self).auth_plugin_config_override(methods)
def load_fixtures(self, fixtures):
super(ShadowMappingTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def test_shadow_mapping_creates_projects(self):
projects = self.resource_api.list_projects()
for project in projects:
self.assertNotIn(project['name'], self.expected_results)
response = self._issue_unscoped_token()
self.assertValidMappedUser(response.json_body['token'])
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
projects = response.json_body['projects']
for project in projects:
project = self.resource_api.get_project_by_name(
project['name'],
self.idp['domain_id']
)
self.assertIn(project['name'], self.expected_results)
def test_shadow_mapping_create_projects_role_assignments(self):
response = self._issue_unscoped_token()
self.assertValidMappedUser(response.json_body['token'])
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
projects = response.json_body['projects']
for project in projects:
            # Ask for a scoped token for each project in the mapping. Each
            # token should contain a different role, so check that it is
            # right, too.
scope = self._scope_request(
unscoped_token, 'project', project['id']
)
response = self.v3_create_token(scope)
project_name = response.json_body['token']['project']['name']
roles = response.json_body['token']['roles']
self.assertEqual(
self.expected_results[project_name], roles[0]['name']
)
def test_shadow_mapping_does_not_create_roles(self):
        # If a role required by the mapping does not exist, then we should
        # fail the mapping, since shadow mapping currently does not support
        # creating roles on-the-fly.
self.role_api.delete_role(self.observer_role['id'])
self.assertRaises(exception.RoleNotFound, self._issue_unscoped_token)
def test_shadow_mapping_creates_project_in_identity_provider_domain(self):
response = self._issue_unscoped_token()
self.assertValidMappedUser(response.json_body['token'])
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
projects = response.json_body['projects']
for project in projects:
self.assertEqual(project['domain_id'], self.idp['domain_id'])
def test_shadow_mapping_is_idempotent(self):
"""Test that projects remain idempotent for every federated auth."""
response = self._issue_unscoped_token()
self.assertValidMappedUser(response.json_body['token'])
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
project_ids = [p['id'] for p in response.json_body['projects']]
response = self._issue_unscoped_token()
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
projects = response.json_body['projects']
for project in projects:
self.assertIn(project['id'], project_ids)
def test_roles_outside_idp_domain_fail_mapping(self):
# Create a new domain
d = unit.new_domain_ref()
new_domain = self.resource_api.create_domain(d['id'], d)
# Delete the member role and recreate it in a different domain
self.role_api.delete_role(self.member_role['id'])
member_role_ref = unit.new_role_ref(
name='member',
domain_id=new_domain['id']
)
self.role_api.create_role(member_role_ref['id'], member_role_ref)
self.assertRaises(
exception.DomainSpecificRoleNotWithinIdPDomain,
self._issue_unscoped_token
)
def test_roles_in_idp_domain_can_be_assigned_from_mapping(self):
# Delete the member role and recreate it in the domain of the idp
self.role_api.delete_role(self.member_role['id'])
member_role_ref = unit.new_role_ref(
name='member',
domain_id=self.idp['domain_id']
)
self.role_api.create_role(member_role_ref['id'], member_role_ref)
response = self._issue_unscoped_token()
user_id = response.json_body['token']['user']['id']
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
projects = response.json_body['projects']
staging_project = self.resource_api.get_project_by_name(
'Staging', self.idp['domain_id']
)
for project in projects:
            # Even though the mapping successfully assigned our user a member
            # role on the Staging project, the /auth/projects response doesn't
# include projects with only domain-specific role assignments.
self.assertNotEqual(project['name'], 'Staging')
domain_role_assignments = self.assignment_api.list_role_assignments(
user_id=user_id,
project_id=staging_project['id'],
strip_domain_roles=False
)
self.assertEqual(
staging_project['id'], domain_role_assignments[0]['project_id']
)
self.assertEqual(
user_id, domain_role_assignments[0]['user_id']
)
def test_mapping_with_groups_includes_projects_with_group_assignment(self):
# create a group called Observers
observer_group = unit.new_group_ref(
domain_id=self.idp['domain_id'],
name='Observers'
)
observer_group = self.identity_api.create_group(observer_group)
# make sure the Observers group has a role on the finance project
finance_project = unit.new_project_ref(
domain_id=self.idp['domain_id'],
name='Finance'
)
finance_project = self.resource_api.create_project(
finance_project['id'], finance_project
)
self.assignment_api.create_grant(
self.observer_role['id'],
group_id=observer_group['id'],
project_id=finance_project['id']
)
# update the mapping
group_rule = {
'group': {
'name': 'Observers',
'domain': {
'id': self.idp['domain_id']
}
}
}
updated_mapping = copy.deepcopy(mapping_fixtures.MAPPING_PROJECTS)
updated_mapping['rules'][0]['local'].append(group_rule)
self.federation_api.update_mapping(self.mapping['id'], updated_mapping)
response = self._issue_unscoped_token()
unscoped_token = response.headers.get('X-Subject-Token')
response = self.get('/auth/projects', token=unscoped_token)
projects = response.json_body['projects']
self.expected_results = {
# These assignments are all a result of a direct mapping from the
# shadow user to the newly created project.
'Production': 'observer',
'Staging': 'member',
'Project for tbo': 'admin',
# This is a result of the mapping engine maintaining its old
# behavior.
'Finance': 'observer'
}
for project in projects:
            # Ask for a scoped token for each project in the mapping. Each
            # token should contain a different role, so check that it is
            # right, too.
scope = self._scope_request(
unscoped_token, 'project', project['id']
)
response = self.v3_create_token(scope)
project_name = response.json_body['token']['project']['name']
roles = response.json_body['token']['roles']
self.assertEqual(
self.expected_results[project_name], roles[0]['name']
)
class JsonHomeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
'https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION'
'/1.0/rel/identity_provider': {
'href-template': '/OS-FEDERATION/identity_providers/{idp_id}',
'href-vars': {
'idp_id': 'https://docs.openstack.org/api/openstack-identity/3'
'/ext/OS-FEDERATION/1.0/param/idp_id'
},
},
}
def _is_xmlsec1_installed():
p = subprocess.Popen(
['which', 'xmlsec1'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    # `which` exits with 0 when the binary is found, so invert the
    # return code to get a boolean
return not bool(p.wait())
def _load_xml(filename):
with open(os.path.join(XMLDIR, filename), 'r') as xml:
return xml.read()
class SAMLGenerationTests(test_v3.RestfulTestCase):
SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
'/BETA/protocols/saml2/auth')
ASSERTION_FILE = 'signed_saml2_assertion.xml'
    # The values of the following variables match the attribute values found
# in ASSERTION_FILE
ISSUER = 'https://acme.com/FIM/sps/openstack/saml20'
RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST'
SUBJECT = 'test_user'
SUBJECT_DOMAIN = 'user_domain'
ROLES = ['admin', 'member']
PROJECT = 'development'
PROJECT_DOMAIN = 'project_domain'
SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2'
ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp'
ASSERTION_VERSION = "2.0"
    SERVICE_PROVIDER_ID = 'ACME'
def sp_ref(self):
ref = {
'auth_url': self.SP_AUTH_URL,
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': self.RECIPIENT,
'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
def setUp(self):
super(SAMLGenerationTests, self).setUp()
self.signed_assertion = saml2.create_class_from_xml_string(
saml.Assertion, _load_xml(self.ASSERTION_FILE))
self.sp = self.sp_ref()
        url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVIDER_ID
self.put(url, body={'service_provider': self.sp},
expected_status=http_client.CREATED)
def test_samlize_token_values(self):
"""Test the SAML generator produces a SAML object.
        Test the SAML generator directly by passing known arguments; the
        result should be a SAML object that consistently includes attributes
        based on the known arguments that were passed in.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
assertion = response.assertion
self.assertIsNotNone(assertion)
self.assertIsInstance(assertion, saml.Assertion)
issuer = response.issuer
self.assertEqual(self.RECIPIENT, response.destination)
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion.attribute_statement[0].attribute[0]
self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text)
user_domain_attribute = (
assertion.attribute_statement[0].attribute[1])
self.assertEqual(self.SUBJECT_DOMAIN,
user_domain_attribute.attribute_value[0].text)
role_attribute = assertion.attribute_statement[0].attribute[2]
for attribute_value in role_attribute.attribute_value:
self.assertIn(attribute_value.text, self.ROLES)
project_attribute = assertion.attribute_statement[0].attribute[3]
self.assertEqual(self.PROJECT,
project_attribute.attribute_value[0].text)
project_domain_attribute = (
assertion.attribute_statement[0].attribute[4])
self.assertEqual(self.PROJECT_DOMAIN,
project_domain_attribute.attribute_value[0].text)
def test_comma_in_certfile_path(self):
self.config_fixture.config(
group='saml',
certfile=CONF.saml.certfile + ',')
generator = keystone_idp.SAMLGenerator()
self.assertRaises(
exception.UnexpectedError,
generator.samlize_token,
self.ISSUER,
self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES,
self.PROJECT,
self.PROJECT_DOMAIN)
def test_comma_in_keyfile_path(self):
self.config_fixture.config(
group='saml',
keyfile=CONF.saml.keyfile + ',')
generator = keystone_idp.SAMLGenerator()
self.assertRaises(
exception.UnexpectedError,
generator.samlize_token,
self.ISSUER,
self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES,
self.PROJECT,
self.PROJECT_DOMAIN)
def test_verify_assertion_object(self):
"""Test that the Assertion object is built properly.
        The Assertion doesn't need to be signed in this test, so the
        _sign_assertion method is patched so that it doesn't alter the
        assertion.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
side_effect=lambda x: x):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
assertion = response.assertion
self.assertEqual(self.ASSERTION_VERSION, assertion.version)
def test_valid_saml_xml(self):
"""Test the generated SAML object can become valid XML.
        Test the generator directly by passing known arguments; the result
        should be a SAML object that consistently includes attributes based
        on the known arguments that were passed in.
"""
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
saml_str = response.to_string()
response = etree.fromstring(saml_str)
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion[4][0]
self.assertEqual(self.SUBJECT, user_attribute[0].text)
user_domain_attribute = assertion[4][1]
self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute[0].text)
role_attribute = assertion[4][2]
for attribute_value in role_attribute:
self.assertIn(attribute_value.text, self.ROLES)
project_attribute = assertion[4][3]
self.assertEqual(self.PROJECT, project_attribute[0].text)
project_domain_attribute = assertion[4][4]
self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute[0].text)
def test_assertion_using_explicit_namespace_prefixes(self):
def mocked_subprocess_check_output(*popenargs, **kwargs):
# the last option is the assertion file to be signed
filename = popenargs[0][-1]
with open(filename, 'r') as f:
assertion_content = f.read()
# since we are not testing the signature itself, we can return
# the assertion as is without signing it
return assertion_content
with mock.patch.object(subprocess, 'check_output',
side_effect=mocked_subprocess_check_output):
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT,
self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
assertion_xml = response.assertion.to_string()
            # The expected values in the assertions below need to be 'str' in
# Python 2 and 'bytes' in Python 3
# make sure we have the proper tag and prefix for the assertion
# namespace
self.assertIn(b'<saml:Assertion', assertion_xml)
self.assertIn(
('xmlns:saml="' + saml2.NAMESPACE + '"').encode('utf-8'),
assertion_xml)
self.assertIn(
('xmlns:xmldsig="' + xmldsig.NAMESPACE).encode('utf-8'),
assertion_xml)
def test_saml_signing(self):
"""Test that the SAML generator produces a SAML object.
Test the SAML generator directly by passing known arguments, the result
should be a SAML object that consistently includes attributes based on
the known arguments that were passed in.
"""
if not _is_xmlsec1_installed():
self.skipTest('xmlsec1 is not installed')
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
self.SUBJECT, self.SUBJECT_DOMAIN,
self.ROLES, self.PROJECT,
self.PROJECT_DOMAIN)
signature = response.assertion.signature
self.assertIsNotNone(signature)
self.assertIsInstance(signature, xmldsig.Signature)
idp_public_key = sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
cert_text = signature.key_info.x509_data[0].x509_certificate.text
# NOTE(stevemar): Rather than one line of text, the certificate is
# printed with newlines for readability, we remove these so we can
# match it with the key that we used.
cert_text = cert_text.replace(os.linesep, '')
self.assertEqual(idp_public_key, cert_text)
def _create_generate_saml_request(self, token_id, sp_id):
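        # Build the v3 auth body used by the SAML/ECP generation routes: an
        # existing token as the identity method, scoped to a service
        # provider instead of a project or domain.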
return {
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": token_id
}
},
"scope": {
"service_provider": {
"id": sp_id
}
}
}
}
def _fetch_valid_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def _fetch_domain_scoped_token(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
user_domain_id=self.domain['id'])
resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
def test_not_project_scoped_token(self):
"""Ensure SAML generation fails when passing domain-scoped tokens.
        The server should return a 403 Forbidden response.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_domain_scoped_token()
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.FORBIDDEN)
def test_generate_saml_route(self):
"""Test that the SAML generation endpoint produces XML.
        The SAML endpoint /v3/auth/OS-FEDERATION/saml2 should take as input
        a scoped token ID and a Service Provider ID.
The controller should fetch details about the user from the token,
and details about the service provider from its ID.
This should be enough information to invoke the SAML generator and
provide a valid SAML (XML) document back.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
http_response = self.post(self.SAML_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
expected_status=http_client.OK)
response = etree.fromstring(http_response.result)
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
# NOTE(stevemar): We should test this against expected values,
# but the self.xyz attribute names are uuids, and we mock out
# the result. Ideally we should update the mocked result with
# some known data, and create the roles/project/user before
# these tests run.
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
user_domain_attribute = assertion[4][1]
self.assertIsInstance(user_domain_attribute[0].text, str)
role_attribute = assertion[4][2]
self.assertIsInstance(role_attribute[0].text, str)
project_attribute = assertion[4][3]
self.assertIsInstance(project_attribute[0].text, str)
project_domain_attribute = assertion[4][4]
self.assertIsInstance(project_domain_attribute[0].text, str)
def test_invalid_scope_body(self):
"""Test that missing the scope in request body raises an exception.
Raises exception.SchemaValidationError() - error 400 Bad Request
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
del body['auth']['scope']
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.BAD_REQUEST)
def test_invalid_token_body(self):
"""Test that missing the token in request body raises an exception.
Raises exception.SchemaValidationError() - error 400 Bad Request
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
del body['auth']['identity']['token']
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.BAD_REQUEST)
def test_sp_not_found(self):
"""Test SAML generation with an invalid service provider ID.
Raises exception.ServiceProviderNotFound() - error Not Found 404
"""
sp_id = uuid.uuid4().hex
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id, sp_id)
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.NOT_FOUND)
def test_sp_disabled(self):
"""Try generating assertion for disabled Service Provider."""
# Disable Service Provider
sp_ref = {'enabled': False}
        self.federation_api.update_sp(self.SERVICE_PROVIDER_ID, sp_ref)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.FORBIDDEN)
def test_token_not_found(self):
"""Test that an invalid token in the request body raises an exception.
Raises exception.TokenNotFound() - error Not Found 404
"""
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
self.post(self.SAML_GENERATION_ROUTE, body=body,
expected_status=http_client.NOT_FOUND)
def test_generate_ecp_route(self):
"""Test that the ECP generation endpoint produces XML.
The ECP endpoint /v3/auth/OS-FEDERATION/saml2/ecp should take the same
input as the SAML generation endpoint (scoped token ID + Service
Provider ID).
The controller should return a SAML assertion that is wrapped in a
SOAP envelope.
"""
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
                                                  self.SERVICE_PROVIDER_ID)
with mock.patch.object(keystone_idp, '_sign_assertion',
return_value=self.signed_assertion):
http_response = self.post(self.ECP_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
expected_status=http_client.OK)
env_response = etree.fromstring(http_response.result)
header = env_response[0]
# Verify the relay state starts with 'ss:mem'
prefix = CONF.saml.relay_state_prefix
self.assertThat(header[0].text, matchers.StartsWith(prefix))
# Verify that the content in the body matches the expected assertion
body = env_response[1]
response = body[0]
issuer = response[0]
assertion = response[2]
self.assertEqual(self.RECIPIENT, response.get('Destination'))
self.assertEqual(self.ISSUER, issuer.text)
user_attribute = assertion[4][0]
self.assertIsInstance(user_attribute[0].text, str)
user_domain_attribute = assertion[4][1]
self.assertIsInstance(user_domain_attribute[0].text, str)
role_attribute = assertion[4][2]
self.assertIsInstance(role_attribute[0].text, str)
project_attribute = assertion[4][3]
self.assertIsInstance(project_attribute[0].text, str)
project_domain_attribute = assertion[4][4]
self.assertIsInstance(project_domain_attribute[0].text, str)
@mock.patch('saml2.create_class_from_xml_string')
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
@mock.patch.object(subprocess, 'check_output')
def test_sign_assertion(self, check_output_mock,
write_to_tempfile_mock, create_class_mock):
write_to_tempfile_mock.return_value = 'tmp_path'
check_output_mock.return_value = 'fakeoutput'
keystone_idp._sign_assertion(self.signed_assertion)
create_class_mock.assert_called_with(saml.Assertion, 'fakeoutput')
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
@mock.patch.object(subprocess, 'check_output')
def test_sign_assertion_exc(self, check_output_mock,
write_to_tempfile_mock):
        # If the command fails, its output is logged.
write_to_tempfile_mock.return_value = 'tmp_path'
sample_returncode = 1
sample_output = self.getUniqueString()
check_output_mock.side_effect = subprocess.CalledProcessError(
returncode=sample_returncode, cmd=CONF.saml.xmlsec1_binary,
output=sample_output)
logger_fixture = self.useFixture(fixtures.LoggerFixture())
self.assertRaises(exception.SAMLSigningError,
keystone_idp._sign_assertion,
self.signed_assertion)
expected_log = (
"Error when signing assertion, reason: Command '%s' returned "
"non-zero exit status %s %s\n" %
(CONF.saml.xmlsec1_binary, sample_returncode, sample_output))
self.assertEqual(expected_log, logger_fixture.output)
@mock.patch('oslo_utils.fileutils.write_to_tempfile')
def test_sign_assertion_fileutils_exc(self, write_to_tempfile_mock):
exception_msg = 'fake'
write_to_tempfile_mock.side_effect = Exception(exception_msg)
logger_fixture = self.useFixture(fixtures.LoggerFixture())
self.assertRaises(exception.SAMLSigningError,
keystone_idp._sign_assertion,
self.signed_assertion)
expected_log = (
'Error when signing assertion, reason: %s\n' % exception_msg)
self.assertEqual(expected_log, logger_fixture.output)
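# The two tests above mock out the temp-file handling and the external
# signing binary. For context, the signing path they exercise looks roughly
# like the sketch below; the exact xmlsec1 arguments are an assumption for
# illustration, not a quote of keystone's implementation:
#
#   def _sign_assertion(assertion):
#       filename = fileutils.write_to_tempfile(str(assertion))
#       command = [CONF.saml.xmlsec1_binary, '--sign', ..., filename]
#       try:
#           output = subprocess.check_output(command)
#       except Exception as e:
#           LOG.error('Error when signing assertion, reason: %s', e)
#           raise exception.SAMLSigningError(reason=e)
#       return saml2.create_class_from_xml_string(saml.Assertion, output)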
class IdPMetadataGenerationTests(test_v3.RestfulTestCase):
"""A class for testing Identity Provider Metadata generation."""
METADATA_URL = '/OS-FEDERATION/saml2/metadata'
def setUp(self):
super(IdPMetadataGenerationTests, self).setUp()
self.generator = keystone_idp.MetadataGenerator()
def config_overrides(self):
super(IdPMetadataGenerationTests, self).config_overrides()
self.config_fixture.config(
group='saml',
idp_entity_id=federation_fixtures.IDP_ENTITY_ID,
idp_sso_endpoint=federation_fixtures.IDP_SSO_ENDPOINT,
idp_organization_name=federation_fixtures.IDP_ORGANIZATION_NAME,
idp_organization_display_name=(
federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME),
idp_organization_url=federation_fixtures.IDP_ORGANIZATION_URL,
idp_contact_company=federation_fixtures.IDP_CONTACT_COMPANY,
idp_contact_name=federation_fixtures.IDP_CONTACT_GIVEN_NAME,
idp_contact_surname=federation_fixtures.IDP_CONTACT_SURNAME,
idp_contact_email=federation_fixtures.IDP_CONTACT_EMAIL,
idp_contact_telephone=(
federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER),
idp_contact_type=federation_fixtures.IDP_CONTACT_TYPE)
def test_check_entity_id(self):
metadata = self.generator.generate_metadata()
self.assertEqual(federation_fixtures.IDP_ENTITY_ID, metadata.entity_id)
def test_metadata_validity(self):
"""Call md.EntityDescriptor method that does internal verification."""
self.generator.generate_metadata().verify()
def test_serialize_metadata_object(self):
"""Check whether serialization doesn't raise any exceptions."""
self.generator.generate_metadata().to_string()
# TODO(marek-denis): Check values here
def test_check_idp_sso(self):
metadata = self.generator.generate_metadata()
idpsso_descriptor = metadata.idpsso_descriptor
self.assertIsNotNone(metadata.idpsso_descriptor)
self.assertEqual(federation_fixtures.IDP_SSO_ENDPOINT,
idpsso_descriptor.single_sign_on_service.location)
self.assertIsNotNone(idpsso_descriptor.organization)
organization = idpsso_descriptor.organization
self.assertEqual(federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME,
organization.organization_display_name.text)
self.assertEqual(federation_fixtures.IDP_ORGANIZATION_NAME,
organization.organization_name.text)
self.assertEqual(federation_fixtures.IDP_ORGANIZATION_URL,
organization.organization_url.text)
self.assertIsNotNone(idpsso_descriptor.contact_person)
contact_person = idpsso_descriptor.contact_person
self.assertEqual(federation_fixtures.IDP_CONTACT_GIVEN_NAME,
contact_person.given_name.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_SURNAME,
contact_person.sur_name.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_EMAIL,
contact_person.email_address.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER,
contact_person.telephone_number.text)
self.assertEqual(federation_fixtures.IDP_CONTACT_TYPE,
contact_person.contact_type)
def test_metadata_no_organization(self):
self.config_fixture.config(
group='saml',
idp_organization_display_name=None,
idp_organization_url=None,
idp_organization_name=None)
metadata = self.generator.generate_metadata()
idpsso_descriptor = metadata.idpsso_descriptor
self.assertIsNotNone(metadata.idpsso_descriptor)
self.assertIsNone(idpsso_descriptor.organization)
self.assertIsNotNone(idpsso_descriptor.contact_person)
def test_metadata_no_contact_person(self):
self.config_fixture.config(
group='saml',
idp_contact_name=None,
idp_contact_surname=None,
idp_contact_email=None,
idp_contact_telephone=None)
metadata = self.generator.generate_metadata()
idpsso_descriptor = metadata.idpsso_descriptor
self.assertIsNotNone(metadata.idpsso_descriptor)
self.assertIsNotNone(idpsso_descriptor.organization)
self.assertEqual([], idpsso_descriptor.contact_person)
def test_metadata_invalid_contact_type(self):
self.config_fixture.config(
group='saml',
idp_contact_type="invalid")
self.assertRaises(exception.ValidationError,
self.generator.generate_metadata)
def test_metadata_invalid_idp_sso_endpoint(self):
self.config_fixture.config(
group='saml',
idp_sso_endpoint=None)
self.assertRaises(exception.ValidationError,
self.generator.generate_metadata)
def test_metadata_invalid_idp_entity_id(self):
self.config_fixture.config(
group='saml',
idp_entity_id=None)
self.assertRaises(exception.ValidationError,
self.generator.generate_metadata)
def test_get_metadata_with_no_metadata_file_configured(self):
self.get(self.METADATA_URL,
expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_get_metadata(self):
self.config_fixture.config(
group='saml', idp_metadata_path=XMLDIR + '/idp_saml2_metadata.xml')
r = self.get(self.METADATA_URL, response_content_type='text/xml')
self.assertEqual('text/xml', r.headers.get('Content-Type'))
reference_file = _load_xml('idp_saml2_metadata.xml')
        # `reference_file` needs to be converted to bytes so that it can be
        # compared to `r.result` under Python 3.
reference_file = str.encode(reference_file)
self.assertEqual(reference_file, r.result)
class ServiceProviderTests(test_v3.RestfulTestCase):
"""A test class for Service Providers."""
MEMBER_NAME = 'service_provider'
COLLECTION_NAME = 'service_providers'
SERVICE_PROVIDER_ID = 'ACME'
SP_KEYS = ['auth_url', 'id', 'enabled', 'description',
'relay_state_prefix', 'sp_url']
def setUp(self):
super(ServiceProviderTests, self).setUp()
# Add a Service Provider
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.SP_REF = self.sp_ref()
self.SERVICE_PROVIDER = self.put(
url, body={'service_provider': self.SP_REF},
expected_status=http_client.CREATED).result
def sp_ref(self):
ref = {
'auth_url': 'https://' + uuid.uuid4().hex + '.com',
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': 'https://' + uuid.uuid4().hex + '.com',
'relay_state_prefix': CONF.saml.relay_state_prefix
}
return ref
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/service_providers/' + str(suffix)
return '/OS-FEDERATION/service_providers'
def _create_default_sp(self, body=None):
"""Create default Service Provider."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self.sp_ref()
resp = self.put(url, body={'service_provider': body},
expected_status=http_client.CREATED)
return resp
def test_get_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
resp = self.get(url)
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
def test_get_service_provider_fail(self):
url = self.base_url(suffix=uuid.uuid4().hex)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_create_service_provider(self):
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
resp = self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
@unit.skip_if_cache_disabled('federation')
def test_create_service_provider_invalidates_cache(self):
# List all service providers and make sure we only have one in the
# list. This service provider is from testing setup.
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(1)
)
# Create a new service provider.
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
# List all service providers again and make sure we have two in the
# returned list.
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(2)
)
@unit.skip_if_cache_disabled('federation')
def test_delete_service_provider_invalidates_cache(self):
# List all service providers and make sure we only have one in the
# list. This service provider is from testing setup.
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(1)
)
# Create a new service provider.
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
# List all service providers again and make sure we have two in the
# returned list.
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(2)
)
# Delete the service provider we created, which should invalidate the
# service provider cache. Get the list of service providers again and
# if the cache invalidated properly then we should only have one
# service provider in the list.
self.delete(url, expected_status=http_client.NO_CONTENT)
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(1)
)
@unit.skip_if_cache_disabled('federation')
def test_update_service_provider_invalidates_cache(self):
# List all service providers and make sure we only have one in the
# list. This service provider is from testing setup.
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(1)
)
# Create a new service provider.
service_provider_id = uuid.uuid4().hex
url = self.base_url(suffix=service_provider_id)
sp = self.sp_ref()
self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
# List all service providers again and make sure we have two in the
# returned list.
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(2)
)
        # Update the service provider we created, which should invalidate the
        # service provider cache. Get the list of service providers again and
        # if the cache invalidated properly then we should see the updated
        # value.
updated_description = uuid.uuid4().hex
body = {'service_provider': {'description': updated_description}}
self.patch(url, body=body, expected_status=http_client.OK)
resp = self.get(self.base_url(), expected_status=http_client.OK)
self.assertThat(
resp.json_body['service_providers'],
matchers.HasLength(2)
)
for sp in resp.json_body['service_providers']:
if sp['id'] == service_provider_id:
self.assertEqual(sp['description'], updated_description)
def test_create_sp_relay_state_default(self):
"""Create an SP without relay state, should default to `ss:mem`."""
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
del sp['relay_state_prefix']
resp = self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
sp_result = resp.result['service_provider']
self.assertEqual(CONF.saml.relay_state_prefix,
sp_result['relay_state_prefix'])
def test_create_sp_relay_state_non_default(self):
"""Create an SP with custom relay state."""
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
non_default_prefix = uuid.uuid4().hex
sp['relay_state_prefix'] = non_default_prefix
resp = self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
sp_result = resp.result['service_provider']
self.assertEqual(non_default_prefix,
sp_result['relay_state_prefix'])
def test_create_service_provider_fail(self):
"""Try adding SP object with unallowed attribute."""
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
sp[uuid.uuid4().hex] = uuid.uuid4().hex
self.put(url, body={'service_provider': sp},
expected_status=http_client.BAD_REQUEST)
def test_list_service_providers(self):
"""Test listing of service provider objects.
Add two new service providers. List all available service providers.
        Expect to get a list of three service providers (one created by
        setUp()). Test that the attributes match.
"""
ref_service_providers = {
uuid.uuid4().hex: self.sp_ref(),
uuid.uuid4().hex: self.sp_ref(),
}
for id, sp in ref_service_providers.items():
url = self.base_url(suffix=id)
self.put(url, body={'service_provider': sp},
expected_status=http_client.CREATED)
        # Insert ids into the service provider references; we will compare
        # them with the server responses, which include the 'id' attribute.
ref_service_providers[self.SERVICE_PROVIDER_ID] = self.SP_REF
for id, sp in ref_service_providers.items():
sp['id'] = id
url = self.base_url()
resp = self.get(url)
service_providers = resp.result
for service_provider in service_providers['service_providers']:
id = service_provider['id']
self.assertValidEntity(
service_provider, ref=ref_service_providers[id],
keys_to_check=self.SP_KEYS)
def test_update_service_provider(self):
"""Update existing service provider.
Update default existing service provider and make sure it has been
properly changed.
"""
new_sp_ref = self.sp_ref()
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
resp = self.patch(url, body={'service_provider': new_sp_ref})
patch_result = resp.result
new_sp_ref['id'] = self.SERVICE_PROVIDER_ID
self.assertValidEntity(patch_result['service_provider'],
ref=new_sp_ref,
keys_to_check=self.SP_KEYS)
resp = self.get(url)
get_result = resp.result
self.assertDictEqual(patch_result['service_provider'],
get_result['service_provider'])
def test_update_service_provider_immutable_parameters(self):
"""Update immutable attributes in service provider.
        In this particular case the test will try to change the ``id``
        attribute. The server should return an HTTP 400 Bad Request error
        code.
"""
new_sp_ref = {'id': uuid.uuid4().hex}
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.BAD_REQUEST)
def test_update_service_provider_unknown_parameter(self):
new_sp_ref = self.sp_ref()
new_sp_ref[uuid.uuid4().hex] = uuid.uuid4().hex
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.BAD_REQUEST)
def test_update_service_provider_returns_not_found(self):
new_sp_ref = self.sp_ref()
new_sp_ref['description'] = uuid.uuid4().hex
url = self.base_url(suffix=uuid.uuid4().hex)
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.NOT_FOUND)
def test_update_sp_relay_state(self):
"""Update an SP with custom relay state."""
new_sp_ref = self.sp_ref()
non_default_prefix = uuid.uuid4().hex
new_sp_ref['relay_state_prefix'] = non_default_prefix
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
resp = self.patch(url, body={'service_provider': new_sp_ref})
sp_result = resp.result['service_provider']
self.assertEqual(non_default_prefix,
sp_result['relay_state_prefix'])
def test_delete_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.delete(url)
def test_delete_service_provider_returns_not_found(self):
url = self.base_url(suffix=uuid.uuid4().hex)
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_filter_list_sp_by_id(self):
def get_id(resp):
sp = resp.result.get('service_provider')
return sp.get('id')
sp1_id = get_id(self._create_default_sp())
sp2_id = get_id(self._create_default_sp())
        # List the SPs; both should appear.
url = self.base_url()
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertIn(sp2_id, entities_ids)
# filter the SP by 'id'. Only SP1 should appear.
url = self.base_url() + '?id=' + sp1_id
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertNotIn(sp2_id, entities_ids)
def test_filter_list_sp_by_enabled(self):
def get_id(resp):
sp = resp.result.get('service_provider')
return sp.get('id')
sp1_id = get_id(self._create_default_sp())
sp2_ref = self.sp_ref()
sp2_ref['enabled'] = False
sp2_id = get_id(self._create_default_sp(body=sp2_ref))
        # List the SPs; we should get two SPs.
url = self.base_url()
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertIn(sp2_id, entities_ids)
# filter the SP by 'enabled'. Only SP1 should appear.
url = self.base_url() + '?enabled=True'
resp = self.get(url)
sps = resp.result.get('service_providers')
entities_ids = [e['id'] for e in sps]
self.assertIn(sp1_id, entities_ids)
self.assertNotIn(sp2_id, entities_ids)
class WebSSOTests(FederatedTokenTests):
"""A class for testing Web SSO."""
SSO_URL = '/auth/OS-FEDERATION/websso/'
SSO_TEMPLATE_NAME = 'sso_callback_template.html'
SSO_TEMPLATE_PATH = os.path.join(core.dirs.etc(), SSO_TEMPLATE_NAME)
TRUSTED_DASHBOARD = 'http://horizon.com'
ORIGIN = urllib.parse.quote_plus(TRUSTED_DASHBOARD)
PROTOCOL_REMOTE_ID_ATTR = uuid.uuid4().hex
def setUp(self):
super(WebSSOTests, self).setUp()
self.api = federation_controllers.Auth()
def config_overrides(self):
super(WebSSOTests, self).config_overrides()
self.config_fixture.config(
group='federation',
trusted_dashboard=[self.TRUSTED_DASHBOARD],
sso_callback_template=self.SSO_TEMPLATE_PATH,
remote_id_attribute=self.REMOTE_ID_ATTR)
def test_render_callback_template(self):
token_id = uuid.uuid4().hex
resp = self.api.render_html_response(self.TRUSTED_DASHBOARD, token_id)
        # The expected values in the assertions below need to be 'str' in
        # Python 2 and 'bytes' in Python 3.
self.assertIn(token_id.encode('utf-8'), resp.body)
self.assertIn(self.TRUSTED_DASHBOARD.encode('utf-8'), resp.body)
def test_federated_sso_auth(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0],
'QUERY_STRING': 'origin=%s' % self.ORIGIN}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
resp = self.api.federated_sso_auth(request, self.PROTOCOL)
        # `resp.body` will be `str` in Python 2 and `bytes` in Python 3,
        # which is why the expected value, `self.TRUSTED_DASHBOARD`,
        # needs to be encoded.
self.assertIn(self.TRUSTED_DASHBOARD.encode('utf-8'), resp.body)
def test_get_sso_origin_host_case_insensitive(self):
# test lowercase hostname in trusted_dashboard
environ = {'QUERY_STRING': 'origin=http://horizon.com'}
request = self.make_request(environ=environ)
host = self.api._get_sso_origin_host(request)
self.assertEqual("http://horizon.com", host)
# test uppercase hostname in trusted_dashboard
self.config_fixture.config(group='federation',
trusted_dashboard=['http://Horizon.com'])
host = self.api._get_sso_origin_host(request)
self.assertEqual("http://horizon.com", host)
def test_federated_sso_auth_with_protocol_specific_remote_id(self):
self.config_fixture.config(
group=self.PROTOCOL,
remote_id_attribute=self.PROTOCOL_REMOTE_ID_ATTR)
environment = {self.PROTOCOL_REMOTE_ID_ATTR: self.REMOTE_IDS[0],
'QUERY_STRING': 'origin=%s' % self.ORIGIN}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
resp = self.api.federated_sso_auth(request, self.PROTOCOL)
        # `resp.body` will be `str` in Python 2 and `bytes` in Python 3,
        # which is why the expected value, `self.TRUSTED_DASHBOARD`,
        # needs to be encoded.
self.assertIn(self.TRUSTED_DASHBOARD.encode('utf-8'), resp.body)
def test_federated_sso_auth_bad_remote_id(self):
environment = {self.REMOTE_ID_ATTR: self.IDP,
'QUERY_STRING': 'origin=%s' % self.ORIGIN}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
self.assertRaises(exception.IdentityProviderNotFound,
self.api.federated_sso_auth,
request, self.PROTOCOL)
def test_federated_sso_missing_query(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
self.assertRaises(exception.ValidationError,
self.api.federated_sso_auth,
request, self.PROTOCOL)
def test_federated_sso_missing_query_bad_remote_id(self):
environment = {self.REMOTE_ID_ATTR: self.IDP}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
self.assertRaises(exception.ValidationError,
self.api.federated_sso_auth,
request, self.PROTOCOL)
def test_federated_sso_untrusted_dashboard(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0],
'QUERY_STRING': 'origin=%s' % uuid.uuid4().hex}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
self.assertRaises(exception.Unauthorized,
self.api.federated_sso_auth,
request, self.PROTOCOL)
def test_federated_sso_untrusted_dashboard_bad_remote_id(self):
environment = {self.REMOTE_ID_ATTR: self.IDP,
'QUERY_STRING': 'origin=%s' % uuid.uuid4().hex}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment)
self.assertRaises(exception.Unauthorized,
self.api.federated_sso_auth,
request, self.PROTOCOL)
def test_federated_sso_missing_remote_id(self):
environment = copy.deepcopy(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment,
query_string='origin=%s' % self.ORIGIN)
self.assertRaises(exception.Unauthorized,
self.api.federated_sso_auth,
request, self.PROTOCOL)
def test_identity_provider_specific_federated_authentication(self):
environment = {self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}
environment.update(mapping_fixtures.EMPLOYEE_ASSERTION)
request = self.make_request(environ=environment,
query_string='origin=%s' % self.ORIGIN)
resp = self.api.federated_idp_specific_sso_auth(request,
self.idp['id'],
self.PROTOCOL)
        # `resp.body` will be `str` in Python 2 and `bytes` in Python 3,
        # which is why the expected value, `self.TRUSTED_DASHBOARD`,
        # needs to be encoded.
self.assertIn(self.TRUSTED_DASHBOARD.encode('utf-8'), resp.body)
class K2KServiceCatalogTests(test_v3.RestfulTestCase):
SP1 = 'SP1'
SP2 = 'SP2'
SP3 = 'SP3'
def setUp(self):
super(K2KServiceCatalogTests, self).setUp()
sp = self.sp_ref()
self.federation_api.create_sp(self.SP1, sp)
self.sp_alpha = {self.SP1: sp}
sp = self.sp_ref()
self.federation_api.create_sp(self.SP2, sp)
self.sp_beta = {self.SP2: sp}
sp = self.sp_ref()
self.federation_api.create_sp(self.SP3, sp)
self.sp_gamma = {self.SP3: sp}
self.token_v3_helper = token_common.V3TokenDataHelper()
def sp_response(self, id, ref):
ref.pop('enabled')
ref.pop('description')
ref.pop('relay_state_prefix')
ref['id'] = id
return ref
def sp_ref(self):
ref = {
'auth_url': uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': uuid.uuid4().hex,
'relay_state_prefix': CONF.saml.relay_state_prefix,
}
return ref
def _validate_service_providers(self, token, ref):
token_data = token['token']
self.assertIn('service_providers', token_data)
self.assertIsNotNone(token_data['service_providers'])
service_providers = token_data.get('service_providers')
self.assertEqual(len(ref), len(service_providers))
for entity in service_providers:
id = entity.get('id')
ref_entity = self.sp_response(id, ref.get(id))
self.assertDictEqual(entity, ref_entity)
def test_service_providers_in_token(self):
"""Check if service providers are listed in service catalog."""
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
ref = {}
for r in (self.sp_alpha, self.sp_beta, self.sp_gamma):
ref.update(r)
self._validate_service_providers(token, ref)
    def test_service_providers_in_token_disabled_sp(self):
"""Test behaviour with disabled service providers.
Disabled service providers should not be listed in the service
catalog.
"""
# disable service provider ALPHA
sp_ref = {'enabled': False}
self.federation_api.update_sp(self.SP1, sp_ref)
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
ref = {}
for r in (self.sp_beta, self.sp_gamma):
ref.update(r)
self._validate_service_providers(token, ref)
def test_no_service_providers_in_token(self):
"""Test service catalog with disabled service providers.
        There should be no ``service_providers`` entry in the catalog.
        The test passes provided no AttributeError is raised.
"""
sp_ref = {'enabled': False}
for sp in (self.SP1, self.SP2, self.SP3):
self.federation_api.update_sp(sp, sp_ref)
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
self.assertNotIn('service_providers', token['token'],
message=('Expected Service Catalog not to have '
'service_providers'))
|
yqm/sl4a
|
refs/heads/master
|
python/src/Lib/json/tests/test_pass3.py
|
55
|
from unittest import TestCase
import json
# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
'''
class TestPass3(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
|
marissazhou/django
|
refs/heads/master
|
tests/lookup/tests.py
|
89
|
from __future__ import unicode_literals
import collections
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author(name='Author 1')
self.au1.save()
self.au2 = Author(name='Author 2')
self.au2.save()
# Create a couple of Articles.
self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a1.save()
self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a2.save()
self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3.save()
self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a4.save()
self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a5.save()
self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a6.save()
self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
self.a7.save()
# Create a few Tags.
self.t1 = Tag(name='Tag 1')
self.t1.save()
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag(name='Tag 2')
self.t2.save()
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag(name='Tag 3')
self.t3.save()
self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
        # We can use .exists() to check that there are some articles.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
        # An integer value can be queried using a string.
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.Iterator)
self.assertQuerysetEqual(Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline'))
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
self.assertRaises(TypeError, Article.objects.in_bulk)
self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity)
self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity)
self.assertQuerysetEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity)
        # However, a FieldError will be raised if you specify a non-existent
        # field name in values() (a field that is neither in the model nor
        # in extra(select)).
self.assertRaises(FieldError,
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
'id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}], transform=identity)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
], transform=identity)
self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
transform=identity)
self.assertQuerysetEqual(
(
Author.objects
.values_list('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
], transform=identity)
self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
'<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()),
'<Article: Article 6>')
self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
self.assertEqual(repr(self.a6.get_next_by_pub_date()),
'<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
'<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
'<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
a8.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>'])
a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
a9.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>'])
a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
a10.save()
self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
            ['<Article: Article with \\ backslash>'])
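        # For illustration: a lookup such as headline__startswith='Article_'
        # is rendered (backend details vary) roughly as
        #
        #   WHERE headline LIKE 'Article\_%'
        #
        # i.e. Django escapes %, _ and \ in the user-supplied value before
        # building the pattern, which is what the assertions above rely on.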
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
])
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(
Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(
[article for article in Article.objects.none().iterator()],
[])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
try:
Article.objects.filter(pub_date_year='2005').count()
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
"into field. Choices are: author, author_id, headline, "
"id, pub_date, tag")
try:
Article.objects.filter(headline__starts='Article')
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(
str(ex), "Unsupported lookup 'starts' for CharField "
"or join on the field not permitted.")
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
a1 = Article(pub_date=now, headline='f')
a1.save()
a2 = Article(pub_date=now, headline='fo')
a2.save()
a3 = Article(pub_date=now, headline='foo')
a3.save()
a4 = Article(pub_date=now, headline='fooo')
a4.save()
a5 = Article(pub_date=now, headline='hey-Foo')
a5.save()
a6 = Article(pub_date=now, headline='bar')
a6.save()
a7 = Article(pub_date=now, headline='AbBa')
a7.save()
a8 = Article(pub_date=now, headline='baz')
a8.save()
a9 = Article(pub_date=now, headline='baxZ')
a9.save()
# zero-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
])
# one-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
# wildcard
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>'])
# leading anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
['<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>'])
# character sets
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
['<Article: baxZ>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
# and more articles:
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
# alternation
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
# greedy matching
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
])
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
def test_regex_null(self):
"""
Ensure that a regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
Ensure that a regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'),
['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
Ensure that a regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
Ensure that a lookup query containing non-fields raises the proper
exception.
"""
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Ensure that genuine field names don't collide with built-in lookup
types ('year', 'gt', 'range', 'in' etc.).
Refs #11670.
"""
# Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010])
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games = Game.objects.filter(season__year=2009)
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games = Game.objects.filter(season__year__in=[2011])
johnson = Player.objects.create(name="Johnson")
johnson.games = Game.objects.filter(season__year__in=[2011])
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
class LookupTransactionTests(TransactionTestCase):
available_apps = ['lookup']
@skipUnless(connection.vendor == 'mysql', 'requires MySQL')
def test_mysql_lookup_search(self):
        # To use full-text indexes on MySQL, either version 5.6+ is needed or
        # one must use MyISAM tables. Neither of these combinations is
        # currently available on CI, so let's manually create a MyISAM table
        # for the Article model.
with connection.cursor() as cursor:
cursor.execute(
"CREATE TEMPORARY TABLE myisam_article ("
" id INTEGER PRIMARY KEY AUTO_INCREMENT, "
" headline VARCHAR(100) NOT NULL "
") ENGINE MYISAM")
dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
MyISAMArticle.objects.create(headline='Ringo Star')
# NOTE: Needs to be created after the article has been saved.
cursor.execute(
'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
self.assertQuerysetEqual(
MyISAMArticle.objects.filter(headline__search='Reinhardt'),
[dr], lambda x: x)
|
yakky/django
|
refs/heads/master
|
tests/migrations/models.py
|
386
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class CustomModelBase(models.base.ModelBase):
pass
class ModelWithCustomBase(six.with_metaclass(CustomModelBase, models.Model)):
pass
@python_2_unicode_compatible
class UnicodeModel(models.Model):
title = models.CharField('ÚÑÍ¢ÓÐÉ', max_length=20, default='“Ðjáñgó”')
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
verbose_name = 'úñí©óðé µóðéø'
verbose_name_plural = 'úñí©óðé µóðéøß'
def __str__(self):
return self.title
class Unserializable(object):
"""
    An object that the migration framework doesn't know how to serialize.
"""
pass
class UnserializableModel(models.Model):
title = models.CharField(max_length=20, default=Unserializable())
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
class UnmigratedModel(models.Model):
"""
A model that is in a migration-less app (which this app is
if its migrations directory has not been repointed)
"""
pass
class EmptyManager(models.Manager):
use_in_migrations = True
class FoodQuerySet(models.query.QuerySet):
pass
class BaseFoodManager(models.Manager):
def __init__(self, a, b, c=1, d=2):
super(BaseFoodManager, self).__init__()
self.args = (a, b, c, d)
class FoodManager(BaseFoodManager.from_queryset(FoodQuerySet)):
use_in_migrations = True
class NoMigrationFoodManager(BaseFoodManager.from_queryset(FoodQuerySet)):
pass
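# --- Editor's sketch (illustrative, not part of the original fixtures) ---
# A minimal sketch of the argument capture performed by
# BaseFoodManager.__init__ above; managers marked use_in_migrations = True
# must keep their constructor arguments recoverable so migrations can
# serialize them:
def _example_manager_args():
    manager = FoodManager('jalapenos', 'habaneros')
    assert manager.args == ('jalapenos', 'habaneros', 1, 2)
    assert FoodManager.use_in_migrations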
|
jackrzhang/zulip
|
refs/heads/master
|
zerver/views/zephyr.py
|
2
|
from typing import Any, List, Dict, Optional, Callable, Tuple, Iterable, Sequence
from django.conf import settings
from django.http import HttpResponse, HttpRequest
from django.utils.translation import ugettext as _
from zerver.decorator import authenticated_json_view
from zerver.lib.ccache import make_ccache
from zerver.lib.request import has_request_variables, REQ, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.users import get_api_key
from zerver.models import UserProfile
import base64
import logging
import subprocess
import ujson
# Hack for mit.edu users whose Kerberos usernames don't match what they zephyr
# as. The key is for Kerberos and the value is for zephyr.
kerberos_alter_egos = {
'golem': 'ctl',
}
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(request: HttpRequest, user_profile: UserProfile,
cred: str=REQ(default=None)) -> HttpResponse:
global kerberos_alter_egos
if cred is None:
return json_error(_("Could not find Kerberos credential"))
if not user_profile.realm.webathena_enabled:
return json_error(_("Webathena login not enabled"))
try:
parsed_cred = ujson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user in kerberos_alter_egos:
user = kerberos_alter_egos[user]
assert user == user_profile.email.split("@")[0]
ccache = make_ccache(parsed_cred)
except Exception:
return json_error(_("Invalid Kerberos cache"))
# TODO: Send these data via (say) rabbitmq
try:
api_key = get_api_key(user_profile)
subprocess.check_call(["ssh", settings.PERSONAL_ZMIRROR_SERVER, "--",
"/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache",
user,
api_key,
base64.b64encode(ccache).decode("utf-8")])
except Exception:
logging.exception("Error updating the user's ccache")
return json_error(_("We were unable to set up mirroring for you"))
return json_success()
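# --- Editor's sketch (illustrative, not part of the view) ---
# A minimal sketch of the credential-name normalization performed in
# webathena_kerberos_login above, using a hand-built (hypothetical)
# Webathena credential blob:
def _example_alter_ego_mapping() -> None:
    parsed_cred = {"cname": {"nameString": ["golem"]}}
    user = parsed_cred["cname"]["nameString"][0]
    if user in kerberos_alter_egos:
        user = kerberos_alter_egos[user]
    assert user == "ctl"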
|
zafarali/emdp
|
refs/heads/master
|
emdp/gridworld/plotting.py
|
1
|
from .helper_utilities import unflatten_state
from .env import GridWorldMDP
import numpy as np
class GridWorldPlotter(object):
def __init__(self, grid_size, has_absorbing_state=True):
"""
Utility to plot gridworlds
:param grid_size: size of the gridworld
:param has_absorbing_state: boolean representing if the gridworld has an absorbing state
"""
if isinstance(grid_size, (GridWorldMDP,)):
raise TypeError('grid_size cannot be a GridWorldMDP. '
'To instantiate from GridWorldMDP use GridWorldPlotter.from_mdp()')
assert type(grid_size) is int, 'Gridworld size must be int'
self.size = grid_size
self.has_absorbing_state = has_absorbing_state
# TODO: store where the rewards are so we can plot them.
def _unflatten(self, onehot_state):
return unflatten_state(onehot_state, self.size, self.has_absorbing_state)
@staticmethod
def from_mdp(mdp):
# TODO: obtain reward specifications
if not isinstance(mdp, (GridWorldMDP,)):
raise TypeError('Only GridWorldMDPs can be used with GridWorldPlotters')
return GridWorldPlotter(mdp.size, mdp.has_absorbing_state)
def plot_grid(self, ax):
"""
Plots the skeleton of the grid world
:param ax:
:return:
"""
for i in range(self.size + 1):
ax.plot(np.arange(self.size + 1) - 0.5, np.ones(self.size + 1) * i - 0.5, color='k')
for i in range(self.size + 1):
ax.plot(np.ones(self.size + 1) * i - 0.5, np.arange(self.size + 1) - 0.5, color='k')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.grid(False)
return ax
def plot_trajectories(self, ax, trajectories, dont_unflatten=False, jitter_scale=1):
"""
Plots individual trajectory paths with some jitter.
:param ax: The axes to plot this on
:param trajectories: a list of trajectories. Each trajectory is a list of states (numpy arrays)
                     as returned by mdp.step(); they are automatically unflattened into
                     (x, y) pairs. To skip this automatic conversion use `dont_unflatten`.
:param dont_unflatten: will not automatically unflatten the trajectories into (x, y) pairs.
                       (!) this assumes you have already unflattened them!
:return:
"""
if not dont_unflatten:
trajectories_unflat = list(self.unflat_trajectories(trajectories))
else:
trajectories_unflat = trajectories
for trajectory_unflattened in trajectories_unflat:
x, y = list(zip(*trajectory_unflattened))
x = np.array(x) + jitter_scale * np.random.rand(len(x)) / (2 * self.size)
y = np.array(y) + jitter_scale * np.random.rand(len(x)) / (2 * self.size)
ax.plot(x, y)
return ax
def plot_environment(self, ax, wall_locs=None, plot_grid=False):
"""
Plots the environment with walls.
:param ax: The axes to plot this on
:param wall_locs: Locations of the walls, plotted in a different color.
:return:
"""
# plot states with background color white
state_background = np.ones((self.size, self.size))
# plot walls in lame way -- set them to some hand-engineered color
wall_img = np.zeros((self.size, self.size, 4))
if wall_locs is not None:
for state in wall_locs:
y_coord = state[0]
x_coord = state[1]
wall_img[y_coord, x_coord, 0] = 0.0 # R
wall_img[y_coord, x_coord, 1] = 0.0 # G
wall_img[y_coord, x_coord, 2] = 0.0 # B
wall_img[y_coord, x_coord, 3] = 1.0 # alpha
# render heatmap and overlay the walls image
imshow_ax = ax.imshow(state_background, interpolation=None)
imshow_ax = ax.imshow(wall_img, interpolation=None)
ax.grid(False)
# Switch on flag if you want to plot grid
if plot_grid:
for i in range(self.size + 1):
ax.plot(np.arange(self.size + 1) - 0.5, np.ones(self.size + 1) * i - 0.5, color='k')
for i in range(self.size + 1):
ax.plot(np.ones(self.size + 1) * i - 0.5, np.arange(self.size + 1) - 0.5, color='k')
ax.set_xlabel('x')
ax.set_ylabel('y')
return ax, imshow_ax
def plot_heatmap(self, ax, trajectories, dont_unflatten=False, wall_locs=None):
"""
Plots a state-visitation heatmap with walls.
:param ax: The axes to plot this on.
:param trajectories: a list of trajectories. Each trajectory is a list of states (numpy arrays)
                     as returned by mdp.step(); they are automatically unflattened into
                     (x, y) pairs. To skip this automatic conversion use `dont_unflatten`.
:param dont_unflatten: will not automatically unflatten the trajectories into (x, y) pairs.
                       (!) this assumes you have already unflattened them!
:param wall_locs: Locations of the walls, plotted in a different color.
:return:
"""
if not dont_unflatten:
trajectories_unflat = list(self.unflat_trajectories(trajectories))
else:
trajectories_unflat = trajectories
state_visitations = np.zeros((self.size, self.size))
# plot actual state visitation heatmap
for trajectory in trajectories_unflat:
for state in trajectory:
x_coord = state[0]
y_coord = state[1]
state_visitations[y_coord, x_coord] += 1.
# plot walls in lame way -- set them to some hand-engineered color
wall_img = np.zeros((self.size, self.size, 4))
if wall_locs is not None:
mid_visits = (np.max(state_visitations) - np.min(state_visitations)) / 2.
for state in wall_locs:
y_coord = state[0]
x_coord = state[1]
wall_img[y_coord, x_coord, 0] = 0.6 # R
wall_img[y_coord, x_coord, 1] = 0.4 # G
wall_img[y_coord, x_coord, 2] = 0.4 # B
wall_img[y_coord, x_coord, 3] = 1.0 # alpha
# render heatmap and overlay the walls image
imshow_ax = ax.imshow(state_visitations, interpolation=None)
imshow_ax = ax.imshow(wall_img, interpolation=None)
ax.grid(False)
return ax, imshow_ax
def unflat_trajectories(self, trajectories):
"""
Returns a generator where the trajectories have been unflattened.
:param trajectories:
:return:
"""
return map(lambda traj: list(map(self._unflatten, traj)), trajectories)
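# --- Editor's sketch (usage example, not part of the module) ---
# A minimal sketch of typical usage, assuming matplotlib is installed:
def _example_plot_grid():
    import matplotlib.pyplot as plt
    plotter = GridWorldPlotter(5, has_absorbing_state=False)
    fig, ax = plt.subplots()
    plotter.plot_grid(ax)  # draws the 5x5 grid skeleton
    fig.savefig('grid.png')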
|
opendatateam/udata
|
refs/heads/master
|
udata/models/taglist_field.py
|
2
|
from slugify import slugify
from mongoengine.fields import ListField, StringField
class TagListField(ListField):
def __init__(self, **kwargs):
self.tags = []
super(TagListField, self).__init__(StringField(), **kwargs)
def clean(self, value):
return sorted({slugify(v, to_lower=True) for v in value})
def to_python(self, value):
return super(TagListField, self).to_python(self.clean(value))
def to_mongo(self, value):
return super(TagListField, self).to_mongo(self.clean(value))
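# --- Editor's sketch (illustrative, not part of the module) ---
# A minimal sketch of what clean() does to raw tag input: values are
# slugified, duplicates collapse, and the result is sorted. Assumes the
# slugify package used above accepts the to_lower keyword and that the
# field can be instantiated outside a Document, as mongoengine fields
# normally can:
def _example_clean_tags():
    field = TagListField()
    assert field.clean(['Open Data', 'open-data', 'Transport']) == \
        ['open-data', 'transport']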
|
davidmontgom/pyvmomi-community-samples
|
refs/heads/master
|
samples/virtual_machine_device_info.py
|
10
|
#!/usr/bin/env python
# VMware vSphere Python SDK
# Copyright (c) 2008-2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import argparse
import getpass
from pyVim import connect
# Demonstrates:
# =============
# * How to write python 2.7 and 3.3 compatible code in one script
# * How to parse arguments in a python script
# * How to pretty print format a dictionary
# * How to connect to a vSphere instance
# * How to search for virtual machines efficiently
# * How to interrogate virtual machine hardware info
# * How to determine the data type of a dynamic object instance
# * How to build a summary of a virtual device & virtual disk
# * How to interrogate a datastore and its hosts mounts
#
# Not shown: how to ask a datastore for all the virtual machines it 'owns'
#
# Sample output:
#
# $ virtual_machine_device_info.py -s vcsa -u my_user -i 172.16.254.101
#
# Found Virtual Machine
# =====================
# guest OS name : Ubuntu Linux (64-bit)
# name : box
# last booted timestamp : 2014-10-13 01:45:57.647340+00:00
# bios UUID : 420264ab-848b-1586-b589-b9bd3a71b3aa
# path to VM : [storage0] box/box.vmx
# guest OS id : ubuntu64Guest
# host name : esx_host_01
# instance UUID : 500221fe-3473-60ff-fab2-1811600208a0
# Devices:
# --------
# label: IDE 0
# ------------------
# device type : vim.vm.device.VirtualIDEController
# backing type : NoneType
# key : 200
# summary : IDE 0
# label: IDE 1
# ------------------
# device type : vim.vm.device.VirtualIDEController
# backing type : NoneType
# key : 201
# summary : IDE 1
# label: PS2 controller 0
# ------------------
# device type : vim.vm.device.VirtualPS2Controller
# backing type : NoneType
# key : 300
# summary : PS2 controller 0
# label: PCI controller 0
# ------------------
# device type : vim.vm.device.VirtualPCIController
# backing type : NoneType
# key : 100
# summary : PCI controller 0
# label: SIO controller 0
# ------------------
# device type : vim.vm.device.VirtualSIOController
# backing type : NoneType
# key : 400
# summary : SIO controller 0
# label: Keyboard
# ------------------
# device type : vim.vm.device.VirtualKeyboard
# backing type : NoneType
# key : 600
# summary : Keyboard
# label: Pointing device
# ------------------
# device type : vim.vm.device.VirtualPointingDevice
# backing type : vim.vm.device.VirtualPointingDevice.DeviceBackingInfo
# key : 700
# summary : Pointing device; Device
# ------------------
# label: Video card
# ------------------
# device type : vim.vm.device.VirtualVideoCard
# backing type : NoneType
# key : 500
# summary : Video card
# label: VMCI device
# ------------------
# device type : vim.vm.device.VirtualVMCIDevice
# backing type : NoneType
# key : 12000
# summary : Device on the virtual machine PCI bus that provides supp
# label: SCSI controller 0
# ------------------
# device type : vim.vm.device.VirtualLsiLogicController
# backing type : NoneType
# key : 1000
# summary : LSI Logic
# label: Hard disk 1
# ------------------
# device type : vim.vm.device.VirtualDisk
# backing type : vim.vm.device.VirtualDisk.FlatVer2BackingInfo
# key : 2000
# summary : 16,777,216 KB
# datastore
# name: storage0
# host: esx_host_01
# summary
# url: ds:///vmfs/volumes/501fa6d9-8907f56a-fa19-782bcb74158e/
# freeSpace: 5750390784
# file system: VMFS
# capacity: 494726545408
# fileName: [storage0] box/box.vmdk
# device ID: None
# ------------------
# label: CD/DVD drive 1
# ------------------
# device type : vim.vm.device.VirtualCdrom
# backing type : vim.vm.device.VirtualCdrom.AtapiBackingInfo
# key : 3002
# summary : ATAPI /vmfs/devices/cdrom/mpx.vmhba0:C0:T0:L0
# ------------------
# label: Network adapter 1
# ------------------
# device type : vim.vm.device.VirtualE1000
# backing type : vim.vm.device.VirtualEthernetCard.NetworkBackingInfo
# key : 4000
# summary : VM Network
# ------------------
# label: Floppy drive 1
# ------------------
# device type : vim.vm.device.VirtualFloppy
# backing type : vim.vm.device.VirtualFloppy.RemoteDeviceBackingInfo
# key : 8000
# summary : Remote
# ------------------
# =====================
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--host',
required=True,
action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port',
required=False,
action='store',
help="port to use, default 443", default=443)
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--uuid',
required=False,
action='store',
help='Instance UUID (not BIOS id) of a VM to find.')
parser.add_argument('-i', '--ip',
required=False,
action='store',
help='IP address of the VM to search for')
args = parser.parse_args()
password = None
if args.password is None:
password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
if password:
args.password = password
return args
args = get_args()
# form a connection...
si = connect.SmartConnect(host=args.host, user=args.user, pwd=args.password,
port=args.port)
# Note: from daemons, use a shutdown hook to do this instead of atexit
atexit.register(connect.Disconnect, si)
# http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/vim.SearchIndex.html
search_index = si.content.searchIndex
# Always find managed objects using durable identifiers that the search
# index can resolve efficiently. This is much more reliable than caching
# information that is non-durable and potentially stale.
vm = None
if args.uuid:
vm = search_index.FindByUuid(None, args.uuid, True, True)
elif args.ip:
vm = search_index.FindByIp(None, args.ip, True)
if not vm:
print("Could not find a virtual machine to examine.")
exit(1)
print("Found Virtual Machine")
print("=====================")
details = {'name': vm.summary.config.name,
'instance UUID': vm.summary.config.instanceUuid,
'bios UUID': vm.summary.config.uuid,
'path to VM': vm.summary.config.vmPathName,
'guest OS id': vm.summary.config.guestId,
'guest OS name': vm.summary.config.guestFullName,
'host name': vm.runtime.host.name,
'last booted timestamp': vm.runtime.bootTime}
for name, value in details.items():
print(" {0:{width}{base}}: {1}".format(name, value, width=25, base='s'))
print(" Devices:")
print(" --------")
for device in vm.config.hardware.device:
# diving into each device, we pull out a few interesting bits
dev_details = {'key': device.key,
'summary': device.deviceInfo.summary,
'device type': type(device).__name__,
'backing type': type(device.backing).__name__}
print(" label: {0}".format(device.deviceInfo.label))
print(" ------------------")
for name, value in dev_details.items():
print(" {0:{width}{base}}: {1}".format(name, value,
width=15, base='s'))
if device.backing is None:
continue
# the following is a bit of a hack, but it lets us build a summary
# without making many assumptions about the backing type, if the
# backing type has a file name we *know* it's sitting on a datastore
# and will have to have all of the following attributes.
if hasattr(device.backing, 'fileName'):
datastore = device.backing.datastore
if datastore:
print(" datastore")
print(" name: {0}".format(datastore.name))
# there may be multiple hosts, the host property
# is a host mount info type not a host system type
# but we can navigate to the host system from there
for host_mount in datastore.host:
host_system = host_mount.key
print(" host: {0}".format(host_system.name))
print(" summary")
summary = {'capacity': datastore.summary.capacity,
'freeSpace': datastore.summary.freeSpace,
'file system': datastore.summary.type,
'url': datastore.summary.url}
for key, val in summary.items():
print(" {0}: {1}".format(key, val))
print(" fileName: {0}".format(device.backing.fileName))
print(" device ID: {0}".format(device.backing.backingObjectId))
print(" ------------------")
print("=====================")
exit()
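# --- Editor's sketch (illustrative, not part of the original sample) ---
# The header notes that asking a datastore for the virtual machines it
# 'owns' is not shown. A minimal sketch, assuming `datastore` is a
# vim.Datastore managed object like the ones reached through
# device.backing.datastore above; its 'vm' property lists the virtual
# machines that keep files on it:
def _example_datastore_vms(datastore):
    for owned_vm in datastore.vm:
        print(owned_vm.summary.config.name)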
|
broxtronix/thunder
|
refs/heads/master
|
python/thunder/rdds/fileio/tifffile.py
|
9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
This version has been modified as follows from the original tifffile.py
by Christoph Gohlke, available at http://www.lfd.uci.edu/~gohlke/:
* warning message about failure to find C extensions suppressed
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
# try:
# import _tifffile
# except ImportError:
# warnings.warn(
# "failed to import the optional _tifffile C extension module.\n"
# "Loading of some compressed images will be slow.\n"
# "Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = u'{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few programs can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not an RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume:
# use tiles to save volume data
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
# use one strip or tile per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
# if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
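# --- Editor's sketch (usage example, not part of the original module) ---
# A minimal sketch of the extratags mechanism documented in
# TiffWriter.save: each entry is (code, dtype, count, value, writeonce).
# Tag code 65000 is an arbitrary private code chosen for illustration:
def _example_extratags():
    data = numpy.random.rand(64, 64).astype('float32')
    with TiffWriter('extratags_demo.tif') as tif:
        tif.save(data, resolution=(300.0, 300.0),
                 extratags=[(65000, 's', 0, 'editor demo tag', True)])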
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('test.tif', key=0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
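# --- Editor's sketch (illustrative, not part of the original module) ---
# A minimal sketch of the lazyattr descriptor above: the wrapped function
# runs once on first access, and its return value then shadows the
# descriptor on the instance, so later accesses are plain attribute
# lookups:
def _example_lazyattr():
    calls = []
    class Demo(object):
        @lazyattr
        def expensive(self):
            calls.append(1)
            return 42
    d = Demo()
    assert d.expensive == 42 and d.expensive == 42
    assert len(calls) == 1  # the function body ran only once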
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if present.
All attributes are read-only.
Examples
--------
>>> with TiffFile('test.tif') as tif:
... data = tif.asarray()
... data.shape
(256, 256, 4)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
tif.close()
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look-up table, if present.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if present.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if present.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if present.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
                # some files contain multiple IFD entries with the same code
                # e.g. MicroManager files contain two image_description tags
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
        If any of 'squeeze', 'colormapped', or 'rgbonly' is not the default,
        the shape of the returned array might differ from the page shape.
        Parameters
        ----------
        squeeze : bool
            If True, all length-1 dimensions (except X and Y) are
            squeezed out of the result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            Intended for 64-bit systems and files with large contiguous data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
result **= 2 # squary root data format
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
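    # Usage sketch (illustrative; 'test.tif' is a hypothetical file name).
    # Pages are usually obtained from a TiffFile, which delegates to asarray:
    #   with TiffFile('test.tif') as tif:
    #       image = tif.pages[0].asarray(memmap=True)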
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
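    # The is_* properties above gate access to format-specific metadata.
    # Illustrative sketch, where 'page' is a TiffPage instance:
    #   if page.is_imagej:
    #       meta = page.imagej_tags  # consolidated ImageJ metadata
    #   elif page.is_stk:
    #       meta = page.uic_tags     # consolidated MetaMorph STK/UIC tags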
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
        although they fit into the tag's value element.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
    Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif")
>>> tifs.shape, tifs.axes
((2, 100), 'CT')
>>> data = tifs.asarray()
>>> data.shape
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
                if k.startswith('_'):  # does not work with bytes keys
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
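# Record maps attribute access onto dictionary items, so tag collections
# can be used like plain objects. A minimal illustration:
#   r = Record(width=256)
#   r.width        # -> 256
#   r.height = 512
#   r['height']    # -> 512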
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
* Allow to re-open closed files (for multi file formats such as OME-TIFF).
* Read numpy arrays and records from file like objects.
Only binary read, seek, tell, and close are supported on embedded files.
    When initialized from another file handle, do not use the original
    handle until this FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, file has a fileno and can be memory mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
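# Usage sketch for embedded files (file names are hypothetical):
# FileHandle re-bases read/seek/tell relative to 'offset', so a region of
# a container can be parsed as if it were a standalone file:
#   outer = FileHandle('container.bin')
#   inner = FileHandle(outer, offset=1024, size=4096)
#   inner.seek(0)
#   inner.tell()   # -> 0, i.e. absolute position 1024 in 'container.bin'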
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
    if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
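# Minimal illustration of the parser above (the input is a made-up but
# representative ImageJ description):
#   imagej_description(b'ImageJ=1.11a\nimages=510\nhyperstack=true\n')
#   # -> {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}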
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if not package:
module = import_module(module)
else:
module = import_module('.' + module, package=package)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
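# Sanity check for the pure-Python decoder above, using the start of
# Apple's classic PackBits example (illustrative):
#   decodepackbits(b'\xfe\xaa\x02\x80\x00\x2a')
#   # -> b'\xaa\xaa\xaa\x80\x00\x2a' (a run of three 0xAA, then 3 literals)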
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
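# Illustrative example: two 4-bit samples packed into a single byte,
# verified against the pure-Python fallback above:
#   unpackints(b'\xff', 'B', 4, runlen=2)
#   # -> array([15, 15], dtype=uint8)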
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
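# Illustrative example: 'top_right' mirrors the X (width) axis, which is
# axis -2 of the non-squeezed array:
#   a = numpy.arange(4).reshape(1, 2, 2, 1)  # I, Y, X, S
#   reorient(a, 'top_right')[0, :, :, 0]
#   # -> [[1, 0],
#   #     [3, 2]]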
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
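# Typical use (a sketch; 'multipage.tif' is a hypothetical file):
#   with TiffFile('multipage.tif') as tif:
#       data = stack_pages(tif.pages)  # shape: (len(pages),) + page shape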
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00')
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00')
b'string\\x00string\\n'
>>> stripascii(b'\\x00')
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
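# Illustrative examples: format_size(2048) returns '2 KB' and
# format_size(123456789) returns '118 MB' (sizes below 2048 keep their unit).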
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, millisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
    hour, millisecond = divmod(millisecond, 1000 * 60 * 60)
    minute, millisecond = divmod(millisecond, 1000 * 60)
    second, millisecond = divmod(millisecond, 1000)
    return datetime.datetime(year, month, day,
                             hour, minute, second, millisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
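# The new_subfile_type tag value is a bit mask; illustrative examples:
# TIFF_SUBFILE_TYPES()[3] returns ('reduced_image', 'page') and
# TIFF_SUBFILE_TYPES()[0] returns ().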
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
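# The mapping is now bidirectional, e.g. AXES_LABELS['X'] == 'width' and
# AXES_LABELS['width'] == 'X'.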
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
    32995: ('sgi_matteing', None, None, 1, None),  # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
        if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            elif data.dtype.kind == 'f':  # elif, so integer data doesn't fall through to vmin = 0
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            else:
                vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
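# Minimal usage sketch (illustrative, not part of the original module): show a
# 4-dimensional random array with one slider per leading dimension. Requires
# matplotlib; the data shape and title are made up for the example.
def _example_imshow():
    from matplotlib import pyplot  # imshow expects pyplot already imported
    data = numpy.random.rand(4, 3, 256, 256)  # e.g. a TZYX stack
    imshow(data, title='random TZYX stack', photometric='minisblack')
    pyplot.show()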
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main())
|
popazerty/openblackhole-SH4
|
refs/heads/master
|
lib/python/Screens/InputBox.py
|
14
|
from enigma import getPrevAsciiCode
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.Input import Input
from Components.config import config
from Tools.BoundFunction import boundFunction
from Tools.Notifications import AddPopup
from time import time
class InputBox(Screen):
def __init__(self, session, title = "", windowTitle = _("Input"), useableChars = None, **kwargs):
Screen.__init__(self, session)
self["text"] = Label(title)
self["input"] = Input(**kwargs)
self.onShown.append(boundFunction(self.setTitle, windowTitle))
if useableChars is not None:
self["input"].setUseableChars(useableChars)
self["actions"] = NumberActionMap(["WizardActions", "InputBoxActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.gotAsciiCode,
"ok": self.go,
"back": self.cancel,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDelete,
"deleteBackward": self.keyBackspace,
"tab": self.keyTab,
"toggleOverwrite": self.keyInsert,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
if self["input"].type == Input.TEXT:
if config.misc.remotecontrol_text_support.value:
self.onExecBegin.append(self.setKeyboardModeNone)
else:
self.onExecBegin.append(self.setKeyboardModeAscii)
else:
self.onExecBegin.append(self.setKeyboardModeNone)
def gotAsciiCode(self):
self["input"].handleAscii(getPrevAsciiCode())
def keyLeft(self):
self["input"].left()
def keyRight(self):
self["input"].right()
def keyNumberGlobal(self, number):
self["input"].number(number)
def keyDelete(self):
self["input"].delete()
def go(self):
self.close(self["input"].getText())
def cancel(self):
self.close(None)
def keyHome(self):
self["input"].home()
def keyEnd(self):
self["input"].end()
def keyBackspace(self):
self["input"].deleteBackward()
def keyTab(self):
self["input"].tab()
def keyInsert(self):
self["input"].toggleOverwrite()
class PinInput(InputBox):
def __init__(self, session, service="", triesEntry=None, pinList=None, popup=False, simple=True, *args, **kwargs):
if not pinList: pinList = []
InputBox.__init__(self, session = session, text = " ", maxSize = True, type = Input.PIN, *args, **kwargs)
self.waitTime = 15
self.triesEntry = triesEntry
self.pinList = pinList
self["service"] = Label(service)
if service and simple:
self.skinName = "PinInputPopup"
if self.getTries() == 0:
if (self.triesEntry.time.value + (self.waitTime * 60)) > time():
remaining = (self.triesEntry.time.value + (self.waitTime * 60)) - time()
remainingMinutes = int(remaining / 60)
remainingSeconds = int(remaining % 60)
messageText = _("You have to wait %s!") % (str(remainingMinutes) + " " + _("minutes") + ", " + str(remainingSeconds) + " " + _("seconds"))
if service and simple:
AddPopup(messageText, type = MessageBox.TYPE_ERROR, timeout = 3)
self.closePinCancel()
else:
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.closePinCancel, MessageBox, messageText, MessageBox.TYPE_ERROR, timeout = 3))
else:
self.setTries(3)
self["tries"] = Label("")
self.onShown.append(self.showTries)
def gotAsciiCode(self):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.gotAsciiCode(self)
self.go()
else:
InputBox.gotAsciiCode(self)
def keyNumberGlobal(self, number):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.keyNumberGlobal(self, number)
self.go()
else:
InputBox.keyNumberGlobal(self, number)
def checkPin(self, pin):
if pin is not None and " " not in pin and int(pin) in self.pinList:
return True
return False
def go(self):
self.triesEntry.time.value = int(time())
self.triesEntry.time.save()
if self.checkPin(self["input"].getText()):
self.setTries(3)
self.closePinCorrect()
else:
self.keyHome()
self.decTries()
			if self.getTries() == 0:
				self.closePinWrong()
def closePinWrong(self, *args):
print "args:", args
self.close(False)
def closePinCorrect(self, *args):
self.setTries(3)
self.close(True)
def closePinCancel(self, *args):
self.close(None)
def cancel(self):
self.closePinCancel()
def getTries(self):
return self.triesEntry.tries.value
def decTries(self):
self.setTries(self.triesEntry.tries.value - 1)
self.showTries()
def setTries(self, tries):
self.triesEntry.tries.value = tries
self.triesEntry.tries.save()
def showTries(self):
self["tries"].setText(_("Tries left:") + " " + str(self.getTries()))
|
brownharryb/erpnext
|
refs/heads/develop
|
erpnext/selling/report/sales_person_wise_transaction_summary/sales_person_wise_transaction_summary.py
|
12
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt
from erpnext import get_company_currency
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
entries = get_entries(filters)
item_details = get_item_details()
data = []
company_currency = get_company_currency(filters.get("company"))
for d in entries:
if d.stock_qty > 0 or filters.get('show_return_entries', 0):
data.append([
d.name, d.customer, d.territory, d.warehouse, d.posting_date, d.item_code,
item_details.get(d.item_code, {}).get("item_group"), item_details.get(d.item_code, {}).get("brand"),
d.stock_qty, d.base_net_amount, d.sales_person, d.allocated_percentage, d.contribution_amt, company_currency
])
if data:
total_row = [""]*len(data[0])
data.append(total_row)
return columns, data
def get_columns(filters):
if not filters.get("doc_type"):
msgprint(_("Please select the document type first"), raise_exception=1)
columns = [
{
"label": _(filters["doc_type"]),
"options": filters["doc_type"],
"fieldname": frappe.scrub(filters['doc_type']),
"fieldtype": "Link",
"width": 140
},
{
"label": _("Customer"),
"options": "Customer",
"fieldname": "customer",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Territory"),
"options": "Territory",
"fieldname": "territory",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Warehouse"),
"options": "Warehouse",
"fieldname": "warehouse",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Posting Date"),
"fieldname": "posting_date",
"fieldtype": "Date",
"width": 140
},
{
"label": _("Item Code"),
"options": "Item",
"fieldname": "item_code",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Item Group"),
"options": "Item Group",
"fieldname": "item_group",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Brand"),
"options": "Brand",
"fieldname": "brand",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Qty"),
"fieldname": "qty",
"fieldtype": "Float",
"width": 140
},
{
"label": _("Amount"),
"options": "currency",
"fieldname": "amount",
"fieldtype": "Currency",
"width": 140
},
{
"label": _("Sales Person"),
"options": "Sales Person",
"fieldname": "sales_person",
"fieldtype": "Link",
"width": 140
},
{
"label": _("Contribution %"),
"fieldname": "contribution",
"fieldtype": "Float",
"width": 140
},
{
"label": _("Contribution Amount"),
"options": "currency",
"fieldname": "contribution_amt",
"fieldtype": "Currency",
"width": 140
},
{
"label":_("Currency"),
"options": "Currency",
"fieldname":"currency",
"fieldtype":"Link",
"hidden" : 1
}
]
return columns
def get_entries(filters):
	date_field = "transaction_date" if filters["doc_type"] == "Sales Order" else "posting_date"
if filters["doc_type"] == "Sales Order":
qty_field = "delivered_qty"
else:
qty_field = "qty"
conditions, values = get_conditions(filters, date_field)
entries = frappe.db.sql("""
SELECT
dt.name, dt.customer, dt.territory, dt.%s as posting_date, dt_item.item_code,
st.sales_person, st.allocated_percentage, dt_item.warehouse,
CASE
WHEN dt.status = "Closed" THEN dt_item.%s * dt_item.conversion_factor
ELSE dt_item.stock_qty
END as stock_qty,
CASE
WHEN dt.status = "Closed" THEN (dt_item.base_net_rate * dt_item.%s * dt_item.conversion_factor)
ELSE dt_item.base_net_amount
END as base_net_amount,
CASE
WHEN dt.status = "Closed" THEN ((dt_item.base_net_rate * dt_item.%s * dt_item.conversion_factor) * st.allocated_percentage/100)
ELSE dt_item.base_net_amount * st.allocated_percentage/100
END as contribution_amt
FROM
`tab%s` dt, `tab%s Item` dt_item, `tabSales Team` st
WHERE
st.parent = dt.name and dt.name = dt_item.parent and st.parenttype = %s
and dt.docstatus = 1 %s order by st.sales_person, dt.name desc
""" %(date_field, qty_field, qty_field, qty_field, filters["doc_type"], filters["doc_type"], '%s', conditions),
tuple([filters["doc_type"]] + values), as_dict=1)
return entries
def get_conditions(filters, date_field):
conditions = [""]
values = []
for field in ["company", "customer", "territory"]:
if filters.get(field):
conditions.append("dt.{0}=%s".format(field))
values.append(filters[field])
if filters.get("sales_person"):
lft, rgt = frappe.get_value("Sales Person", filters.get("sales_person"), ["lft", "rgt"])
conditions.append("exists(select name from `tabSales Person` where lft >= {0} and rgt <= {1} and name=st.sales_person)".format(lft, rgt))
if filters.get("from_date"):
conditions.append("dt.{0}>=%s".format(date_field))
values.append(filters["from_date"])
if filters.get("to_date"):
conditions.append("dt.{0}<=%s".format(date_field))
values.append(filters["to_date"])
items = get_items(filters)
if items:
conditions.append("dt_item.item_code in (%s)" % ', '.join(['%s']*len(items)))
values += items
return " and ".join(conditions), values
def get_items(filters):
if filters.get("item_group"): key = "item_group"
elif filters.get("brand"): key = "brand"
else: key = ""
items = []
if key:
items = frappe.db.sql_list("""select name from tabItem where %s = %s""" %
(key, '%s'), (filters[key]))
return items
def get_item_details():
item_details = {}
for d in frappe.db.sql("""SELECT `name`, `item_group`, `brand` FROM `tabItem`""", as_dict=1):
item_details.setdefault(d.name, d)
return item_details
|
liangazhou/django-rdp
|
refs/heads/master
|
packages/eclipse/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/pydoc.py
|
11
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
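# Illustrative example: cram('abcdefghij', 8) returns 'ab...hij' (2 leading
# characters, an ellipsis, then 3 trailing characters, 8 characters total).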
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
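# Illustrative examples: visiblename('__doc__') returns 0 (hidden special
# name), visiblename('__init__') returns 1, visiblename('_helper') returns
# False, and visiblename('helper', all=['helper']) returns True.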
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo', '$py.class'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
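# A short session showing the dotted-path behaviour described in the
# docstring: the *leaf* module comes back, and a missing module yields None
# instead of an exception (illustrative):
#
#     >>> import pydoc
#     >>> pydoc.safeimport('xml.dom').__name__
#     'xml.dom'
#     >>> pydoc.safeimport('no.such.module') is None
#     True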
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
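# Doc is an abstract dispatcher: document() routes each object to docmodule/
# docclass/docroutine/..., and anything a subclass does not override falls
# through to fail(). A minimal custom formatter only needs the handlers it
# cares about -- a sketch, not a supported API pattern:
#
#     >>> import os, pydoc
#     >>> class NameDoc(pydoc.Doc):
#     ...     def docmodule(self, object, name=None, *args):
#     ...         return 'module ' + object.__name__
#     >>> NameDoc().document(os)
#     'module os'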
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
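# Quick illustration of HTMLRepr (an example, not part of pydoc): values are
# cram()med to the max* limits and HTML-escaped, so reprs are safe to drop
# into generated pages (exact truncation depends on the settings above):
#
#     >>> import pydoc
#     >>> pydoc.HTMLRepr().repr('<b>' * 3)
#     "'&lt;b&gt;&lt;b&gt;&lt;b&gt;'"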
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
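    # markup() is the heart of docstring rendering: bare URLs, RFC/PEP
    # references and names found in the given dictionaries become anchors.
    # Illustrative, using the module-level `html` instance defined below:
    #
    #     >>> import pydoc
    #     >>> pydoc.html.markup('See PEP 8.')
    #     'See <a href="http://www.python.org/dev/peps/pep-0008/">PEP 8</a>.'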
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
                # Look up the live attribute by its own name (key), not the
                # enclosing class name; fall through to the classified value
                # if the descriptor's __get__ fails.
                value = getattr(object, key)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
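# Putting HTMLDoc together end to end (illustrative; writedoc() below does
# essentially this and writes the result to <name>.html):
#
#     >>> import pydoc
#     >>> h = pydoc.HTMLDoc()
#     >>> page = h.page('Python: os', h.document(__import__('os'), 'os'))
#     >>> page[:14]
#     '\n<!DOCTYPE htm'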
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
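    # The text formatter emits classic overstruck bold (char, backspace,
    # char), which pagers such as less(1) render as bold; plain() below
    # strips it again. Illustrative, using the module-level `text` instance
    # defined further down:
    #
    #     >>> import pydoc
    #     >>> pydoc.text.bold('hi')
    #     'h\x08hi\x08i'
    #     >>> pydoc.plain(pydoc.text.bold('hi'))
    #     'hi'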
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        # LINES comes from the environment as a string; coerce before arithmetic.
        r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
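# describe() provides the short phrases used in page titles and "Help on ..."
# headers (illustrative; the second result is platform-dependent):
#
#     >>> import os.path, pydoc
#     >>> pydoc.describe(len)
#     'built-in function len'
#     >>> pydoc.describe(os.path)
#     'module posixpath'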
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
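# locate() resolves dotted names by importing as far as possible and then
# walking attributes; a missing leaf gives None rather than an exception
# (illustrative):
#
#     >>> import pydoc
#     >>> pydoc.locate('os.path.join')        # doctest: +ELLIPSIS
#     <function join at 0x...>
#     >>> pydoc.locate('os.path.no_such_name') is None
#     True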
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
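# The two entry points above in one illustrative session (writedoc() writes
# os.html into the current working directory):
#
#     >>> import pydoc
#     >>> pydoc.doc('os.getcwd')     # pages "Help on built-in function getcwd"
#     >>> pydoc.writedoc('os')       # prints "wrote os.html"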
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
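# This module-level instance is what the interactive help() builtin forwards
# to (site.py installs a thin wrapper around it), so both spellings below end
# up in Helper.help() -- illustrative:
#
#     >>> from pydoc import help
#     >>> help('while')     # keyword -> showtopic('while')
#     >>> help([])          # object  -> documentation for the list type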
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
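# apropos() backs both "pydoc -k <keyword>" on the command line and the
# "modules <keyword>" command of the interactive helper. It scans all of
# sys.path, so it can take a while (output abbreviated, will vary):
#
#     >>> import pydoc
#     >>> pydoc.apropos('http')      # doctest: +SKIP
#     BaseHTTPServer - HTTP server base class.
#     ...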
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
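# Minimal illustrative use of serve(): it blocks in serve_until_quit(), so
# run it from a dedicated thread or process (gui() below starts it in a
# thread and uses the callback to learn the URL):
#
#     >>> import pydoc
#     >>> pydoc.serve(7464)          # then browse http://localhost:7464/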
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
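# Typical invocations (illustrative, mirroring the usage text above):
#   pydoc.py os.path        # show text documentation for a module
#   pydoc.py -k select      # keyword search across module synopses
#   pydoc.py -p 7464        # serve HTML docs at http://localhost:7464/
#   pydoc.py -w json        # write json.html into the current directory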
|
EricQAQ/Puck
|
refs/heads/master
|
puck/globals.py
|
1
|
from .local import LocalStack
class Proxy(object):
def __init__(self, local):
        # A plain `self.__local = local` would be routed through this class's
        # own __setattr__ and hit the (not yet bound) proxied object, so the
        # attribute is stored directly on the instance instead.
        object.__setattr__(self, '__local', local)
def _get_real_object(self):
"""Get the real object behind the Proxy. So that the users can
operate the real things through the proxy
"""
try:
return object.__getattribute__(self, '__local')()
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
def __setattr__(self, key, value):
setattr(self._get_real_object(), key, value)
def __getattr__(self, item):
return getattr(self._get_real_object(), item)
def __getitem__(self, item):
return self._get_real_object().__getitem__(item)
def __setitem__(self, key, value):
self._get_real_object().__setitem__(key, value)
request_stack = LocalStack()
current_app = Proxy(lambda: request_stack.top.app)
request = Proxy(lambda: request_stack.top.request)
session = Proxy(lambda: request_stack.top.session)
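# A minimal usage sketch (hypothetical, not part of the original module): the
# proxies resolve lazily, so `current_app` always reflects whatever context
# object sits on top of `request_stack` at call time.
if __name__ == '__main__':
    class _FakeContext(object):
        app = 'demo-app'
        request = 'GET /'
        session = {}
    request_stack.push(_FakeContext())  # assumes LocalStack exposes push()
    print(current_app.upper())  # attribute access is forwarded -> 'DEMO-APP'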
|
davehunt/kuma
|
refs/heads/master
|
kuma/wiki/tests/test_content.py
|
9
|
# -*- coding: utf-8 -*-
from urlparse import urljoin
import bleach
from cssselect.parser import SelectorSyntaxError
from jinja2 import escape, Markup
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from pyquery import PyQuery as pq
from kuma.core.tests import KumaTestCase
from kuma.users.tests import UserTestCase
import kuma.wiki.content
from ..constants import ALLOWED_TAGS, ALLOWED_ATTRIBUTES
from ..content import (CodeSyntaxFilter, SectionTOCFilter, SectionIDFilter,
H2TOCFilter, H3TOCFilter, SECTION_TAGS,
get_seo_description, get_content_sections,
extract_css_classnames, extract_html_attributes,
extract_kumascript_macro_names)
from ..helpers import bugize_text
from ..models import Document
from . import normalize_html, doc_rev, document
class ContentSectionToolTests(UserTestCase):
def test_section_pars_for_empty_docs(self):
doc = document(title='Doc', locale=u'fr', slug=u'doc', save=True,
html='<!-- -->')
res = get_content_sections(doc.html)
eq_(type(res).__name__, 'list')
def test_section_ids(self):
doc_src = """
<h1 class="header1">Header One</h1>
<p>test</p>
<section>
<h1 class="header2">Header Two</h1>
<p>test</p>
</section>
<h2 name="Constants" class="hasname">This title does not match the name</h2>
<p>test</p>
<h1 id="i-already-have-an-id" class="hasid">This text clobbers the ID</h1>
<h1 class="header3">Header Three</h1>
<p>test</p>
<section id="Quick_Links" class="Quick_Links">
<ol>
<li>Hey look, quick links</li>
</ol>
</section>
"""
result_src = (kuma.wiki.content
.parse(doc_src)
.injectSectionIDs()
.serialize())
result_doc = pq(result_src)
expected = (
('header1', 'Header_One'),
('header2', 'Header_Two'),
('hasname', 'Constants'),
('hasid', 'This_text_clobbers_the_ID'),
('Quick_Links', 'Quick_Links'),
)
for cls, id in expected:
eq_(id, result_doc.find('.%s' % cls).attr('id'))
# Then, ensure all elements in need of an ID now all have unique IDs.
ok_(len(SECTION_TAGS) > 0)
els = result_doc.find(', '.join(SECTION_TAGS))
seen_ids = set()
for i in range(0, len(els)):
id = els.eq(i).attr('id')
ok_(id is not None)
ok_(id not in seen_ids)
seen_ids.add(id)
def test_incremented_section_ids(self):
doc_src = """
<h1 class="header1">Header One</h1>
<h1>Header One</h1>
<h1>Header One</h1>
<h1>Header Two</h1>
<h1 name="someId">Header Two</h1>
"""
result_src = (kuma.wiki.content
.parse(doc_src)
.injectSectionIDs()
.serialize())
expected = """
<h1 id="Header_One" class="header1">Header One</h1>
<h1 id="Header_One_2">Header One</h1>
<h1 id="Header_One_3">Header One</h1>
<h1 id="Header_Two">Header Two</h1>
<h1 id="someId" name="someId">Header Two</h1>
"""
self.assertHTMLEqual(result_src, expected)
# Ensure 1, 2 doesn't turn into 3, 4
result_src = (kuma.wiki.content
.parse(expected)
.injectSectionIDs()
.serialize())
self.assertHTMLEqual(result_src, expected)
def test_simple_implicit_section_extract(self):
doc_src = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">Head 2</h1>
<p>test</p>
<p>test</p>
"""
expected = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content.parse(doc_src)
.extractSection(id="s1")
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_contained_implicit_section_extract(self):
doc_src = """
<h1 id="s4-next">Head</h1>
<p>test</p>
<section id="parent-s5">
<h1 id="s5">Head 5</h1>
<p>test</p>
<p>test</p>
<section>
<h1>head subsection</h1>
</section>
<h2 id="s5-1">Head 5-1</h2>
<p>test</p>
<p>test</p>
<h1 id="s5-next">Head 5 next</h1>
<p>test</p>
<p>test</p>
</section>
<h1 id="s7">Head 7</h1>
<p>test</p>
<p>test</p>
"""
expected = """
<h1 id="s5">Head 5</h1>
<p>test</p>
<p>test</p>
<section>
<h1>head subsection</h1>
</section>
<h2 id="s5-1">Head 5-1</h2>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content.parse(doc_src)
.extractSection(id="s5")
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_explicit_section_extract(self):
doc_src = """
<h1 id="s4-next">Head</h1>
<p>test</p>
<section id="parent-s5">
<h1 id="s5">Head 5</h1>
<p>test</p>
<p>test</p>
<section>
<h1>head subsection</h1>
</section>
<h2 id="s5-1">Head 5-1</h2>
<p>test</p>
<p>test</p>
<h1 id="s5-next">Head 5 next</h1>
<p>test</p>
<p>test</p>
</section>
<h1 id="s7">Head 7</h1>
<p>test</p>
<p>test</p>
"""
expected = """
<h1 id="s5">Head 5</h1>
<p>test</p>
<p>test</p>
<section>
<h1>head subsection</h1>
</section>
<h2 id="s5-1">Head 5-1</h2>
<p>test</p>
<p>test</p>
<h1 id="s5-next">Head 5 next</h1>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content.parse(doc_src)
.extractSection(id="parent-s5")
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_multilevel_implicit_section_extract(self):
doc_src = """
<p>test</p>
<h1 id="s4">Head 4</h1>
<p>test</p>
<p>test</p>
<h2 id="s4-1">Head 4-1</h2>
<p>test</p>
<p>test</p>
<h3 id="s4-2">Head 4-1-1</h3>
<p>test</p>
<p>test</p>
<h1 id="s4-next">Head</h1>
<p>test</p>
"""
expected = """
<h1 id="s4">Head 4</h1>
<p>test</p>
<p>test</p>
<h2 id="s4-1">Head 4-1</h1>
<p>test</p>
<p>test</p>
<h3 id="s4-2">Head 4-1-1</h1>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content.parse(doc_src)
.extractSection(id="s4")
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_morelevels_implicit_section_extract(self):
doc_src = """
<h1 id="s7">Head 7</h1>
<p>test</p>
<p>test</p>
<h1 id="s8">Head</h1>
<p>test</p>
<h2 id="s8-1">Head</h1>
<p>test</p>
<h3 id="s8-1-1">Head</h3>
<p>test</p>
<h2 id="s8-2">Head</h1>
<p>test</p>
<h3 id="s8-2-1">Head</h3>
<p>test</p>
<h4 id="s8-2-1-1">Head</h4>
<p>test</p>
<h2 id="s8-3">Head</h1>
<p>test</p>
<h1 id="s9">Head</h1>
<p>test</p>
<p>test</p>
"""
expected = """
<h1 id="s8">Head</h1>
<p>test</p>
<h2 id="s8-1">Head</h1>
<p>test</p>
<h3 id="s8-1-1">Head</h3>
<p>test</p>
<h2 id="s8-2">Head</h1>
<p>test</p>
<h3 id="s8-2-1">Head</h3>
<p>test</p>
<h4 id="s8-2-1-1">Head</h4>
<p>test</p>
<h2 id="s8-3">Head</h1>
<p>test</p>
"""
result = (kuma.wiki.content.parse(doc_src)
.extractSection(id="s8")
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_basic_section_replace(self):
doc_src = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">Head 2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">Head 3</h1>
<p>test</p>
<p>test</p>
"""
replace_src = """
<h1 id="s2">Head 2</h1>
<p>replacement worked</p>
"""
expected = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">Head 2</h1>
<p>replacement worked</p>
<h1 id="s3">Head 3</h1>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content
.parse(doc_src)
.replaceSection(id="s2", replace_src=replace_src)
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_section_edit_links(self):
doc_src = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
<h2 id="s2">Head 2</h2>
<p>test</p>
<p>test</p>
<h3 id="s3">Head 3</h3>
<p>test</p>
<p>test</p>
"""
expected = """
<h1 id="s1"><a class="edit-section" data-section-id="s1" data-section-src-url="/en-US/docs/some-slug?raw=true&section=s1" href="/en-US/docs/some-slug$edit?section=s1&edit_links=true" title="Edit section">Edit</a>Head 1</h1>
<p>test</p>
<p>test</p>
<h2 id="s2"><a class="edit-section" data-section-id="s2" data-section-src-url="/en-US/docs/some-slug?raw=true&section=s2" href="/en-US/docs/some-slug$edit?section=s2&edit_links=true" title="Edit section">Edit</a>Head 2</h2>
<p>test</p>
<p>test</p>
<h3 id="s3"><a class="edit-section" data-section-id="s3" data-section-src-url="/en-US/docs/some-slug?raw=true&section=s3" href="/en-US/docs/some-slug$edit?section=s3&edit_links=true" title="Edit section">Edit</a>Head 3</h3>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content
.parse(doc_src)
.injectSectionEditingLinks('some-slug', 'en-US')
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_code_syntax_conversion(self):
doc_src = """
<h2>Some JavaScript</h2>:
<pre class="deki-transform" function="syntax.JavaScript">
function foo(){
alert("bar");
}
</pre>
<pre>Some CSS:</pre>
<pre class="dek-trans" function="syntax.CSS">
.dek-trans { color: red; }
</pre>
"""
expected = """
<h2>Some JavaScript</h2>:
<pre class="brush: js">
function foo(){
alert("bar");
}
</pre>
<pre>Some CSS:</pre>
<pre class="brush: css">
.dek-trans { color: red; }
</pre>
"""
result = (kuma.wiki.content
.parse(doc_src)
.filter(CodeSyntaxFilter).serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_non_ascii_section_headers(self):
headers = [
(u'Documentation à propos de HTML',
u'Documentation_à_propos_de_HTML'),
(u'Outils facilitant le développement HTML',
u'Outils_facilitant_le_développement_HTML'),
(u'字面值(literals)',
u'字面值(literals)'),
(u'Documentação',
u'Documentação'),
(u'Lektury uzupełniające',
u'Lektury_uzupełniające'),
(u'Атрибуты',
u'Атрибуты'),
(u'HTML5 엘리먼트',
u'HTML5_엘리먼트'),
(u'Non safe title "#$%&+,/:;=?@[\\]^`{|}~',
u'Non_safe_title'),
]
section_filter = SectionIDFilter('')
for original, slugified in headers:
ok_(slugified == section_filter.slugify(original))
@attr('toc')
def test_generate_toc(self):
doc_src = """
<h2 id="HTML">HTML</h2>
<h3 id="HTML5_canvas_element">HTML5 <code>canvas</code> element</h3>
<h2 id="JavaScript">JavaScript</h2>
JavaScript is awesome.
<h3 id="WebGL">WebGL</h3>
<h3 id="Audio">Audio</h3>
<h4 id="Audio-API">Audio API</h4>
<h2 id="CSS">CSS</h2>
<h4 id="CSS_transforms">CSS transforms</h4>
<h3 id="Gradients">Gradients</h3>
<h4 id="Scaling_backgrounds">Scaling backgrounds</h4>
"""
expected = """
<li><a rel="internal" href="#HTML">HTML</a>
<ol>
<li><a rel="internal" href="#HTML5_canvas_element">HTML5 <code>canvas</code> element</a></li>
</ol>
</li>
<li><a rel="internal" href="#JavaScript">JavaScript</a>
<ol>
<li><a rel="internal" href="#WebGL">WebGL</a>
<li><a rel="internal" href="#Audio">Audio</a>
<ol>
<li><a rel="internal" href="#Audio-API">Audio API</a></li>
</ol>
</li>
</ol>
</li>
<li><a rel="internal" href="#CSS">CSS</a>
<ol>
<li>
<ol>
<li><a rel="internal" href="#CSS_transforms">CSS transforms</a>
</ol>
</li>
<li><a rel="internal" href="#Gradients">Gradients</a>
<ol>
<li><a rel="internal" href="#Scaling_backgrounds">Scaling backgrounds</a>
</ol>
</ol>
</li>
"""
result = (kuma.wiki.content
.parse(doc_src)
.filter(SectionTOCFilter).serialize())
eq_(normalize_html(expected), normalize_html(result))
@attr('toc')
def test_generate_toc_h2(self):
doc_src = """
<h2 id="HTML">HTML</h2>
<h3 id="HTML5_canvas_element">HTML5 <code>canvas</code> element</h3>
<h2 id="JavaScript">JavaScript</h2>
JavaScript is awesome.
<h3 id="WebGL">WebGL</h3>
<h3 id="Audio">Audio</h3>
<h4 id="Audio-API">Audio API</h4>
<h2 id="CSS">CSS</h2>
<h4 id="CSS_transforms">CSS transforms</h4>
<h3 id="Gradients">Gradients</h3>
<h4 id="Scaling_backgrounds">Scaling backgrounds</h4>
"""
expected = """
<li><a rel="internal" href="#HTML">HTML</a>
</li>
<li><a rel="internal" href="#JavaScript">JavaScript</a>
</li>
<li><a rel="internal" href="#CSS">CSS</a>
</li>
"""
result = (kuma.wiki.content
.parse(doc_src)
.filter(H2TOCFilter).serialize())
eq_(normalize_html(expected), normalize_html(result))
@attr('toc')
def test_generate_toc_h3(self):
doc_src = """
<h2 id="HTML">HTML</h2>
<h3 id="HTML5_canvas_element">HTML5 <code>canvas</code> element</h3>
<h2 id="JavaScript">JavaScript</h2>
JavaScript is awesome.
<h3 id="WebGL">WebGL</h3>
<h3 id="Audio">Audio</h3>
<h4 id="Audio-API">Audio API</h4>
<h2 id="CSS">CSS</h2>
<h4 id="CSS_transforms">CSS transforms</h4>
<h3 id="Gradients">Gradients</h3>
<h4 id="Scaling_backgrounds">Scaling backgrounds</h4>
"""
expected = """
<li><a rel="internal" href="#HTML">HTML</a>
<ol>
<li><a rel="internal" href="#HTML5_canvas_element">HTML5 <code>canvas</code> element</a></li>
</ol>
</li>
<li><a rel="internal" href="#JavaScript">JavaScript</a>
<ol>
<li><a rel="internal" href="#WebGL">WebGL</a>
<li><a rel="internal" href="#Audio">Audio</a>
</li>
</ol>
</li>
<li><a rel="internal" href="#CSS">CSS</a>
<ol>
<li><a rel="internal" href="#Gradients">Gradients</a>
</ol>
</li>
"""
result = (kuma.wiki.content
.parse(doc_src)
.filter(H3TOCFilter).serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_bug_925043(self):
'''Bug 925043 - Redesign TOC has a bunch of empty <code> tags in markup'''
doc_src = """
<h2 id="Print">Mastering <code>print</code></h2>
<code>print 'Hello World!'</code>
"""
expected = """
<li>
<a href="#Print" rel="internal">Mastering<code>print</code></a>
</li>
"""
result = (kuma.wiki.content
.parse(doc_src)
.filter(SectionTOCFilter).serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_noinclude(self):
doc_src = u"""
<div class="noinclude">{{ XULRefAttr() }}</div>
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
<div class="noinclude">
<p>{{ languages( { "ja": "ja/XUL/Attribute/maxlength" } ) }}</p>
</div>
"""
expected = u"""
<dl>
<dt>{{ XULAttr("maxlength") }}</dt>
<dd>Type: <em>integer</em></dd>
<dd>Przykłady 例 예제 示例</dd>
</dl>
"""
result = (kuma.wiki.content.filter_out_noinclude(doc_src))
eq_(normalize_html(expected), normalize_html(result))
def test_noinclude_empty_content(self):
"""Bug 777475: The noinclude filter and pyquery seems to really dislike
empty string as input"""
doc_src = ''
try:
result = kuma.wiki.content.filter_out_noinclude(doc_src)
eq_('', result)
        except Exception:
self.fail("There should not have been an exception")
def test_sample_code_extraction(self):
sample_html = u"""
<div class="foo">
<p>Hello world!</p>
<p>Unicode fun: Przykłady 例 예제 示例</p>
</div>
"""
sample_css = u"""
.foo p { color: red; }
"""
sample_js = u"""
window.alert("Hi there!");
"""
doc_src = u"""
<p>This is a page. Deal with it.</p>
<h3 id="sample0">This is a section</h3>
<pre class="brush:html; highlight: [5, 15]; html-script: true">section html</pre>
<pre class="brush:css;">section css</pre>
<pre class="brush: js">section js</pre>
<h3>The following is a new section</h3>
<div id="sample1" class="code-sample">
<pre class="brush: html;">Ignore me</pre>
<pre class="brush:css;">Ignore me</pre>
<pre class="brush: js">Ignore me</pre>
</div>
<ul id="sample2" class="code-sample">
<li><span>HTML</span>
<pre class="brush: html">%s</pre>
</li>
<li><span>CSS</span>
<pre class="brush:css;random:crap;in:the;classname">%s</pre>
</li>
<li><span>JS</span>
<pre class="brush: js">%s</pre>
</li>
</ul>
<p>More content shows up here.</p>
<p id="not-a-sample">This isn't a sample, but it
shouldn't cause an error</p>
<h4 id="sample3">Another section</h4>
<pre class="brush: html">Ignore me</pre>
<pre class="brush: js">Ignore me</pre>
<h4>Yay a header</h4>
<p>Yadda yadda</p>
<div id="sample4" class="code-sample">
<pre class="brush: js">Ignore me</pre>
</div>
<p>Yadda yadda</p>
""" % (escape(sample_html), escape(sample_css), escape(sample_js))
# live sample using the section logic
result = kuma.wiki.content.extract_code_sample('sample0', doc_src)
eq_('section html', result['html'].strip())
eq_('section css', result['css'].strip())
eq_('section js', result['js'].strip())
# pull out a complete sample.
result = kuma.wiki.content.extract_code_sample('sample2', doc_src)
eq_(sample_html.strip(), result['html'].strip())
eq_(sample_css.strip(), result['css'].strip())
eq_(sample_js.strip(), result['js'].strip())
# a sample missing one part.
result = kuma.wiki.content.extract_code_sample('sample3', doc_src)
eq_('Ignore me', result['html'].strip())
eq_(None, result['css'])
eq_('Ignore me', result['js'].strip())
# a sample with only one part.
result = kuma.wiki.content.extract_code_sample('sample4', doc_src)
eq_(None, result['html'])
eq_(None, result['css'])
eq_('Ignore me', result['js'].strip())
# a "sample" with no code listings.
result = kuma.wiki.content.extract_code_sample('not-a-sample', doc_src)
eq_(None, result['html'])
eq_(None, result['css'])
eq_(None, result['js'])
def test_bug819999(self):
"""
        Non-breaking spaces are turned into normal spaces during code sample
        extraction.
"""
doc_src = """
<h2 id="bug819999">Bug 819999</h2>
<pre class="brush: css">
.widget select,
.no-widget .select {
position : absolute;
left : -5000em;
height : 0;
overflow : hidden;
}
</pre>
"""
result = kuma.wiki.content.extract_code_sample('bug819999', doc_src)
ok_(result['css'].find(u'\xa0') == -1)
def test_bug1173170(self):
"""
        Make sure colons in sample ids don't trip up the code
        extraction, given their ambiguity with CSS pseudo-selectors
"""
doc_src = """<pre id="Bug:1173170">Bug 1173170</pre>"""
try:
kuma.wiki.content.extract_code_sample('Bug:1173170', doc_src)
except SelectorSyntaxError:
self.fail("There should be no SelectorSyntaxError")
def test_bugize_text(self):
bad = 'Fixing bug #12345 again. <img src="http://davidwalsh.name" /> <a href="">javascript></a>'
good = 'Fixing <a href="https://bugzilla.mozilla.org/show_bug.cgi?id=12345" target="_blank">bug 12345</a> again. <img src="http://davidwalsh.name" /> <a href="">javascript></a>'
eq_(bugize_text(bad), Markup(good))
bad_upper = 'Fixing Bug #12345 again.'
good_upper = 'Fixing <a href="https://bugzilla.mozilla.org/show_bug.cgi?id=12345" target="_blank">Bug 12345</a> again.'
eq_(bugize_text(bad_upper), Markup(good_upper))
def test_iframe_host_filter(self):
slug = 'test-code-embed'
embed_url = 'https://sampleserver/en-US/docs/%s$samples/sample1' % slug
doc_src = """
<p>This is a page. Deal with it.</p>
<div id="sample1" class="code-sample">
<pre class="brush: html">Some HTML</pre>
<pre class="brush: css">.some-css { color: red; }</pre>
<pre class="brush: js">window.alert("HI THERE")</pre>
</div>
<iframe id="if1" src="%(embed_url)s"></iframe>
<iframe id="if2" src="http://testserver"></iframe>
<iframe id="if3" src="https://some.alien.site.com"></iframe>
<p>test</p>
""" % dict(embed_url=embed_url)
result_src = (kuma.wiki.content.parse(doc_src)
.filterIframeHosts('^https?\:\/\/sampleserver')
.serialize())
page = pq(result_src)
if1 = page.find('#if1')
eq_(if1.length, 1)
eq_(if1.attr('src'), embed_url)
if2 = page.find('#if2')
eq_(if2.length, 1)
eq_(if2.attr('src'), '')
if3 = page.find('#if3')
eq_(if3.length, 1)
eq_(if3.attr('src'), '')
def test_iframe_host_filter_invalid_host(self):
doc_src = """
<iframe id="if1" src="http://sampleserver"></iframe>
<iframe id="if2" src="http://testserver"></iframe>
<iframe id="if3" src="http://davidwalsh.name"></iframe>
<iframe id="if4" src="ftp://davidwalsh.name"></iframe>
<p>test</p>
"""
result_src = (kuma.wiki.content.parse(doc_src)
.filterIframeHosts('^https?\:\/\/(sample|test)server')
.serialize())
page = pq(result_src)
eq_(page.find('#if1').attr('src'), 'http://sampleserver')
eq_(page.find('#if2').attr('src'), 'http://testserver')
eq_(page.find('#if3').attr('src'), '')
eq_(page.find('#if4').attr('src'), '')
def test_iframe_host_filter_youtube(self):
tubes = (
'http://www.youtube.com/embed/iaNoBlae5Qw/?feature=player_detailpage',
'https://youtube.com/embed/iaNoBlae5Qw/?feature=player_detailpage',
'https://youtube.com/sembed/'
)
doc_src = """
<iframe id="if1" src="%s"></iframe>
<iframe id="if2" src="%s"></iframe>
<iframe id="if3" src="%s"></iframe>
<p>test</p>
""" % tubes
result_src = (kuma.wiki.content.parse(doc_src)
.filterIframeHosts('^https?\:\/\/(www.)?youtube.com\/embed\/(\.*)')
.serialize())
page = pq(result_src)
eq_(page.find('#if1').attr('src'), tubes[0])
eq_(page.find('#if2').attr('src'), tubes[1])
eq_(page.find('#if3').attr('src'), '')
def test_iframe_host_contents_filter(self):
"""Any contents inside an <iframe> should be removed"""
doc_src = """
<iframe>
<iframe src="javascript:alert(1);"></iframe>
</iframe>
"""
expected_src = """
<iframe>
</iframe>
"""
result_src = (kuma.wiki.content.parse(doc_src)
.filterIframeHosts('^https?\:\/\/sampleserver')
.serialize())
eq_(normalize_html(expected_src), normalize_html(result_src))
def test_link_annotation(self):
d, r = doc_rev("This document exists")
d.save()
r.save()
document(title=u'Héritée', locale=u'fr', slug=u'CSS/Héritage',
save=True)
document(title=u'DOM/StyleSheet', locale=u'en-US',
slug=u'DOM/StyleSheet', save=True)
base_url = u'https://testserver'
vars = dict(
base_url=base_url,
exist_url=d.get_absolute_url(),
exist_url_with_base=urljoin(base_url, d.get_absolute_url()),
uilocale_url=u'/en-US/docs/%s/%s' % (d.locale, d.slug),
noexist_url=u'/en-US/docs/no-such-doc',
noexist_url_with_base=urljoin(base_url,
u'/en-US/docs/no-such-doc'),
noexist_uilocale_url=u'/en-US/docs/en-US/blah-blah-blah',
nonen_slug='/fr/docs/CSS/H%c3%a9ritage',
tag_url='/en-US/docs/tag/foo',
feed_url='/en-US/docs/feeds/atom/all',
templates_url='/en-US/docs/templates',
)
doc_src = u"""
<li><a href="%(nonen_slug)s">Héritée</a></li>
<li><a href="%(exist_url)s">This doc should exist</a></li>
<li><a href="%(exist_url)s#withanchor">This doc should exist</a></li>
<li><a href="%(exist_url_with_base)s">This doc should exist</a></li>
<li><a href="%(exist_url_with_base)s#withanchor">This doc should exist</a></li>
<li><a href="%(uilocale_url)s">This doc should exist</a></li>
<li><a class="foobar" href="%(exist_url)s">This doc should exist, and its class should be left alone.</a></li>
<li><a href="%(noexist_url)s#withanchor">This doc should NOT exist</a></li>
<li><a href="%(noexist_url)s">This doc should NOT exist</a></li>
<li><a href="%(noexist_url_with_base)s">This doc should NOT exist</a></li>
<li><a href="%(noexist_url_with_base)s#withanchor">This doc should NOT exist</a></li>
<li><a href="%(noexist_uilocale_url)s">This doc should NOT exist</a></li>
<li><a class="foobar" href="%(noexist_url)s">This doc should NOT exist, and its class should be altered</a></li>
<li><a href="http://mozilla.org/">This is an external link</a></li>
<li><a class="foobar" name="quux">A lack of href should not cause a problem.</a></li>
<li><a>In fact, a "link" with no attributes should be no problem as well.</a></li>
<a href="%(tag_url)s">Tag link</a>
<a href="%(feed_url)s">Feed link</a>
<a href="%(templates_url)s">Templates link</a>
<a href="/en-US/docs/DOM/stylesheet">Case sensitive 1</a>
<a href="/en-US/docs/DOM/Stylesheet">Case sensitive 1</a>
<a href="/en-US/docs/DOM/StyleSheet">Case sensitive 1</a>
<a href="/en-us/docs/dom/StyleSheet">Case sensitive 1</a>
<a href="/en-US/docs/dom/Styles">For good measure</a>
""" % vars
expected = u"""
<li><a href="%(nonen_slug)s">Héritée</a></li>
<li><a href="%(exist_url)s">This doc should exist</a></li>
<li><a href="%(exist_url)s#withanchor">This doc should exist</a></li>
<li><a href="%(exist_url_with_base)s">This doc should exist</a></li>
<li><a href="%(exist_url_with_base)s#withanchor">This doc should exist</a></li>
<li><a href="%(uilocale_url)s">This doc should exist</a></li>
<li><a class="foobar" href="%(exist_url)s">This doc should exist, and its class should be left alone.</a></li>
<li><a class="new" href="%(noexist_url)s#withanchor">This doc should NOT exist</a></li>
<li><a class="new" href="%(noexist_url)s">This doc should NOT exist</a></li>
<li><a class="new" href="%(noexist_url_with_base)s">This doc should NOT exist</a></li>
<li><a class="new" href="%(noexist_url_with_base)s#withanchor">This doc should NOT exist</a></li>
<li><a class="new" href="%(noexist_uilocale_url)s">This doc should NOT exist</a></li>
<li><a class="foobar new" href="%(noexist_url)s">This doc should NOT exist, and its class should be altered</a></li>
<li><a class="external" href="http://mozilla.org/">This is an external link</a></li>
<li><a class="foobar" name="quux">A lack of href should not cause a problem.</a></li>
<li><a>In fact, a "link" with no attributes should be no problem as well.</a></li>
<a href="%(tag_url)s">Tag link</a>
<a href="%(feed_url)s">Feed link</a>
<a href="%(templates_url)s">Templates link</a>
<a href="/en-US/docs/DOM/stylesheet">Case sensitive 1</a>
<a href="/en-US/docs/DOM/Stylesheet">Case sensitive 1</a>
<a href="/en-US/docs/DOM/StyleSheet">Case sensitive 1</a>
<a href="/en-us/docs/dom/StyleSheet">Case sensitive 1</a>
<a class="new" href="/en-US/docs/dom/Styles">For good measure</a>
""" % vars
# Split the markup into lines, to better see failures
doc_lines = doc_src.strip().split("\n")
expected_lines = expected.strip().split("\n")
for idx in range(0, len(doc_lines)):
doc_line = doc_lines[idx]
expected_line = expected_lines[idx]
result_line = (kuma.wiki.content.parse(doc_line)
.annotateLinks(
base_url=vars['base_url'])
.serialize())
self.assertHTMLEqual(normalize_html(expected_line), normalize_html(result_line))
@attr('bug821986')
def test_editor_safety_filter(self):
"""Markup that's hazardous for editing should be stripped"""
doc_src = """
<svg><circle onload=confirm(3)>
<h1 class="header1">Header One</h1>
<p>test</p>
<section>
<h1 class="header2">Header Two</h1>
<p>test</p>
</section>
<h1 class="header3">Header Three</h1>
<p>test</p>
"""
expected_src = """
<svg><circle>
<h1 class="header1">Header One</h1>
<p>test</p>
<section>
<h1 class="header2">Header Two</h1>
<p>test</p>
</section>
<h1 class="header3">Header Three</h1>
<p>test</p>
"""
result_src = (kuma.wiki.content.parse(doc_src)
.filterEditorSafety()
.serialize())
eq_(normalize_html(expected_src), normalize_html(result_src))
def test_ignore_heading_section_extract(self):
doc_src = """
<p>test</p>
<h1 id="s4">Head 4</h1>
<p>test</p>
<h2 id="s4-1">Head 4-1</h2>
<p>test</p>
<h3 id="s4-2">Head 4-1-1</h3>
<p>test s4-2</p>
<h1 id="s4-next">Head</h1>
<p>test</p>
"""
expected = """
<p>test</p>
<h3 id="s4-2">Head 4-1-1</h3>
<p>test s4-2</p>
"""
result = (kuma.wiki.content.parse(doc_src)
.extractSection(id="s4-1",
ignore_heading=True)
.serialize())
eq_(normalize_html(expected), normalize_html(result))
def test_ignore_heading_section_replace(self):
doc_src = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">Head 2</h1>
<p>test</p>
<p>test</p>
<h1 id="s3">Head 3</h1>
<p>test</p>
<p>test</p>
"""
replace_src = """
<p>replacement worked yay hooray</p>
"""
expected = """
<h1 id="s1">Head 1</h1>
<p>test</p>
<p>test</p>
<h1 id="s2">Head 2</h1>
<p>replacement worked yay hooray</p>
<h1 id="s3">Head 3</h1>
<p>test</p>
<p>test</p>
"""
result = (kuma.wiki.content
.parse(doc_src)
.replaceSection(id="s2",
replace_src=replace_src,
ignore_heading=True)
.serialize())
eq_(normalize_html(expected), normalize_html(result))
class AllowedHTMLTests(KumaTestCase):
simple_tags = (
'div', 'span', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'pre',
'code', 'dl', 'dt', 'dd', 'table',
'section', 'header', 'footer',
'nav', 'article', 'aside', 'figure', 'dialog', 'hgroup',
'mark', 'time', 'meter', 'output', 'progress',
'audio', 'details', 'datagrid', 'datalist', 'table',
'address'
)
unclose_tags = ('img', 'input', 'br', 'command')
special_tags = (
"<table><thead><tr><th>foo</th></tr></thead><tbody><tr><td>foo</td></tr></tbody></table>",
)
special_attributes = (
'<command id="foo">',
'<img align="left" alt="picture of foo" class="foo" dir="rtl" id="foo" src="foo" title="foo">',
'<a class="foo" href="foo" id="foo" title="foo">foo</a>',
'<div class="foo">foo</div>',
'<video class="movie" controls id="some-movie" lang="en-US" src="some-movie.mpg">Fallback</video>'
# TODO: Styles have to be cleaned on a case-by-case basis. We
# need to enumerate the styles we're going to allow, then feed
# them to bleach.
# '<span style="font-size: 24px"></span>',
)
def test_allowed_tags(self):
for tag in self.simple_tags:
html_str = '<%(tag)s></%(tag)s>' % {'tag': tag}
eq_(html_str, bleach.clean(html_str, attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS))
for tag in self.unclose_tags:
html_str = '<%s>' % tag
eq_(html_str, bleach.clean(html_str, attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS))
for html_str in self.special_tags:
eq_(html_str, bleach.clean(html_str, attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS))
def test_allowed_attributes(self):
for tag in ('div', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'pre', 'code',
'dl', 'dt', 'dd', 'section', 'header', 'footer', 'nav',
'article', 'aside', 'figure', 'dialog', 'hgroup', 'mark',
'time', 'meter', 'output', 'progress', 'audio', 'details',
'datagrid', 'datalist', 'address'):
html_str = '<%(tag)s id="foo"></%(tag)s>' % {'tag': tag}
eq_(html_str, bleach.clean(html_str, attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS))
for html_str in self.special_attributes:
eq_(html_str, bleach.clean(html_str, attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS))
def test_stripped_ie_comment(self):
"""bug 801046: strip IE conditional comments"""
content = """
<p>Hi there.</p>
<!--[if]><script>alert(1)</script -->
<!--[if<img src=x onerror=alert(2)//]> -->
<p>Goodbye</p>
"""
expected = """
<p>Hi there.</p>
<p>Goodbye</p>
"""
result = Document.objects.clean_content(content)
eq_(normalize_html(expected), normalize_html(result))
class SearchParserTests(KumaTestCase):
"""Tests for document parsers that extract content for search indexing"""
def test_css_classname_extraction(self):
expected = ('foobar', 'barfoo', 'bazquux')
content = """
<p class="%s">Test</p>
<p class="%s">Test</p>
<div class="%s">Test</div>
""" % expected
result = extract_css_classnames(content)
eq_(sorted(expected), sorted(result))
def test_html_attribute_extraction(self):
expected = (
'class="foobar"',
'id="frazzy"',
'data-boof="farb"'
)
content = """
<p %s>Test</p>
<p %s>Test</p>
<div %s>Test</div>
""" % expected
result = extract_html_attributes(content)
eq_(sorted(expected), sorted(result))
def test_kumascript_macro_extraction(self):
expected = ('foobar', 'barfoo', 'bazquux', 'banana')
content = """
<p>{{ %s }}</p>
<p>{{ %s("foo", "bar", "baz") }}</p>
<p>{{ %s ("quux") }}</p>
<p>{{%s}}</p>
""" % expected
result = extract_kumascript_macro_names(content)
eq_(sorted(expected), sorted(result))
class GetSEODescriptionTests(KumaTestCase):
def test_summary_section(self):
content = (
'<h2 id="Summary">Summary</h2><p>The <strong>Document Object '
'Model'
'</strong> (<strong>DOM</strong>) is an API for '
'<a href="/en-US/docs/HTML" title="en-US/docs/HTML">HTML</a> and '
'<a href="/en-US/docs/XML" title="en-US/docs/XML">XML</a> '
'documents. It provides a structural representation of the '
'document, enabling you to modify its content and visual '
'presentation by using a scripting language such as '
'<a href="/en-US/docs/JavaScript" '
'title="https://developer.mozilla.org/en-US/docs/JavaScript">'
'JavaScript</a>.</span></p>')
expected = (
'The Document Object Model (DOM) is an API for HTML and '
'XML documents. It provides a structural representation of the'
' document, enabling you to modify its content and visual'
' presentation by using a scripting language such as'
' JavaScript.')
eq_(expected, get_seo_description(content, 'en-US'))
def test_keep_markup(self):
content = """
<h2 id="Summary">Summary</h2>
<p>The <strong>Document Object Model </strong>
(<strong>DOM</strong>) is an API for <a href="/en-US/docs/HTML"
title="en-US/docs/HTML">HTML</a> and <a href="/en-US/docs/XML"
title="en-US/docs/XML">XML</a> documents. It provides a structural
representation of the document, enabling you to modify its content
and visual presentation by using a scripting language such as <a
href="/en-US/docs/JavaScript"
title="https://developer.mozilla.org/en-US/docs/JavaScript">
JavaScript</a>.</span></p>
"""
expected = """
The <strong>Document Object Model </strong>
(<strong>DOM</strong>) is an API for <a href="/en-US/docs/HTML"
title="en-US/docs/HTML">HTML</a> and <a href="/en-US/docs/XML"
title="en-US/docs/XML">XML</a> documents. It provides a structural
representation of the document, enabling you to modify its content
and visual presentation by using a scripting language such as <a
href="/en-US/docs/JavaScript"
title="https://developer.mozilla.org/en-US/docs/JavaScript">
JavaScript</a>.</span>
"""
eq_(normalize_html(expected),
normalize_html(get_seo_description(content, 'en-US', False)))
def test_html_elements_spaces(self):
# No spaces with html tags
content = (
u'<p><span class="seoSummary">The <strong>Document Object '
'Model'
'</strong> (<strong>DOM</strong>) is an API for '
'<a href="/en-US/docs/HTML" title="en-US/docs/HTML">HTML</a> and '
'<a href="/en-US/docs/XML" title="en-US/docs/XML">XML</a> '
'documents. It provides a structural representation of the '
'document, enabling you to modify its content and visual '
'presentation by using a scripting language such as '
'<a href="/en-US/docs/JavaScript" '
'title="https://developer.mozilla.org/en-US/docs/JavaScript">'
'JavaScript</a>.</span></p>')
expected = (
'The Document Object Model (DOM) is an API for HTML and '
'XML'
' documents. It provides a structural representation of the'
' document, enabling you to modify its content and visual'
' presentation by using a scripting language such as'
' JavaScript.')
eq_(expected, get_seo_description(content, 'en-US'))
content = (u'<p><span class="seoSummary"><strong>Cascading Style '
'Sheets</strong>, most of the time abbreviated in '
'<strong>CSS</strong>, is a '
'<a href="/en-US/docs/DOM/stylesheet">stylesheet</a> '
'language used to describe the presentation of a document '
'written in <a href="/en-US/docs/HTML" title="The '
'HyperText Mark-up Language">HTML</a></span> or <a '
'href="/en-US/docs/XML" title="en-US/docs/XML">XML</a> '
'(including various XML languages like <a '
'href="/en-US/docs/SVG" title="en-US/docs/SVG">SVG</a> or '
'<a href="/en-US/docs/XHTML" '
'title="en-US/docs/XHTML">XHTML</a>)<span '
'class="seoSummary">. CSS describes how the structured '
'element must be rendered on screen, on paper, in speech, '
'or on other media.</span></p>')
expected = ('Cascading Style Sheets, most of the time abbreviated in '
'CSS, is a stylesheet language used to describe the '
'presentation of a document written in HTML. CSS '
'describes how the structured element must be rendered on '
'screen, on paper, in speech, or on other media.')
eq_(expected, get_seo_description(content, 'en-US'))
def test_empty_paragraph_content(self):
content = u'''<p></p><div class="overheadIndicator draft draftHeader">
<strong>DRAFT</strong>
<div>This page is not complete.</div>
</div><p></p>
<p></p><div class="note"><strong>Note:</strong> Please do not
translate this page until it is done; it will be much easier at
that point. The French translation is a test to be sure that it
works well.</div><p></p>'''
expected = ('')
eq_(expected, get_seo_description(content, 'en-US', False))
|
fighterCui/L4ReFiascoOC
|
refs/heads/master
|
l4/pkg/python/contrib/Lib/test/test_select.py
|
56
|
from test import test_support
import unittest
import select
import os
import sys
class SelectTestCase(unittest.TestCase):
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
def test_error_conditions(self):
self.assertRaises(TypeError, select.select, 1, 2, 3)
self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
self.assertRaises(TypeError, select.select, [], [], [], "not a number")
def test_select(self):
if sys.platform[:3] in ('win', 'mac', 'os2', 'riscos'):
if test_support.verbose:
print "Can't test select easily on", sys.platform
return
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
if test_support.verbose:
print 'timeout =', tout
rfd, wfd, xfd = select.select([p], [], [], tout)
if (rfd, wfd, xfd) == ([], [], []):
continue
if (rfd, wfd, xfd) == ([p], [], []):
line = p.readline()
if test_support.verbose:
print repr(line)
if not line:
if test_support.verbose:
print 'EOF'
break
continue
            self.fail('Unexpected return values from select(): %s, %s, %s' % (rfd, wfd, xfd))
p.close()
def test_main():
test_support.run_unittest(SelectTestCase)
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
orchidinfosys/odoo
|
refs/heads/master
|
addons/website_mail_channel/controllers/main.py
|
29
|
# -*- coding: utf-8 -*-
import datetime
from dateutil import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.website.models.website import slug
from openerp.addons.web.http import request
class MailGroup(http.Controller):
_thread_per_page = 20
_replies_per_page = 10
def _get_archives(self, group_id):
MailMessage = request.registry['mail.message']
groups = MailMessage.read_group(
request.cr, request.uid, [('model', '=', 'mail.channel'), ('res_id', '=', group_id)], ['subject', 'date'],
groupby="date", orderby="date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route("/groups", type='http', auth="public", website=True)
def view(self, **post):
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.channel')
mail_message_obj = request.registry.get('mail.message')
group_ids = group_obj.search(cr, uid, [('alias_id', '!=', False), ('alias_id.alias_name', '!=', False)], context=context)
groups = group_obj.browse(cr, uid, group_ids, context)
# compute statistics
month_date = datetime.datetime.today() - relativedelta.relativedelta(months=1)
result = mail_message_obj.read_group(
cr, SUPERUSER_ID,
[('model', '=', 'mail.channel'), ('date', '>=', month_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))],
[], ['res_id'],
context=context)
result = dict([(x['res_id'], x['res_id_count']) for x in result])
group_data = dict()
for group in groups:
group_data[group.id] = {'monthly_message_nbr': result.get(group.id, 0)}
values = {'groups': groups, 'group_data': group_data}
return request.website.render('website_mail_channel.mail_channels', values)
@http.route(["/groups/is_member"], type='json', auth="public", website=True)
def is_member(self, channel_id=0, **kw):
""" Determine if the current user is member of the given channel_id
:param channel_id : the channel_id to check
"""
current_user_id = request.uid
session_partner_id = request.session.get('partner_id')
public_id = request.website.user_id.id
partner = None
# find the current partner
if current_user_id != public_id:
partner = request.env['res.users'].sudo().browse(current_user_id).partner_id
elif session_partner_id:
partner = request.env['res.partner'].sudo().browse(session_partner_id)
values = {
'is_user': current_user_id != public_id,
'email': partner and partner.email or "",
'is_member': False,
'alias_name': False,
}
# check if the current partner is member or not
channel = request.env['mail.channel'].browse(int(channel_id))
if channel.exists() and partner is not None:
values['is_member'] = bool(partner in channel.sudo().channel_partner_ids)
return values
@http.route(["/groups/subscription"], type='json', auth="public", website=True)
def subscription(self, channel_id=0, subscription="on", email='', **kw):
""" Subscribe to a mailing list : this will create a partner with its email address (if public user not
registered yet) and add it as channel member
:param channel_id : the channel id to join/quit
:param subscription : 'on' to unsubscribe the user, 'off' to subscribe
"""
        subscribe = subscription == 'on'  # 'on' means currently subscribed, i.e. this request unsubscribes
channel = request.env['mail.channel'].browse(int(channel_id))
partner_ids = []
# search partner_id
current_user_id = request.uid
public_id = request.website.user_id.id
if current_user_id != public_id:
partner_ids = [request.env['res.users'].browse(current_user_id).partner_id.id]
else: # mail_thread method
partner_ids = channel.sudo()._find_partner_from_emails([email], check_followers=True)
if not partner_ids or not partner_ids[0]:
name = email.split('@')[0]
partner_ids = [request.env['res.partner'].sudo().create({'name': name, 'email': email}).id]
        # add or remove channel members; ORM command (3, id) unlinks an
        # existing partner from the channel, (4, id) links one
if subscribe:
channel.check_access_rule('read')
channel.sudo().write({'channel_partner_ids': [(3, partner_id) for partner_id in partner_ids]})
return False
else: # add partner to the channel
request.session['partner_id'] = partner_ids[0]
channel.check_access_rule('read')
channel.sudo().write({'channel_partner_ids': [(4, partner_id) for partner_id in partner_ids]})
return True
@http.route([
"/groups/<model('mail.channel'):group>",
"/groups/<model('mail.channel'):group>/page/<int:page>"
], type='http', auth="public", website=True)
def thread_headers(self, group, page=1, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
thread_obj = request.registry.get('mail.message')
domain = [('model', '=', 'mail.channel'), ('res_id', '=', group.id), ('message_type', '!=', 'notification')]
if mode == 'thread':
domain += [('parent_id', '=', False)]
if date_begin and date_end:
domain += [('date', '>=', date_begin), ('date', '<=', date_end)]
thread_count = thread_obj.search_count(cr, uid, domain, context=context)
pager = request.website.pager(
url='/groups/%s' % slug(group),
total=thread_count,
page=page,
step=self._thread_per_page,
url_args={'mode': mode, 'date_begin': date_begin or '', 'date_end': date_end or ''},
)
thread_ids = thread_obj.search(cr, uid, domain, limit=self._thread_per_page, offset=pager['offset'])
messages = thread_obj.browse(cr, uid, thread_ids, context)
values = {
'messages': messages,
'group': group,
'pager': pager,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
}
return request.website.render('website_mail_channel.group_messages', values)
@http.route([
'''/groups/<model('mail.channel'):group>/<model('mail.message', "[('model','=','mail.channel'), ('res_id','=',group[0])]"):message>''',
], type='http', auth="public", website=True)
def thread_discussion(self, group, message, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
Message = request.registry['mail.message']
if mode == 'thread':
base_domain = [('model', '=', 'mail.channel'), ('res_id', '=', group.id), ('parent_id', '=', message.parent_id and message.parent_id.id or False)]
else:
base_domain = [('model', '=', 'mail.channel'), ('res_id', '=', group.id)]
next_message = None
next_message_ids = Message.search(cr, uid, base_domain + [('date', '<', message.date)], order="date DESC", limit=1, context=context)
if next_message_ids:
next_message = Message.browse(cr, uid, next_message_ids[0], context=context)
prev_message = None
prev_message_ids = Message.search(cr, uid, base_domain + [('date', '>', message.date)], order="date ASC", limit=1, context=context)
if prev_message_ids:
prev_message = Message.browse(cr, uid, prev_message_ids[0], context=context)
values = {
'message': message,
'group': group,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
'next_message': next_message,
'prev_message': prev_message,
}
return request.website.render('website_mail_channel.group_message', values)
@http.route(
'''/groups/<model('mail.channel'):group>/<model('mail.message', "[('model','=','mail.channel'), ('res_id','=',group[0])]"):message>/get_replies''',
type='json', auth="public", methods=['POST'], website=True)
def render_messages(self, group, message, **post):
last_displayed_id = post.get('last_displayed_id')
if not last_displayed_id:
return False
Message = request.registry['mail.message']
replies_domain = [('id', '<', int(last_displayed_id)), ('parent_id', '=', message.id)]
msg_ids = Message.search(request.cr, request.uid, replies_domain, limit=self._replies_per_page, context=request.context)
msg_count = Message.search(request.cr, request.uid, replies_domain, count=True, context=request.context)
messages = Message.browse(request.cr, request.uid, msg_ids, context=request.context)
values = {
'group': group,
'thread_header': message,
'messages': messages,
'msg_more_count': msg_count - self._replies_per_page,
'replies_per_page': self._replies_per_page,
}
return request.registry['ir.ui.view'].render(request.cr, request.uid, 'website_mail_channel.messages_short', values, engine='ir.qweb', context=request.context)
@http.route("/groups/<model('mail.channel'):group>/get_alias_info", type='json', auth='public', website=True)
def get_alias_info(self, group, **post):
return {
'alias_name': group.alias_id and group.alias_id.alias_name and group.alias_id.alias_domain and '%s@%s' % (group.alias_id.alias_name, group.alias_id.alias_domain) or False
}
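# Illustrative note (hypothetical, not part of the original controller): the
# type='json' routes above are meant to be called from the website's
# JavaScript over JSON-RPC, e.g. a POST to /groups/is_member with params
# {'channel_id': 42} returns the membership dict built by is_member().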
|
mattvick/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
|
123
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import sys
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.runtests import RunTests
class RunTestsTest(unittest.TestCase):
def test_webkit_run_unit_tests(self):
tool = MockTool(log_executive=True)
tool._deprecated_port.run_python_unittests_command = lambda: None
tool._deprecated_port.run_perl_unittests_command = lambda: None
step = RunTests(tool, MockOptions(test=True, non_interactive=True, quiet=False))
if sys.platform != "cygwin":
expected_logs = """Running bindings generation tests
MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
Running WebKit unit tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-show-results', '--exit-after-n-failures=30', '--quiet', '--skip-failing-tests'], cwd=/mock-checkout
"""
else:
expected_logs = """Running bindings generation tests
MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
Running WebKit unit tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-show-results', '--exit-after-n-failures=30', '--no-build'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_logs=expected_logs)
|
sagangwee/sagangwee.github.io
|
refs/heads/master
|
build/pygments/build/lib.linux-i686-2.7/pygments/styles/autumn.py
|
135
|
# -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
default_style = ""
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #4c8317',
Comment.Special: 'italic #0000aa',
Keyword: '#0000aa',
Keyword.Type: '#00aaaa',
Operator.Word: '#0000aa',
Name.Builtin: '#00aaaa',
Name.Function: '#00aa00',
Name.Class: 'underline #00aa00',
Name.Namespace: 'underline #00aaaa',
Name.Variable: '#aa0000',
Name.Constant: '#aa0000',
Name.Entity: 'bold #800',
Name.Attribute: '#1e90ff',
Name.Tag: 'bold #1e90ff',
Name.Decorator: '#888888',
String: '#aa5500',
String.Symbol: '#0000aa',
String.Regex: '#009999',
Number: '#009999',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
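# A minimal usage sketch (hypothetical, not part of the original module),
# assuming pygments is installed: render a snippet as HTML with this style.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    print(highlight("print('hi')", PythonLexer(), HtmlFormatter(style=AutumnStyle)))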
|
hojel/calibre
|
refs/heads/master
|
src/calibre/ebooks/pdb/haodoo/reader.py
|
24
|
# -*- coding: utf-8 -*-
'''
Read content from Haodoo.net pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2012, Kan-Ru Chen <kanru@kanru.info>'
__docformat__ = 'restructuredtext en'
import struct
import os
from calibre import prepare_string_for_xml
from calibre.ebooks.pdb.formatreader import FormatReader
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.txt.processor import opf_writer, HTML_TEMPLATE
BPDB_IDENT = b'BOOKMTIT'
UPDB_IDENT = b'BOOKMTIU'
punct_table = {
u"︵": u"(",
u"︶": u")",
u"︷": u"{",
u"︸": u"}",
u"︹": u"〔",
u"︺": u"〕",
u"︻": u"【",
u"︼": u"】",
u"︗": u"〖",
u"︘": u"〗",
u"﹇": u"[]",
u"﹈": u"[]",
u"︽": u"《",
u"︾": u"》",
u"︿": u"〈",
u"﹀": u"〉",
u"﹁": u"「",
u"﹂": u"」",
u"﹃": u"『",
u"﹄": u"』",
u"|": u"—",
u"︙": u"…",
u"ⸯ": u"~",
u"│": u"…",
u"¦": u"…",
u" ": u" ",
}
def fix_punct(line):
for (key, value) in punct_table.items():
line = line.replace(key, value)
return line
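# Illustrative example (hypothetical): vertical-writing CJK punctuation is
# normalized to its horizontal equivalent, e.g.
#   fix_punct(u"︽書名︾")  ->  u"《書名》"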
class LegacyHeaderRecord(object):
def __init__(self, raw):
fields = raw.lstrip().replace(b'\x1b\x1b\x1b', b'\x1b').split(b'\x1b')
self.title = fix_punct(fields[0].decode('cp950', 'replace'))
self.num_records = int(fields[1])
        self.chapter_titles = map(
            lambda x: fix_punct(x.decode('cp950', 'replace').rstrip(u'\x00')),
            fields[2:])
class UnicodeHeaderRecord(object):
def __init__(self, raw):
fields = raw.lstrip().replace(b'\x1b\x00\x1b\x00\x1b\x00',
b'\x1b\x00').split(b'\x1b\x00')
self.title = fix_punct(fields[0].decode('utf_16_le', 'ignore'))
self.num_records = int(fields[1])
        self.chapter_titles = map(
            lambda x: fix_punct(x.decode('utf_16_le', 'replace').rstrip(u'\x00')),
            fields[2].split(b'\r\x00\n\x00'))
class Reader(FormatReader):
def __init__(self, header, stream, log, options):
self.stream = stream
self.log = log
self.sections = []
for i in range(header.num_sections):
self.sections.append(header.section_data(i))
if header.ident == BPDB_IDENT:
self.header_record = LegacyHeaderRecord(self.section_data(0))
self.encoding = 'cp950'
else:
self.header_record = UnicodeHeaderRecord(self.section_data(0))
self.encoding = 'utf_16_le'
def author(self):
self.stream.seek(35)
version = struct.unpack(b'>b', self.stream.read(1))[0]
if version == 2:
self.stream.seek(0)
author = self.stream.read(35).rstrip(b'\x00').decode(self.encoding, 'replace')
return author
else:
return u'Unknown'
def get_metadata(self):
mi = MetaInformation(self.header_record.title,
[self.author()])
mi.language = u'zh-tw'
return mi
def section_data(self, number):
return self.sections[number]
def decompress_text(self, number):
        return self.section_data(number).decode(self.encoding,
            'replace').rstrip(u'\x00')
def extract_content(self, output_dir):
txt = u''
self.log.info(u'Decompressing text...')
for i in range(1, self.header_record.num_records + 1):
self.log.debug(u'\tDecompressing text section %i' % i)
title = self.header_record.chapter_titles[i-1]
lines = []
title_added = False
for line in self.decompress_text(i).splitlines():
line = fix_punct(line)
line = line.strip()
if not title_added and title in line:
line = u'<h1 class="chapter">' + line + u'</h1>\n'
title_added = True
else:
line = prepare_string_for_xml(line)
lines.append(u'<p>%s</p>' % line)
if not title_added:
lines.insert(0, u'<h1 class="chapter">' + title + u'</h1>\n')
txt += u'\n'.join(lines)
self.log.info(u'Converting text to OEB...')
html = HTML_TEMPLATE % (self.header_record.title, txt)
with open(os.path.join(output_dir, u'index.html'), 'wb') as index:
index.write(html.encode('utf-8'))
mi = self.get_metadata()
manifest = [(u'index.html', None)]
spine = [u'index.html']
opf_writer(output_dir, u'metadata.opf', manifest, spine, mi)
return os.path.join(output_dir, u'metadata.opf')
|
jiadaizhao/LeetCode
|
refs/heads/master
|
0101-0200/0116-Populating Next Right Pointers in Each Node/0116-Populating Next Right Pointers in Each Node.py
|
1
|
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
class Solution:
def connect(self, root: 'Node') -> 'Node':
curr = root
while curr:
p = curr
while p:
if p.left:
p.left.next = p.right
if p.next:
p.right.next = p.next.left
p = p.next
curr = curr.left
return root
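# Minimal self-check (hypothetical three-node perfect tree, not part of the
# original solution): siblings get linked left-to-right and the rightmost
# node on each level keeps next = None.
if __name__ == '__main__':
    demo = Node(1, Node(2), Node(3))
    Solution().connect(demo)
    assert demo.next is None
    assert demo.left.next is demo.right
    assert demo.right.next is None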
|
earaujoassis/metaheuristics
|
refs/heads/master
|
scheduling/jobshop/suite/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2011-2015 Ewerton Assis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
plumgrid/plumgrid-nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/160_fix_system_metadata_deleted.py
|
23
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
sys_meta = sqlalchemy.Table('instance_system_metadata', meta,
autoload=True)
    # "is None" does not work here: SQLAlchemy overloads == on Column
    # objects to generate SQL "IS NULL", while Python's "is" cannot be
    # overloaded.
sys_meta.update().\
where(sys_meta.c.deleted == None).\
values(deleted=0).\
execute()
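    # Equivalent SQL (illustrative):
    #   UPDATE instance_system_metadata SET deleted = 0 WHERE deleted IS NULL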
def downgrade(migration_engine):
# This migration only corrects NULL to be 0. There's no action to
# revert this.
pass
|
ProstoMaxim/incubator-airflow
|
refs/heads/master
|
airflow/macros/__init__.py
|
27
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from datetime import datetime, timedelta
import dateutil
import time
from . import hive
import uuid
def ds_add(ds, days):
"""
Add or subtract days from a YYYY-MM-DD
:param ds: anchor date in ``YYYY-MM-DD`` format to add to
:type ds: str
:param days: number of days to add to the ds, you can use negative values
:type days: int
>>> ds_add('2015-01-01', 5)
'2015-01-06'
>>> ds_add('2015-01-06', -5)
'2015-01-01'
"""
ds = datetime.strptime(ds, '%Y-%m-%d')
if days:
ds = ds + timedelta(days)
return ds.isoformat()[:10]
def ds_format(ds, input_format, output_format):
"""
Takes an input string and outputs another string
as specified in the output format
:param ds: input string which contains a date
:type ds: str
:param input_format: input string format. E.g. %Y-%m-%d
:type input_format: str
:param output_format: output string format E.g. %Y-%m-%d
:type output_format: str
>>> ds_format('2015-01-01', "%Y-%m-%d", "%m-%d-%y")
'01-01-15'
>>> ds_format('1/5/2015', "%m/%d/%Y", "%Y-%m-%d")
'2015-01-05'
"""
return datetime.strptime(ds, input_format).strftime(output_format)
def _integrate_plugins():
"""Integrate plugins to the context"""
import sys
from airflow.plugins_manager import macros_modules
for macros_module in macros_modules:
sys.modules[macros_module.__name__] = macros_module
globals()[macros_module._name] = macros_module
##########################################################
# TODO FIXME Remove in Airflow 2.0
import os as _os
if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
from zope.deprecation import deprecated as _deprecated
for _macro in macros_module._objects:
macro_name = _macro.__name__
globals()[macro_name] = _macro
_deprecated(
macro_name,
"Importing plugin macro '{i}' directly from "
"'airflow.macros' has been deprecated. Please "
"import from 'airflow.macros.[plugin_module]' "
"instead. Support for direct imports will be dropped "
"entirely in Airflow 2.0.".format(i=macro_name))
|
Ryezhang/scrapy
|
refs/heads/master
|
tests/test_responsetypes.py
|
27
|
# -*- coding: utf-8 -*-
import unittest
from scrapy.responsetypes import responsetypes
from scrapy.http import Response, TextResponse, XmlResponse, HtmlResponse, Headers
class ResponseTypesTest(unittest.TestCase):
def test_from_filename(self):
mappings = [
('data.bin', Response),
('file.txt', TextResponse),
('file.xml.gz', Response),
('file.xml', XmlResponse),
('file.html', HtmlResponse),
('file.unknownext', Response),
]
for source, cls in mappings:
retcls = responsetypes.from_filename(source)
assert retcls is cls, "%s ==> %s != %s" % (source, retcls, cls)
def test_from_content_disposition(self):
mappings = [
(b'attachment; filename="data.xml"', XmlResponse),
(b'attachment; filename=data.xml', XmlResponse),
(u'attachment;filename=data£.tar.gz'.encode('utf-8'), Response),
(u'attachment;filename=dataµ.tar.gz'.encode('latin-1'), Response),
(u'attachment;filename=data高.doc'.encode('gbk'), Response),
(u'attachment;filename=دورهdata.html'.encode('cp720'), HtmlResponse),
(u'attachment;filename=日本語版Wikipedia.xml'.encode('iso2022_jp'), XmlResponse),
]
for source, cls in mappings:
retcls = responsetypes.from_content_disposition(source)
assert retcls is cls, "%s ==> %s != %s" % (source, retcls, cls)
def test_from_content_type(self):
mappings = [
('text/html; charset=UTF-8', HtmlResponse),
('text/xml; charset=UTF-8', XmlResponse),
('application/xhtml+xml; charset=UTF-8', HtmlResponse),
('application/vnd.wap.xhtml+xml; charset=utf-8', HtmlResponse),
('application/xml; charset=UTF-8', XmlResponse),
('application/octet-stream', Response),
('application/x-json; encoding=UTF8;charset=UTF-8', TextResponse),
('application/json-amazonui-streaming;charset=UTF-8', TextResponse),
]
for source, cls in mappings:
retcls = responsetypes.from_content_type(source)
assert retcls is cls, "%s ==> %s != %s" % (source, retcls, cls)
def test_from_body(self):
mappings = [
(b'\x03\x02\xdf\xdd\x23', Response),
(b'Some plain text\ndata with tabs\t and null bytes\0', TextResponse),
(b'<html><head><title>Hello</title></head>', HtmlResponse),
(b'<?xml version="1.0" encoding="utf-8"', XmlResponse),
]
for source, cls in mappings:
retcls = responsetypes.from_body(source)
assert retcls is cls, "%s ==> %s != %s" % (source, retcls, cls)
def test_from_headers(self):
mappings = [
({'Content-Type': ['text/html; charset=utf-8']}, HtmlResponse),
({'Content-Type': ['application/octet-stream'], 'Content-Disposition': ['attachment; filename=data.txt']}, TextResponse),
({'Content-Type': ['text/html; charset=utf-8'], 'Content-Encoding': ['gzip']}, Response),
]
for source, cls in mappings:
source = Headers(source)
retcls = responsetypes.from_headers(source)
assert retcls is cls, "%s ==> %s != %s" % (source, retcls, cls)
def test_from_args(self):
# TODO: add more tests that check precedence between the different arguments
mappings = [
({'url': 'http://www.example.com/data.csv'}, TextResponse),
# headers takes precedence over url
({'headers': Headers({'Content-Type': ['text/html; charset=utf-8']}), 'url': 'http://www.example.com/item/'}, HtmlResponse),
({'headers': Headers({'Content-Disposition': ['attachment; filename="data.xml.gz"']}), 'url': 'http://www.example.com/page/'}, Response),
]
for source, cls in mappings:
retcls = responsetypes.from_args(**source)
assert retcls is cls, "%s ==> %s != %s" % (source, retcls, cls)
def test_custom_mime_types_loaded(self):
# check that mime.types files shipped with scrapy are loaded
self.assertEqual(responsetypes.mimetypes.guess_type('x.scrapytest')[0], 'x-scrapy/test')
if __name__ == "__main__":
unittest.main()
|
josenavas/QiiTa
|
refs/heads/master
|
qiita_pet/uimodules/__init__.py
|
1
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from .study_information_tab import StudyInformationTab
from .prep_template_tab import RawDataInfoDiv, EditInvestigationType
from .preprocessed_data_tab import PreprocessedDataTab, PreprocessedDataInfoTab
from .processed_data_tab import ProcessedDataTab, ProcessedDataInfoTab
__all__ = ['StudyInformationTab', 'EditInvestigationType', 'RawDataInfoDiv',
'PreprocessedDataTab', 'PreprocessedDataInfoTab',
'ProcessedDataTab', 'ProcessedDataInfoTab']
|
herilalaina/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_kmeans_assumptions.py
|
76
|
"""
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
|
laslabs/odoo
|
refs/heads/9.0
|
addons/website_sale/controllers/__init__.py
|
42
|
# -*- coding: utf-8 -*-
import main
import website_mail
|
samarthmed/emacs-config
|
refs/heads/master
|
.python-environments/default/lib/python2.7/site-packages/setuptools/command/install_scripts.py
|
505
|
from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/django-assets/docs/conf.py
|
16
|
# -*- coding: utf-8 -*-
#
# django-assets documentation build configuration file, created by
# sphinx-quickstart on Fri May 08 06:02:25 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# make sure we are documenting the local version with autodoc
sys.path.insert(0, os.path.abspath('..'))
import django_assets
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-assets'
copyright = u'2009, Michael Elsdörfer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(map(str, django_assets.__version__))
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-assetsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-assets.tex', u'django-assets Documentation',
u'Michael Elsdörfer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
alfcrisci/httpie
|
refs/heads/master
|
httpie/__init__.py
|
45
|
"""
HTTPie - a CLI, cURL-like tool for humans.
"""
__author__ = 'Jakub Roztocil'
__version__ = '1.0.0-dev'
__licence__ = 'BSD'
class ExitStatus:
"""Exit status code constants."""
OK = 0
ERROR = 1
ERROR_TIMEOUT = 2
# Used only when requested with --check-status:
ERROR_HTTP_3XX = 3
ERROR_HTTP_4XX = 4
ERROR_HTTP_5XX = 5
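# Usage sketch (hypothetical caller): these constants double as process exit
# codes, e.g. sys.exit(ExitStatus.ERROR_TIMEOUT) terminates with status 2.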
|
bullapse/LED_strip_webservice
|
refs/heads/master
|
main.py
|
1
|
import led_service
import config
# import requests_toolbelt.adapters.appengine
# Use the App Engine Requests adapter. This makes sure that Requests uses
# URLFetch.
# requests_toolbelt.adapters.appengine.monkeypatch()
app = led_service.create_app()
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080, debug=True)
|
wilvk/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/postgresql/__init__.py
|
12133432
| |
dkerwin/ansible-modules-core
|
refs/heads/devel
|
database/__init__.py
|
12133432
| |
mrkiwi-nz/django-helpdesk
|
refs/heads/master
|
helpdesk/templatetags/__init__.py
|
12133432
| |
home-assistant/home-assistant
|
refs/heads/dev
|
tests/components/august/test_init.py
|
2
|
"""The tests for the august platform."""
import asyncio
from unittest.mock import patch
from aiohttp import ClientResponseError
from yalexs.authenticator_common import AuthenticationState
from yalexs.exceptions import AugustApiAIOHTTPError
from homeassistant import setup
from homeassistant.components.august.const import DOMAIN
from homeassistant.components.lock import DOMAIN as LOCK_DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_LOCK,
SERVICE_UNLOCK,
STATE_LOCKED,
STATE_ON,
)
from homeassistant.exceptions import HomeAssistantError
from tests.common import MockConfigEntry
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_august_authentication,
_mock_doorsense_enabled_august_lock_detail,
_mock_doorsense_missing_august_lock_detail,
_mock_get_config,
_mock_inoperative_august_lock_detail,
_mock_operative_august_lock_detail,
)
async def test_august_is_offline(hass):
"""Config entry state is SETUP_RETRY when august is offline."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
side_effect=asyncio.TimeoutError,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_unlock_throws_august_api_http_error(hass):
"""Test unlock throws correct error on http error."""
mocked_lock_detail = await _mock_operative_august_lock_detail(hass)
def _unlock_return_activities_side_effect(access_token, device_id):
raise AugustApiAIOHTTPError("This should bubble up as its user consumable")
await _create_august_with_devices(
hass,
[mocked_lock_detail],
api_call_side_effects={
"unlock_return_activities": _unlock_return_activities_side_effect
},
)
last_err = None
data = {ATTR_ENTITY_ID: "lock.a6697750d607098bae8d6baa11ef8063_name"}
try:
await hass.services.async_call(LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True)
except HomeAssistantError as err:
last_err = err
assert (
str(last_err)
== "A6697750D607098BAE8D6BAA11EF8063 Name: This should bubble up as its user consumable"
)
async def test_lock_throws_august_api_http_error(hass):
"""Test lock throws correct error on http error."""
mocked_lock_detail = await _mock_operative_august_lock_detail(hass)
def _lock_return_activities_side_effect(access_token, device_id):
raise AugustApiAIOHTTPError("This should bubble up as its user consumable")
await _create_august_with_devices(
hass,
[mocked_lock_detail],
api_call_side_effects={
"lock_return_activities": _lock_return_activities_side_effect
},
)
last_err = None
data = {ATTR_ENTITY_ID: "lock.a6697750d607098bae8d6baa11ef8063_name"}
try:
await hass.services.async_call(LOCK_DOMAIN, SERVICE_LOCK, data, blocking=True)
except HomeAssistantError as err:
last_err = err
assert (
str(last_err)
== "A6697750D607098BAE8D6BAA11EF8063 Name: This should bubble up as its user consumable"
)
async def test_inoperative_locks_are_filtered_out(hass):
"""Ensure inoperative locks do not get setup."""
august_operative_lock = await _mock_operative_august_lock_detail(hass)
august_inoperative_lock = await _mock_inoperative_august_lock_detail(hass)
await _create_august_with_devices(
hass, [august_operative_lock, august_inoperative_lock]
)
lock_abc_name = hass.states.get("lock.abc_name")
assert lock_abc_name is None
lock_a6697750d607098bae8d6baa11ef8063_name = hass.states.get(
"lock.a6697750d607098bae8d6baa11ef8063_name"
)
assert lock_a6697750d607098bae8d6baa11ef8063_name.state == STATE_LOCKED
async def test_lock_has_doorsense(hass):
"""Check to see if a lock has doorsense."""
doorsenselock = await _mock_doorsense_enabled_august_lock_detail(hass)
nodoorsenselock = await _mock_doorsense_missing_august_lock_detail(hass)
await _create_august_with_devices(hass, [doorsenselock, nodoorsenselock])
binary_sensor_online_with_doorsense_name_open = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name_open.state == STATE_ON
binary_sensor_missing_doorsense_id_name_open = hass.states.get(
"binary_sensor.missing_doorsense_id_name_open"
)
assert binary_sensor_missing_doorsense_id_name_open is None
async def test_auth_fails(hass):
"""Config entry state is SETUP_ERROR when auth fails."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
assert hass.config_entries.flow.async_progress() == []
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
side_effect=ClientResponseError(None, None, status=401),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
flows = hass.config_entries.flow.async_progress()
assert flows[0]["step_id"] == "reauth_validate"
async def test_bad_password(hass):
"""Config entry state is SETUP_ERROR when the password has been changed."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
assert hass.config_entries.flow.async_progress() == []
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
return_value=_mock_august_authentication(
"original_token", 1234, AuthenticationState.BAD_PASSWORD
),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
flows = hass.config_entries.flow.async_progress()
assert flows[0]["step_id"] == "reauth_validate"
async def test_http_failure(hass):
"""Config entry state is SETUP_RETRY when august is offline."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
assert hass.config_entries.flow.async_progress() == []
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
side_effect=ClientResponseError(None, None, status=500),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
assert hass.config_entries.flow.async_progress() == []
async def test_unknown_auth_state(hass):
"""Config entry state is SETUP_ERROR when august is in an unknown auth state."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
assert hass.config_entries.flow.async_progress() == []
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
return_value=_mock_august_authentication("original_token", 1234, None),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
flows = hass.config_entries.flow.async_progress()
assert flows[0]["step_id"] == "reauth_validate"
async def test_requires_validation_state(hass):
"""Config entry state is SETUP_ERROR when august requires validation."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
assert hass.config_entries.flow.async_progress() == []
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
return_value=_mock_august_authentication(
"original_token", 1234, AuthenticationState.REQUIRES_VALIDATION
),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
assert len(hass.config_entries.flow.async_progress()) == 1
assert hass.config_entries.flow.async_progress()[0]["context"]["source"] == "reauth"
async def test_unknown_auth_http_401(hass):
"""Config entry state is SETUP_ERROR when august gets an http."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=_mock_get_config()[DOMAIN],
title="August august",
)
config_entry.add_to_hass(hass)
assert hass.config_entries.flow.async_progress() == []
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"yalexs.authenticator_async.AuthenticatorAsync.async_authenticate",
return_value=_mock_august_authentication("original_token", 1234, None),
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
flows = hass.config_entries.flow.async_progress()
assert flows[0]["step_id"] == "reauth_validate"
async def test_load_unload(hass):
"""Config entry can be unloaded."""
august_operative_lock = await _mock_operative_august_lock_detail(hass)
august_inoperative_lock = await _mock_inoperative_august_lock_detail(hass)
config_entry = await _create_august_with_devices(
hass, [august_operative_lock, august_inoperative_lock]
)
assert config_entry.state is ConfigEntryState.LOADED
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
|
tomkun/stem
|
refs/heads/DescReader
|
test/unit/version.py
|
5
|
"""
Unit tests for stem.version.Version parsing and the Version class.
"""
import unittest
import stem.util.system
import stem.version
from stem.version import Version
from test import mocking
TOR_VERSION_OUTPUT = """Mar 22 23:09:37.088 [notice] Tor v0.2.2.35 \
(git-73ff13ab3cc9570d). This is experimental software. Do not rely on it for \
strong anonymity. (Running on Linux i686)
Tor version 0.2.2.35 (git-73ff13ab3cc9570d)."""
class TestVersion(unittest.TestCase):
def tearDown(self):
mocking.revert_mocking()
def test_get_system_tor_version(self):
    # Clear the version cache both before and after the test. Without this,
    # prior results short-circuit the system call, and future calls would
    # return the mocked value.
stem.version.VERSION_CACHE = {}
def _mock_call(command):
if command == "tor --version":
return TOR_VERSION_OUTPUT.splitlines()
else:
raise ValueError("stem.util.system.call received an unexpected command: %s" % command)
mocking.mock(stem.util.system.call, _mock_call)
version = stem.version.get_system_tor_version()
self.assert_versions_match(version, 0, 2, 2, 35, None, "git-73ff13ab3cc9570d")
self.assertEqual("73ff13ab3cc9570d", version.git_commit)
stem.version.VERSION_CACHE = {}
def test_parsing(self):
"""
Tests parsing by the Version class constructor.
"""
    # valid versions with various numbers of components
version = Version("0.1.2.3-tag")
self.assert_versions_match(version, 0, 1, 2, 3, "tag", None)
version = Version("0.1.2.3")
self.assert_versions_match(version, 0, 1, 2, 3, None, None)
version = Version("0.1.2-tag")
self.assert_versions_match(version, 0, 1, 2, None, "tag", None)
version = Version("0.1.2")
self.assert_versions_match(version, 0, 1, 2, None, None, None)
# checks an empty tag
version = Version("0.1.2.3-")
self.assert_versions_match(version, 0, 1, 2, 3, "", None)
version = Version("0.1.2-")
self.assert_versions_match(version, 0, 1, 2, None, "", None)
    # check with extra information
version = Version("0.1.2.3-tag (git-73ff13ab3cc9570d)")
self.assert_versions_match(version, 0, 1, 2, 3, "tag", "git-73ff13ab3cc9570d")
self.assertEqual("73ff13ab3cc9570d", version.git_commit)
version = Version("0.1.2.3-tag ()")
self.assert_versions_match(version, 0, 1, 2, 3, "tag", "")
version = Version("0.1.2 (git-73ff13ab3cc9570d)")
self.assert_versions_match(version, 0, 1, 2, None, None, "git-73ff13ab3cc9570d")
# checks invalid version strings
self.assertRaises(ValueError, stem.version.Version, "")
self.assertRaises(ValueError, stem.version.Version, "1.2.3.4nodash")
self.assertRaises(ValueError, stem.version.Version, "1.2.3.a")
self.assertRaises(ValueError, stem.version.Version, "1.2.a.4")
self.assertRaises(ValueError, stem.version.Version, "1x2x3x4")
self.assertRaises(ValueError, stem.version.Version, "12.3")
self.assertRaises(ValueError, stem.version.Version, "1.-2.3")
def test_comparison(self):
"""
    Tests comparison between Version instances.
"""
# check for basic incrementing in each portion
self.assert_version_is_greater("1.1.2.3-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.2.2.3-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.1.3.3-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.1.2.4-tag", "0.1.2.3-tag")
self.assert_version_is_greater("0.1.2.3-ugg", "0.1.2.3-tag")
self.assert_version_is_equal("0.1.2.3-tag", "0.1.2.3-tag")
# check with common tags
self.assert_version_is_greater("0.1.2.3-beta", "0.1.2.3-alpha")
self.assert_version_is_greater("0.1.2.3-rc", "0.1.2.3-beta")
# checks that a missing patch level equals zero
self.assert_version_is_equal("0.1.2", "0.1.2.0")
self.assert_version_is_equal("0.1.2-tag", "0.1.2.0-tag")
# checks for missing patch or status
self.assert_version_is_greater("0.1.2.3-tag", "0.1.2.3")
self.assert_version_is_greater("0.1.2.3-tag", "0.1.2-tag")
self.assert_version_is_greater("0.1.2.3-tag", "0.1.2")
self.assert_version_is_equal("0.1.2.3", "0.1.2.3")
self.assert_version_is_equal("0.1.2", "0.1.2")
def test_nonversion_comparison(self):
"""
Checks that we can be compared with other types.
    In Python 3 only equality comparisons work; greater-than and less-than
    comparisons result in a TypeError.
"""
test_version = Version("0.1.2.3")
self.assertNotEqual(test_version, None)
self.assertNotEqual(test_version, 5)
def test_string(self):
"""
Tests the Version -> string conversion.
"""
# checks conversion with various numbers of arguments
self.assert_string_matches("0.1.2.3-tag")
self.assert_string_matches("0.1.2.3")
self.assert_string_matches("0.1.2")
def test_requirements_greater_than(self):
"""
Checks a VersionRequirements with a single greater_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version("0.2.2.36"))
self.assertTrue(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.37") >= requirements)
self.assertTrue(Version("0.2.3.36") >= requirements)
self.assertFalse(Version("0.2.2.35") >= requirements)
self.assertFalse(Version("0.2.1.38") >= requirements)
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version("0.2.2.36"), False)
self.assertFalse(Version("0.2.2.35") >= requirements)
self.assertFalse(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.37") >= requirements)
def test_requirements_less_than(self):
"""
Checks a VersionRequirements with a single less_than rule.
"""
requirements = stem.version._VersionRequirements()
requirements.less_than(Version("0.2.2.36"))
self.assertTrue(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.35") >= requirements)
self.assertTrue(Version("0.2.1.38") >= requirements)
self.assertFalse(Version("0.2.2.37") >= requirements)
self.assertFalse(Version("0.2.3.36") >= requirements)
requirements = stem.version._VersionRequirements()
requirements.less_than(Version("0.2.2.36"), False)
self.assertFalse(Version("0.2.2.37") >= requirements)
self.assertFalse(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.35") >= requirements)
def test_requirements_in_range(self):
"""
Checks a VersionRequirements with a single in_range rule.
"""
requirements = stem.version._VersionRequirements()
requirements.in_range(Version("0.2.2.36"), Version("0.2.2.38"))
self.assertFalse(Version("0.2.2.35") >= requirements)
self.assertTrue(Version("0.2.2.36") >= requirements)
self.assertTrue(Version("0.2.2.37") >= requirements)
self.assertFalse(Version("0.2.2.38") >= requirements)
# rule for 'anything in the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.in_range(Version("0.2.2.0"), Version("0.2.3.0"))
for index in xrange(0, 100):
self.assertTrue(Version("0.2.2.%i" % index) >= requirements)
def test_requirements_multiple_rules(self):
"""
Checks a VersionRequirements is the logical 'or' when it has multiple rules.
"""
# rule to say 'anything but the 0.2.2.x series'
requirements = stem.version._VersionRequirements()
requirements.greater_than(Version("0.2.3.0"))
requirements.less_than(Version("0.2.2.0"), False)
self.assertTrue(Version("0.2.3.0") >= requirements)
self.assertFalse(Version("0.2.2.0") >= requirements)
for index in xrange(0, 100):
self.assertFalse(Version("0.2.2.%i" % index) >= requirements)
def assert_versions_match(self, version, major, minor, micro, patch, status, extra):
"""
Asserts that the values for a types.Version instance match the given
values.
"""
self.assertEqual(major, version.major)
self.assertEqual(minor, version.minor)
self.assertEqual(micro, version.micro)
self.assertEqual(patch, version.patch)
self.assertEqual(status, version.status)
self.assertEqual(extra, version.extra)
if extra is None:
self.assertEqual(None, version.git_commit)
def assert_version_is_greater(self, first_version, second_version):
"""
    Asserts that the parsed version of the first version is greater than the
second (also checking the inverse).
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1 > version2, True)
self.assertEqual(version1 < version2, False)
def assert_version_is_equal(self, first_version, second_version):
"""
Asserts that the parsed version of the first version equals the second.
"""
version1 = Version(first_version)
version2 = Version(second_version)
self.assertEqual(version1, version2)
def assert_string_matches(self, version):
"""
Parses the given version string then checks that its string representation
matches the input.
"""
self.assertEqual(version, str(Version(version)))
|
devssay/newbook
|
refs/heads/master
|
bs4/builder/__init__.py
|
73
|
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
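# Usage sketch (feature constants defined above; the result depends on which
# builders were registered at import time):
#
# builder_class = builder_registry.lookup(HTML, FAST)
# soup_builder = builder_class() if builder_class is not None else None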
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
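    # Illustrative example (hypothetical attribute values): for an <a> tag,
    # _replace_cdata_list_attribute_values('a', {'rel': 'nofollow noopener',
    # 'href': '/x'}) returns {'rel': ['nofollow', 'noopener'], 'href': '/x'}.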
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
|
juvoinc/airflow
|
refs/heads/master
|
airflow/utils/db.py
|
13
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from functools import wraps
import logging
import os
from alembic.config import Config
from alembic import command
from alembic.migration import MigrationContext
from sqlalchemy import event, exc
from sqlalchemy.pool import Pool
from airflow import settings
from airflow import configuration
def provide_session(func):
"""
Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, pass the session to the function; if not, this
    wrapper will create one and close it for you.
"""
@wraps(func)
def wrapper(*args, **kwargs):
needs_session = False
arg_session = 'session'
func_params = func.__code__.co_varnames
session_in_args = arg_session in func_params and \
func_params.index(arg_session) < len(args)
if not (arg_session in kwargs or session_in_args):
needs_session = True
session = settings.Session()
kwargs[arg_session] = session
result = func(*args, **kwargs)
if needs_session:
session.expunge_all()
session.commit()
session.close()
return result
return wrapper
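# Usage sketch (hypothetical function; the DagRun query is illustrative):
#
# @provide_session
# def count_dag_runs(dag_id, session=None):
#     from airflow import models
#     return session.query(models.DagRun).filter(
#         models.DagRun.dag_id == dag_id).count()
#
# count_dag_runs('example_dag')                # wrapper opens/commits/closes
# count_dag_runs('example_dag', session=sess)  # existing session is reused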
def pessimistic_connection_handling():
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
'''
Disconnect Handling - Pessimistic, taken from:
http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html
'''
cursor = dbapi_connection.cursor()
try:
cursor.execute("SELECT 1")
        except Exception:
raise exc.DisconnectionError()
cursor.close()
@provide_session
def merge_conn(conn, session=None):
from airflow import models
C = models.Connection
if not session.query(C).filter(C.conn_id == conn.conn_id).first():
session.add(conn)
session.commit()
@event.listens_for(settings.engine, "connect")
def connect(dbapi_connection, connection_record):
connection_record.info['pid'] = os.getpid()
@event.listens_for(settings.engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info['pid'] != pid:
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid {}, "
"attempting to check out in pid {}".format(connection_record.info['pid'], pid)
)
def initdb():
session = settings.Session()
from airflow import models
upgradedb()
merge_conn(
models.Connection(
conn_id='airflow_db', conn_type='mysql',
host='localhost', login='root', password='',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='airflow_ci', conn_type='mysql',
host='localhost', login='root',
schema='airflow_ci'))
merge_conn(
models.Connection(
conn_id='beeline_default', conn_type='beeline', port="10000",
host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
schema='default'))
merge_conn(
models.Connection(
conn_id='bigquery_default', conn_type='bigquery'))
merge_conn(
models.Connection(
conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow', password='airflow',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='presto_default', conn_type='presto',
host='localhost',
schema='hive', port=3400))
merge_conn(
models.Connection(
conn_id='hive_cli_default', conn_type='hive_cli',
schema='default',))
merge_conn(
models.Connection(
conn_id='hiveserver2_default', conn_type='hiveserver2',
host='localhost',
schema='default', port=10000))
merge_conn(
models.Connection(
conn_id='metastore_default', conn_type='hive_metastore',
host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
port=9083))
merge_conn(
models.Connection(
conn_id='mysql_default', conn_type='mysql',
login='root',
host='localhost'))
merge_conn(
models.Connection(
conn_id='postgres_default', conn_type='postgres',
login='postgres',
schema='airflow',
host='localhost'))
merge_conn(
models.Connection(
conn_id='sqlite_default', conn_type='sqlite',
host='/tmp/sqlite_default.db'))
merge_conn(
models.Connection(
conn_id='http_default', conn_type='http',
host='https://www.google.com/'))
merge_conn(
models.Connection(
conn_id='mssql_default', conn_type='mssql',
host='localhost', port=1433))
merge_conn(
models.Connection(
conn_id='vertica_default', conn_type='vertica',
host='localhost', port=5433))
merge_conn(
models.Connection(
conn_id='webhdfs_default', conn_type='hdfs',
host='localhost', port=50070))
merge_conn(
models.Connection(
conn_id='ssh_default', conn_type='ssh',
host='localhost'))
merge_conn(
models.Connection(
conn_id='fs_default', conn_type='fs',
extra='{"path": "/"}'))
merge_conn(
models.Connection(
conn_id='aws_default', conn_type='aws',
extra='{"region_name": "us-east-1"}'))
merge_conn(
models.Connection(
conn_id='emr_default', conn_type='emr',
extra='''
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Slave nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
]
},
"Ec2KeyName": "mykey",
"KeepJobFlowAliveWhenNoSteps": false,
"TerminationProtected": false,
"Ec2SubnetId": "somesubnet",
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
'''))
# Known event types
KET = models.KnownEventType
if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
session.add(KET(know_event_type='Holiday'))
if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
session.add(KET(know_event_type='Outage'))
if not session.query(KET).filter(
KET.know_event_type == 'Natural Disaster').first():
session.add(KET(know_event_type='Natural Disaster'))
if not session.query(KET).filter(
KET.know_event_type == 'Marketing Campaign').first():
session.add(KET(know_event_type='Marketing Campaign'))
session.commit()
dagbag = models.DagBag()
# Save individual DAGs in the ORM
now = datetime.utcnow()
for dag in dagbag.dags.values():
models.DAG.sync_to_db(dag, dag.owner, now)
# Deactivate the unknown ones
models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
Chart = models.Chart
chart_label = "Airflow task instance by type"
chart = session.query(Chart).filter(Chart.label == chart_label).first()
if not chart:
chart = Chart(
label=chart_label,
conn_id='airflow_db',
chart_type='bar',
x_is_date=False,
sql=(
"SELECT state, COUNT(1) as number "
"FROM task_instance "
"WHERE dag_id LIKE 'example%' "
"GROUP BY state"),
)
session.add(chart)
session.commit()
def upgradedb():
logging.info("Creating tables")
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(current_dir, '..'))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory)
config.set_main_option('sqlalchemy.url',
configuration.get('core', 'SQL_ALCHEMY_CONN'))
command.upgrade(config, 'heads')
def resetdb():
'''
Clear out the database
'''
from airflow import models
logging.info("Dropping tables that exist")
models.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
initdb()
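# A hedged usage note: initdb() above is safe to re-run, since merge_conn()
# only inserts a Connection when its conn_id is not already present, while
# resetdb() drops every table (including Alembic's version table) before
# rebuilding the schema via initdb().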
|
kirca/odoo
|
refs/heads/master
|
addons/website_sale/controllers/__init__.py
|
7372
|
import main
|
macs03/demo-cms
|
refs/heads/master
|
cms/lib/python2.7/site-packages/cms/test_utils/project/customuserapp/models.py
|
11
|
# -*- coding: utf-8 -*-
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.http import urlquote
try:
import re
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core import validators
from django.utils.translation import ugettext_lazy as _
class CustomUserManager(UserManager):
def _create_user(self, username, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
my_new_field = models.IntegerField(null=True, blank=True, default=42)
objects = CustomUserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.username)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
return "A user called %s" % self.username
def get_short_name(self):
"Returns the short name for the user."
return self.username
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
except ImportError:
    from django.contrib.auth.models import User  # nopyflakes
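# A hedged usage note: activating this model requires pointing Django's
# AUTH_USER_MODEL setting at it; the app label below is assumed from the
# enclosing directory name.
#
#   AUTH_USER_MODEL = 'customuserapp.User'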
|
opps/opps
|
refs/heads/master
|
opps/images/forms.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from opps.core.widgets import OppsEditor
from .models import Image
from .widgets import CropExample
class ImageModelForm(forms.ModelForm):
crop_example = forms.CharField(label=_('Crop Example'), required=False,
widget=CropExample())
crop_x1 = forms.CharField(label=_(u'Crop X1'), required=False,
widget=forms.HiddenInput())
crop_x2 = forms.CharField(label=_(u'Crop X2'), required=False,
widget=forms.HiddenInput())
crop_y1 = forms.CharField(label=_(u'Crop Y1'), required=False,
widget=forms.HiddenInput())
crop_y2 = forms.CharField(label=_(u'Crop Y2'), required=False,
widget=forms.HiddenInput())
archive = forms.ImageField(label=_('Archive'), required=True)
class Meta:
model = Image
widgets = {'description': OppsEditor()}
class PopUpImageForm(ImageModelForm):
source = forms.CharField(
required=True,
label=_(u'Source'),
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PopUpImageForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
instance = super(PopUpImageForm, self).save(commit=False)
if not instance.pk:
instance.published = True
instance.user = self.user
instance.save()
return instance
class Meta:
model = Image
widgets = {'description': OppsEditor()}
fields = ('site', 'title', 'archive', 'description', 'tags',
'source')
|
squirrelo/qiita
|
refs/heads/master
|
qiita_pet/uimodules/processed_data_tab.py
|
2
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config
from qiita_pet.util import STATUS_STYLER, is_localhost
from .base_uimodule import BaseUIModule
class ProcessedDataTab(BaseUIModule):
@execute_as_transaction
def render(self, study, full_access, allow_approval, approval_deny_msg):
        # currently all processed data are 'BIOM'
pd_gen = [ar for ar in study.artifacts()
if ar.artifact_type == 'BIOM']
avail_pd = [(pd, STATUS_STYLER[pd.visibility]) for pd in pd_gen
if full_access or pd.visibility == 'public']
return self.render_string(
"study_description_templates/processed_data_tab.html",
available_processed_data=avail_pd,
study_id=study.id,
allow_approval=allow_approval,
approval_deny_msg=approval_deny_msg)
class ProcessedDataInfoTab(BaseUIModule):
@execute_as_transaction
def render(self, study_id, processed_data, allow_approval,
approval_deny_msg):
user = self.current_user
# The request approval, approve processed data and make public buttons
# are mutually exclusive. Only one of them will be shown, depending on
# the current status of the processed data
status = processed_data.visibility
btn_to_show = None
if status == 'sandbox' and qiita_config.require_approval:
# The request approval button only appears if the processed data is
# sandboxed and the qiita_config specifies that the approval should
# be requested
btn_to_show = 'request_approval'
elif (user.level == 'admin' and status == 'awaiting_approval' and
qiita_config.require_approval):
# The approve processed data button only appears if the user is an
# admin, the processed data is waiting to be approved and the qiita
# config requires processed data approval
btn_to_show = 'approve'
elif status == 'private':
# The make public button only appears if the status is private
btn_to_show = 'make_public'
# The revert to sandbox button only appears if the processed data is
# not sandboxed or public
show_revert_btn = status not in {'sandbox', 'public'}
        # processed data can only have one preprocessed_data parent
preprocessed_data_id = processed_data.parents[0].id
process_date = str(processed_data.timestamp)
filepaths = processed_data.filepaths
is_local_request = is_localhost(self.request.headers['host'])
return self.render_string(
"study_description_templates/processed_data_info_tab.html",
pd_id=processed_data.id,
preprocessed_data_id=preprocessed_data_id,
process_date=process_date,
filepaths=filepaths,
is_local_request=is_local_request,
btn_to_show=btn_to_show,
show_revert_btn=show_revert_btn,
allow_approval=allow_approval,
approval_deny_msg=approval_deny_msg)
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_Pandas_Numpy/source3.6/pandas/io/gbq.py
|
1
|
""" Google BigQuery support """
def _try_import():
# since pandas is a dependency of pandas-gbq
# we need to import on first use
try:
import pandas_gbq
except ImportError:
# give a nice error message
raise ImportError("Load data from Google BigQuery\n"
"\n"
"the pandas-gbq package is not installed\n"
"see the docs: https://pandas-gbq.readthedocs.io\n"
"\n"
"you can install via pip or conda:\n"
"pip install pandas-gbq\n"
"conda install pandas-gbq -c conda-forge\n")
return pandas_gbq
def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, verbose=None, private_key=None, dialect='legacy',
**kwargs):
"""
Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str
Google BigQuery Account project ID.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
private_key : str, optional
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
dialect : str, default 'legacy'
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
verbose : boolean, deprecated
*Deprecated in Pandas-GBQ 0.4.0.* Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
kwargs : dict
Arbitrary keyword arguments.
configuration (dict): query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
pandas.DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
"""
pandas_gbq = _try_import()
return pandas_gbq.read_gbq(
query, project_id=project_id,
index_col=index_col, col_order=col_order,
reauth=reauth, verbose=verbose,
private_key=private_key,
dialect=dialect,
**kwargs)
def to_gbq(dataframe, destination_table, project_id, chunksize=None,
verbose=None, reauth=False, if_exists='fail', private_key=None,
auth_local_webserver=False, table_schema=None):
pandas_gbq = _try_import()
return pandas_gbq.to_gbq(
dataframe, destination_table, project_id, chunksize=chunksize,
verbose=verbose, reauth=reauth, if_exists=if_exists,
private_key=private_key, auth_local_webserver=auth_local_webserver,
table_schema=table_schema)
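# A minimal usage sketch (not part of this module): the project, dataset and
# table names below are hypothetical, and the pandas-gbq package must be
# installed and authenticated for either call to succeed.
#
#   import pandas as pd
#   df = pd.read_gbq("SELECT 1 AS x", project_id="my-project",
#                    dialect="standard")
#   df.to_gbq("my_dataset.my_table", project_id="my-project",
#             if_exists="fail")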
|
40223209/2015cdbg5_0420
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/signal.py
|
743
|
"""This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler) :
pass
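# A minimal usage sketch: in this Brython stub signal() is a no-op, but the
# call shape mirrors CPython's signal module, where the handler receives the
# signal number and the interrupted stack frame.
#
#   def handler(signalnum, frame):
#       print('caught signal %d' % signalnum)
#   signal(SIGINT, handler)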
|
atruberg/django-custom
|
refs/heads/master
|
django/contrib/gis/tests/geogapp/__init__.py
|
12133432
| |
myles/dayone
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
StrellaGroup/frappe
|
refs/heads/develop
|
frappe/docs/assets/img/app-development/__init__.py
|
12133432
| |
adoosii/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/content/course_overviews/migrations/0002_add_days_early_for_beta.py
|
66
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseOverview.days_early_for_beta'
        # The new days_early_for_beta column defaults to null. Courses already
        # cached in this table may actually have a non-null days_early_for_beta,
        # so a null cached value would be wrong; clear the table before adding
        # the column so those rows are recomputed.
db.clear_table('course_overviews_courseoverview')
db.add_column('course_overviews_courseoverview', 'days_early_for_beta',
self.gf('django.db.models.fields.FloatField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseOverview.days_early_for_beta'
db.delete_column('course_overviews_courseoverview', 'days_early_for_beta')
models = {
'course_overviews.courseoverview': {
'Meta': {'object_name': 'CourseOverview'},
'_location': ('xmodule_django.models.UsageKeyField', [], {'max_length': '255'}),
'_pre_requisite_courses_json': ('django.db.models.fields.TextField', [], {}),
'advertised_start': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'cert_name_long': ('django.db.models.fields.TextField', [], {}),
'cert_name_short': ('django.db.models.fields.TextField', [], {}),
'certificates_display_behavior': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'certificates_show_before_end': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_image_url': ('django.db.models.fields.TextField', [], {}),
'days_early_for_beta': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'display_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'display_number_with_default': ('django.db.models.fields.TextField', [], {}),
'display_org_with_default': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'end_of_course_survey_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'facebook_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'has_any_active_web_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'primary_key': 'True', 'db_index': 'True'}),
'lowest_passing_grade': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'mobile_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'social_sharing_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'visible_to_staff_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['course_overviews']
|
brahle/I-Rcbot
|
refs/heads/master
|
irc/mysocket.py
|
1
|
#!/usr/bin/env python2.6
# Zeckviz IRC bot
# Copyright (C) 2011 Bruno Rahle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
class MySocket(socket.socket):
"""Extends socket.socket class and adds the functionality to reads the data
from socket line by line.
"""
BUFFER_SIZE = 4096 # size of the buffer to read
def __init__(self, host, port):
"""Creates the socket.
"""
super(MySocket, self).__init__()
self.connect((host, port))
self._buffer = ''
self._pos = 0
def readline(self):
"""Reads the next line from the socket.
NOTE: Ignores the timeout and blocking status. It just waits for the
complete line to be sent to the socket and returns it.
TODO: account for timeout and blocking status.
"""
        line = ''
        while True:
            while (self._pos == len(self._buffer)):
                self._buffer = self.recv(self.BUFFER_SIZE)
                self._pos = 0
            end = self._buffer.find('\n', self._pos)
            if end == -1:
                # No newline in the buffer yet: consume it all and keep reading.
                line = line + self._buffer[self._pos:]
                self._pos = len(self._buffer)
            else:
                line = line + self._buffer[self._pos:end]
                self._pos = end + 1
                return line
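# A hedged usage sketch (host and port are hypothetical): readline() blocks
# until a full newline-terminated line has been received from the server.
#
#   sock = MySocket('irc.example.org', 6667)
#   line = sock.readline()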
|
altsen/diandiyun-platform
|
refs/heads/master
|
cms/lib/xblock/mixin.py
|
25
|
"""
Mixin defining common Studio functionality
"""
import datetime
from xblock.fields import Scope, Field, Integer, XBlockMixin
class DateTuple(Field):
"""
Field that stores datetime objects as time tuples
"""
def from_json(self, value):
return datetime.datetime(*value[0:6])
def to_json(self, value):
if value is None:
return None
return list(value.timetuple())
class CmsBlockMixin(XBlockMixin):
"""
Mixin with fields common to all blocks in Studio
"""
published_date = DateTuple(help="Date when the module was published", scope=Scope.settings)
published_by = Integer(help="Id of the user who published this module", scope=Scope.settings)
|
sppalkia/weld
|
refs/heads/master
|
weld-python/weld/encoders/numpy.py
|
2
|
"""
Implements encoders for NumPy values.
The Weld package includes native convertors for NumPy arrays because NumPy is
the standard way for interacting with C-like array data.
The encoder in this package accepts ndarray or its subclasses. The decoder in
this module returns a subclass of ndarray called `weldbasearray`, which may
hold a reference to a `WeldContext`. This prevents arrays backed by memory
allocated in Weld from being freed before the array's reference count drops to
0.
Zero-copy conversions (in particular, to 1D arrays) are implemented here
directly since they only involve a pointer copy.
"""
import ctypes
import numpy as np
from weld.encoders.struct import StructWeldEncoder, StructWeldDecoder
from weld.types import *
# We just need this for the path.
import weld.encoders._strings
class weldbasearray(np.ndarray):
""" A NumPy array possibly backed by a `WeldContext`.
This class is a wrapper around the NumPy `ndarray` class, but it contains
an additional `weld_context` attribute. This attribute references the
memory that backs the array, if the array was returned by Weld (or created
from another array that was returned by Weld). It prevents memory owned by
the context from being freed before all references to the array are
deleted.
This class also contains an additional method, `copy2numpy`, which
deep-copies the data referenced by this array to a regular `ndarray`. The
resulting array does not hold a reference to the context or the original
array.
    If the `weld_context` attribute is `None`, this class acts like a regular
`ndarray`, and the `copy2numpy` function simply copies this array.
"""
def __new__(cls, input_array, weld_context=None):
""" Instance initializer.
Parameters
----------
weld_context : WeldContext or None
If this is not `None`, it should be the context that owns the
memory for `input_array`.
"""
obj = np.asarray(input_array).view(cls)
obj.weld_context = weld_context
return obj
def __array_finalize__(self, obj):
""" Finalizes array. See the NumPy documentation. """
if obj is None:
return
self.weld_context = getattr(obj, 'weld_context', None)
def copy2numpy(self):
""" Copies this array's data into a new NumPy `ndarray`.
This is an alias for `np.array(arr, copy=True)`
Examples
--------
>>> arr = weldbasearray([1, 2, 3])
>>> arr
weldbasearray([1, 2, 3])
>>> arr.copy2numpy()
array([1, 2, 3])
"""
return np.array(self, copy=True)
# Maps a string dtype representation to a Weld scalar type.
_known_types = {
'int8': I8(),
'int16': I16(),
'int32': I32(),
'int64': I64(),
'uint8': U8(),
'uint16': U16(),
'uint32': U32(),
'uint64': U64(),
'float32': F32(),
'float': F64(),
'double': F64(),
'float64': F64(),
'bool': Bool(),
}
# Reverse of the above.
_known_types_weld2dtype = {v: k for k, v in _known_types.items()}
def binop_output_type(left_ty, right_ty, truediv=False):
"""
Returns the output type when applying an arithmetic binary operator
with the given input types.
Parameters
----------
left_ty : WeldType
right_ty : WeldType
truediv: boolean
Division has some special rules.
Returns
-------
WeldType
Examples
--------
>>> binop_output_type(Bool(), Bool())
bool
>>> binop_output_type(I8(), U16())
i32
>>> binop_output_type(U8(), U16())
u16
>>> binop_output_type(F32(), U16())
f32
>>> binop_output_type(I8(), U64())
f64
"""
if not truediv and left_ty == right_ty:
return left_ty
if truediv and left_ty == F32() and right_ty == F32():
return F32()
size_to_ty = [(ty.size, ty) for ty in _known_types.values()]
float_types = set([F32(), F64()])
int_types = set([I8(), I16(), I32(), I64()])
uint_types = set([U8(), U16(), U32(), U64()])
left_size = left_ty.size
right_size = right_ty.size
max_size = max([left_size, right_size])
has_float = left_ty in float_types or right_ty in float_types
both_uint = left_ty in uint_types and right_ty in uint_types
both_int = left_ty in int_types and right_ty in int_types
if has_float:
if max_size <= 4:
if left_size != right_size:
# float and something smaller: use float
return F32()
else:
# two floats: use double
return F64()
# one input is a double: use double.
return F64()
elif truediv:
return F64()
else:
# Rule here is to use the biggest type if the two types have different
# sizes, or to use one (signed) bigger size if they have the same size
# (but are different types, e.g., 'i8' and 'u8').
if left_ty == Bool():
return right_ty
elif right_ty == Bool():
return left_ty
if both_uint or both_int:
if left_size > right_size:
return left_ty
elif right_size > left_size:
return right_ty
# Sizes are same
elif max_size == 1:
return U16() if both_uint else I16()
elif max_size == 2:
return U32() if both_uint else I32()
else:
return U64() if both_uint else I64()
else:
            # Use the signed type if it is the bigger of the two; otherwise
            # use a signed int one size bigger than the max size.
if left_size != right_size:
if left_ty in int_types and max_size == left_size:
return left_ty
elif right_ty in int_types and max_size == right_size:
return right_ty
if max_size == 1:
return I16()
elif max_size == 2:
return I32()
elif max_size == 4:
return I64()
else:
# For higher precisions, always cast to f64.
return F64()
def weld_type_to_dtype(ty):
"""Converts a Weld type to a NumPy dtype.
Examples
--------
>>> weld_type_to_dtype(I32())
dtype('int32')
>>> weld_type_to_dtype(F32())
dtype('float32')
>>> weld_type_to_dtype(F64())
dtype('float64')
Parameters
----------
ty: WeldType
The type to convert
Returns
-------
dtype or None
Returns None if the type is not recognized.
"""
if ty in _known_types_weld2dtype:
return np.dtype(_known_types_weld2dtype[ty])
def dtype_to_weld_type(ty):
"""Converts a NumPy data type to a Weld type.
    The data type can be any type that can be converted to a NumPy dtype,
e.g., a string (e.g., 'int32') or a NumPy scalar type (e.g., np.int32). The
type chosen follows the rules specified by NumPy here:
https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html#dtype
For example, 'i8' will map to an int64 in Weld, since it indicates a signed
integer that has eight bytes.
Examples
--------
>>> dtype_to_weld_type('int32')
i32
>>> dtype_to_weld_type('float')
f64
>>> dtype_to_weld_type('i8')
i64
>>> dtype_to_weld_type(np.int16)
i16
Parameters
----------
ty : str or dtype or NumPy scalar type
The NumPy type to convert
Returns
-------
WeldType, or None if dtype not supported.
"""
if not isinstance(ty, np.dtype):
ty = np.dtype(ty)
ty = str(ty)
return _known_types.get(ty)
class StringConversionFuncs(object):
"""
Wrapper around string functions from the _strings module.
"""
stringfuncs = ctypes.PyDLL(weld.encoders._strings.__file__)
string_cclass = WeldVec(WeldVec(I8())).ctype_class
@staticmethod
def numpy_string_array_to_weld(arr):
func = StringConversionFuncs.stringfuncs.NumpyArrayOfStringsToWeld
func.argtypes = [ctypes.py_object]
func.restype = StringConversionFuncs.string_cclass
# Verify that the array is a NumPy array that we support.
if not isinstance(arr, np.ndarray):
raise TypeError("Expected a 'np.ndarray instance'")
if arr.dtype.char != 'S':
raise TypeError("dtype string ndarray must be 'S'")
result = func(arr)
return result
@staticmethod
def weld_string_array_to_numpy(arr):
func = StringConversionFuncs.stringfuncs.WeldArrayOfStringsToNumPy
func.argtypes = [StringConversionFuncs.string_cclass]
func.restype = ctypes.py_object
result = func(arr)
assert result.dtype.char == 'S'
return result
class NumPyWeldEncoder(StructWeldEncoder):
"""
Encodes NumPy arrays as Weld arrays.
"""
@staticmethod
def _convert_1d_array(array, check_type=None):
"""Converts a 1D NumPy array into a Weld vector.
The vector holds a reference to the array.
Examples
--------
>>> arr = np.array([1, 2, 3])
>>> encoded = NumPyWeldEncoder._convert_1d_array(arr)
>>> encoded.size
3
>>> encoded.data.contents
c_long(1)
Parameters
----------
array : ndarray
A one-dimensional NumPy array.
check_type : WeldType, optional
If this value is passed, this function will check whether the
array's derived WeldType is equal to the passed type. Defaults to
None.
Returns
-------
WeldVec
"""
elem_type = dtype_to_weld_type(array.dtype)
if elem_type is None:
raise NotImplementedError
vec_type = WeldVec(elem_type)
if check_type is not None:
assert check_type == vec_type
data = array.ctypes.data_as(ctypes.POINTER(elem_type.ctype_class))
length = ctypes.c_int64(len(array))
vec = vec_type.ctype_class()
vec.data = data
vec.size = length
return vec
@staticmethod
def _is_string_array(obj):
if not isinstance(obj, np.ndarray):
return False
if obj.ndim != 1:
return False
if obj.dtype.char != 'S':
return False
return True
def encode_element(self, obj, ty):
if NumPyWeldEncoder._is_string_array(obj):
assert ty == WeldVec(WeldVec(I8()))
return StringConversionFuncs.numpy_string_array_to_weld(obj)
if isinstance(obj, np.ndarray):
if obj.ndim == 1:
return NumPyWeldEncoder._convert_1d_array(obj, check_type=ty)
else:
raise NotImplementedError
else:
raise TypeError("Unexpected type {} in NumPy encoder".format(type(obj)))
class NumPyWeldDecoder(StructWeldDecoder):
"""
Decodes an encoded Weld array into a NumPy array.
Examples
--------
>>> arr = np.array([1,2,3], dtype='int32')
>>> encoded = NumPyWeldEncoder().encode(arr, WeldVec(I32()))
>>> NumPyWeldDecoder().decode(ctypes.pointer(encoded), WeldVec(I32()))
weldbasearray([1, 2, 3], dtype=int32)
"""
@staticmethod
def _memory_buffer(c_pointer, length, dtype):
"""Creates a Python memory buffer from the pointer.
Parameters
----------
c_pointer : ctypes pointer
the pointer the buffer points to
length : int
the array length
dtype : NumPy dtype
the type of the elements in the buffer.
Returns
-------
memory
"""
arr_size = dtype.itemsize * length
buf_from_mem = ctypes.pythonapi.PyMemoryView_FromMemory
buf_from_mem.restype = ctypes.py_object
buf_from_mem.argtypes = (ctypes.c_void_p, ctypes.c_int, ctypes.c_int)
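        # 0x100 is CPython's PyBUF_READ flag, so the returned memoryview is
        # read-only.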
return buf_from_mem(c_pointer, arr_size, 0x100)
@staticmethod
def _numpy_type(weld_type):
"""Infers the ndarray dimensions and dtype from a Weld type.
Throws a TypeError if the weld_type cannot be represented as an ndarray
of some scalar type.
Parameters
----------
weld_type : WeldType
The type to check
Returns
-------
(int, dtype) tuple
            The first element is the number of dimensions and the second
element is the dtype.
>>> NumPyWeldDecoder._numpy_type(WeldVec(I8()))
(1, dtype('int8'))
>>> NumPyWeldDecoder._numpy_type(WeldVec(WeldVec(F32())))
(2, dtype('float32'))
>>> NumPyWeldDecoder._numpy_type(I32())
Traceback (most recent call last):
...
TypeError: type cannot be represented as ndarray
"""
if not isinstance(weld_type, WeldVec):
raise TypeError("type cannot be represented as ndarray")
dimension = 1
elem_type = weld_type.elem_type
if isinstance(elem_type, WeldVec):
(inner_dims, inner_ty) = NumPyWeldDecoder._numpy_type(elem_type)
dimension += inner_dims
else:
try:
inner_ty = weld_type_to_dtype(elem_type)
except:
raise TypeError("unknown element type {}".format(elem_type))
return (dimension, inner_ty)
@staticmethod
def _is_string_array(restype):
"""
Determine whether restype is an array of strings.
This is the case if the type is `vec[vec[i8]]`.
"""
if isinstance(restype, WeldVec):
if isinstance(restype.elem_type, WeldVec):
if isinstance(restype.elem_type.elem_type, I8):
return True
return False
def decode_element(self, obj, restype, context=None):
# A 1D NumPy array
obj = obj.contents
if NumPyWeldDecoder._is_string_array(restype):
return weldbasearray(StringConversionFuncs.weld_string_array_to_numpy(obj), weld_context=context)
(dims, dtype) = NumPyWeldDecoder._numpy_type(restype)
if dims == 1:
elem_type = restype.elem_type
buf = NumPyWeldDecoder._memory_buffer(obj.data, obj.size, dtype)
array = np.frombuffer(buf, dtype=dtype, count=obj.size)
return weldbasearray(array, weld_context=context)
else:
raise TypeError("Unsupported type {} in NumPy decoder".format(type(obj)))
|
nxnfufunezn/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/empty-message_wsh.py
|
284
|
#!/usr/bin/python
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = msgutil.receive_message(request)
if line == "":
msgutil.send_message(request, 'pass')
else:
msgutil.send_message(request, 'fail')
|
denys-duchier/django
|
refs/heads/master
|
django/middleware/http.py
|
39
|
from django.utils.cache import (
cc_delim_re, get_conditional_response, set_response_etag,
)
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import parse_http_date_safe
class ConditionalGetMiddleware(MiddlewareMixin):
"""
Handle conditional GET operations. If the response has an ETag or
Last-Modified header and the request has If-None-Match or If-Modified-Since,
replace the response with HttpNotModified. Add an ETag header if needed.
"""
def process_response(self, request, response):
# It's too late to prevent an unsafe request with a 412 response, and
# for a HEAD request, the response body is always empty so computing
# an accurate ETag isn't possible.
if request.method != 'GET':
return response
if self.needs_etag(response) and not response.has_header('ETag'):
set_response_etag(response)
etag = response.get('ETag')
last_modified = response.get('Last-Modified')
if last_modified:
last_modified = parse_http_date_safe(last_modified)
if etag or last_modified:
return get_conditional_response(
request,
etag=etag,
last_modified=last_modified,
response=response,
)
return response
def needs_etag(self, response):
"""Return True if an ETag header should be added to response."""
cache_control_headers = cc_delim_re.split(response.get('Cache-Control', ''))
return all(header.lower() != 'no-store' for header in cache_control_headers)
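# A minimal wiring sketch: the middleware takes effect once it is listed in
# the MIDDLEWARE setting, e.g.
#
#   MIDDLEWARE = [
#       # ...
#       'django.middleware.http.ConditionalGetMiddleware',
#   ]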
|
rhiever/bokeh
|
refs/heads/master
|
bokeh/crossfilter/plotting.py
|
42
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, BoxSelectTool
from ..plotting import figure
def cross(start, facets):
"""Creates a unique combination of provided facets.
A cross product of an initial set of starting facets with a new set of
facets.
Args:
start (list): List of lists of facets
facets (list): List of facets
Returns:
list: a list of lists of unique combinations of facets
"""
new = [[facet] for facet in facets]
result = []
for x in start:
for n in new:
result.append(x + n)
return result
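# A small worked example of cross(): every starting combination is extended
# with each new facet in turn.
#
#   cross([['a'], ['b']], [1, 2])
#   # -> [['a', 1], ['a', 2], ['b', 1], ['b', 2]]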
def hide_axes(plot, axes=('x', 'y')):
"""Hides the axes of the plot by setting component alphas.
Args:
plot (Figure): a valid figure with x and y axes
axes (tuple or list or str, optional): the axes to hide the axis on.
"""
if isinstance(axes, str):
axes = tuple(axes)
for label in axes:
axis = getattr(plot, label + 'axis')
axis = axis[0]
axis.major_label_text_alpha = 0.0
axis.major_label_text_font_size = '0pt'
axis.axis_line_alpha = 0.0
axis.major_tick_line_alpha = 0.0
axis.minor_tick_line_alpha = 0.0
plot.min_border = 0
def make_histogram_source(series):
"""Creates a ColumnDataSource containing the bins of the input series.
Args:
series (:py:class:`~pandas.Series`): description
Returns:
ColumnDataSource: includes bin centers with count of items in the bins
"""
counts, bins = np.histogram(series, bins=50)
centers = pd.rolling_mean(bins, 2)[1:]
return ColumnDataSource(data={'counts': counts, 'centers': centers})
def make_continuous_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):
"""Makes discrete, then creates representation of the bars to be plotted.
Args:
df (DataFrame): contains the data to be converted to a discrete form
x_field (str): the column in df that maps to the x dim of the plot
y_field (str, optional): the column in df that maps to the y dim of the plot
df_orig (DataFrame, optional): original dataframe that the subset ``df`` was
generated from
agg (str, optional): the type of aggregation to be used
Returns:
ColumnDataSource: aggregated, discrete form of x,y values
"""
# Generate dataframe required to use the categorical bar source function
idx, edges = pd.cut(x=df[x_field], bins=8, retbins=True, labels=False)
labels, edges = pd.cut(x=df[x_field], bins=8, retbins=True)
centers = pd.rolling_mean(edges, 2)[1:]
# store new value of x as the bin it fell into
df['centers'] = centers[idx]
df['labels'] = labels
# After making it discrete, create the categorical bar source
return make_categorical_bar_source(df, 'labels', y_field, df_orig, agg)
def make_categorical_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):
"""Creates representation of the bars to be plotted.
Args:
df (DataFrame): contains the data to be converted to a discrete form
x_field (str): the column in df that maps to the x dim of the plot
y_field (str, optional): the column in df that maps to the y dim of the plot
df_orig (DataFrame, optional): original dataframe that the subset ``df`` was
generated from
agg (str, optional): the type of aggregation to be used
Returns:
ColumnDataSource: aggregated, discrete form of x,y values
"""
if df_orig is None:
df_orig = df
# handle x-only aggregations separately
if agg == 'percent' or agg == 'count':
# percent aggregations are a special case, since pandas doesn't directly support
if agg == 'percent':
# percent on discrete col using proportion, on continuous using percent
if df[y_field].dtype == 'object':
agg_func = 'count'
else:
agg_func = 'sum'
total = float(getattr(df_orig[y_field], agg_func)())
series = df.groupby(x_field)[y_field].apply(lambda x, total_agg=total, f=agg_func:
100*(getattr(x, f)()/total_agg))
elif agg == 'count':
series = df.groupby(x_field).size()
else:
raise ValueError('Unrecognized Aggregation Type for Y of "None"')
# here we have a series where the values are the aggregation for the index (bars)
result = pd.DataFrame(data={'labels': series.index, 'heights': series.values})
# x and y aggregations
else:
# Get the y values after grouping by the x values
group = df.groupby(x_field)[y_field]
aggregate = getattr(group, agg)
result = aggregate().reset_index()
result.rename(columns={x_field: 'labels', y_field: 'heights'}, inplace=True)
return ColumnDataSource(data=result)
def make_factor_source(series):
"""Generate data source that is based on the unique values in the series.
Args:
series (:py:class:`~pandas.Series`): contains categorical-like data
Returns:
ColumnDataSource: contains the unique values from the series
"""
return ColumnDataSource(data={'factors': series.unique()})
def make_bar_plot(datasource, counts_name="counts",
centers_name="centers",
bar_width=0.7,
x_range=None,
y_range=None,
plot_width=500, plot_height=500,
tools="pan,wheel_zoom,box_zoom,save,resize,box_select,reset",
title_text_font_size="12pt"):
"""Utility function to set/calculate default parameters of a bar plot.
Args:
datasource (ColumnDataSource): represents bars to plot
counts_name (str): column corresponding to height of the bars
centers_name (str): column corresponding to the location of the bars
bar_width (float): the width of the bars in the bar plot
x_range (list): list of two values, the min and max of the x axis range
plot_width (float): width of the plot in pixels
plot_height (float): height of the plot in pixels
tools (str): comma separated tool names to add to the plot
title_text_font_size (str): size of the plot title, e.g., '12pt'
Returns:
figure: plot generated from the provided parameters
"""
top = np.max(datasource.data[counts_name])
# Create the figure container
plot = figure(
title="", title_text_font_size=title_text_font_size,
plot_width=plot_width, plot_height=plot_height,
x_range=x_range, y_range=[0, top], tools=tools)
# Get the bar values
y = [val/2.0 for val in datasource.data[counts_name]]
# Generate the bars in the figure
plot.rect(centers_name, y, bar_width, counts_name, source=datasource)
plot.min_border = 0
plot.h_symmetry = False
plot.v_symmetry = False
for tool in plot.select(type=BoxSelectTool):
tool.dimensions = ['width']
return plot
def make_histogram(datasource,
counts_name="counts",
centers_name="centers",
x_range=None,
bar_width=0.7,
plot_width=500,
plot_height=500,
min_border=40,
tools=None,
title_text_font_size="12pt"):
"""Utility function to create a histogram figure.
This is used to create the filter widgets for continuous data in
CrossFilter.
Args:
datasource (ColumnDataSource): represents bars to plot
counts_name (str): column corresponding to height of the bars
centers_name (str): column corresponding to the location of the bars
x_range (list): list of two values, the min and max of the x axis range
bar_width (float): the width of the bars in the bar plot
plot_width (float): width of the plot in pixels
plot_height (float): height of the plot in pixels
min_border (float): minimum border width of figure in pixels
tools (str): comma separated tool names to add to the plot
title_text_font_size (str): size of the plot title, e.g., '12pt'
Returns:
figure: histogram plot generated from the provided parameters
"""
start = np.min(datasource.data[centers_name]) - bar_width
    end = np.max(datasource.data[centers_name]) + bar_width
plot = make_bar_plot(
datasource, counts_name=counts_name, centers_name=centers_name,
x_range=[start, end], plot_width=plot_width, plot_height=plot_height,
tools=tools, title_text_font_size=title_text_font_size)
return plot
|
delighted/delighted-python
|
refs/heads/master
|
test/__init__.py
|
1
|
import json
import unittest
from mock import Mock, patch
import delighted
get_headers = {
'Accept': 'application/json',
'Authorization': 'Basic YWJjMTIz',
'User-Agent': "Delighted Python %s" % delighted.__version__
}
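# Note: 'YWJjMTIz' above is the base64 encoding of 'abc123', matching the
# api_key assigned in DelightedTestCase.setUp below.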
post_headers = get_headers.copy()
post_headers.update({'Content-Type': 'application/json'})
class DelightedTestCase(unittest.TestCase):
def setUp(self):
super(DelightedTestCase, self).setUp()
delighted.api_key = 'abc123'
self.request_patcher = patch('requests.request')
self.request_mock = self.request_patcher.start()
def tearDown(self):
super(DelightedTestCase, self).tearDown()
self.request_patcher.stop()
def mock_response(self, status_code, headers, data, links=None):
self.mock_multiple_responses([delighted.http_response.HTTPResponse(status_code, headers, data, links)])
def mock_multiple_responses(self, responses):
mock_responses = []
for response in responses:
mock_response = Mock()
mock_response.status_code = response.status_code
mock_response.headers = response.headers
mock_response.text = json.dumps(response.body)
mock_response.links = response.links
mock_responses.append(mock_response)
self.request_mock.side_effect = mock_responses
def mock_error(self, mock):
mock.exceptions.RequestException = Exception
mock.request.side_effect = mock.exceptions.RequestException()
def check_call(self, meth, url, headers, post_data, get_params):
if post_data is not None:
post_data = json.dumps(post_data)
self.request_mock.assert_called_once_with(meth, url,
headers=headers,
data=post_data,
params=get_params)
def check_multiple_call(self, calls):
self.assertEqual(self.request_mock.call_count, len(calls))
for call in calls:
if call['kwargs']['data'] is not None:
call['kwargs']['data'] = json.dumps(call['kwargs']['data'])
self.request_mock.assert_any_call(call['meth'], call['url'], **call['kwargs'])
|
RUFOTHEONE/RUFOTHEONE
|
refs/heads/master
|
genesisresolvers.py
|
266
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,urlparse,re,os,sys,xbmc,xbmcgui,xbmcaddon,xbmcvfs
try:
import CommonFunctions as common
except:
import commonfunctionsdummy as common
try:
import json
except:
import simplejson as json
class get(object):
def __init__(self, url):
self.result = self.worker(url)
def worker(self, url):
try:
pz = premiumize().resolve(url)
if not pz == None: return pz
rd = realdebrid().resolve(url)
if not rd == None: return rd
if url.startswith('rtmp'):
if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
return url
u = urlparse.urlparse(url).netloc
u = u.replace('www.', '').replace('embed.', '')
u = u.lower()
import sys, inspect
r = inspect.getmembers(sys.modules[__name__], inspect.isclass)
r = [i for i in r if hasattr(i[1], 'info') and u in eval(i[0])().info()['netloc']][0][0]
r = eval(r)().resolve(url)
if r == None: return r
elif type(r) == list: return r
elif not r.startswith('http'): return r
try: h = dict(urlparse.parse_qsl(r.rsplit('|', 1)[1]))
except: h = dict('')
h.update({'Referer': url, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'})
r = '%s|%s' % (r.split('|')[0], urllib.urlencode(h))
return r
except:
return url
class getUrl(object):
def __init__(self, url, close=True, proxy=None, post=None, headers=None, mobile=False, referer=None, cookie=None, output='', timeout='10'):
handlers = []
if not proxy == None:
handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
if output == 'cookie' or not close == True:
import cookielib
cookies = cookielib.LWPCookieJar()
handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
try:
if sys.version_info < (2, 7, 9): raise Exception()
import ssl; ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
handlers += [urllib2.HTTPSHandler(context=ssl_context)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
except:
pass
        # Default to an empty header dict when none (or a non-dict) was supplied.
        if not isinstance(headers, dict): headers = {}
if 'User-Agent' in headers:
pass
elif not mobile == True:
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'
else:
headers['User-Agent'] = 'Apple-iPhone/701.341'
if 'referer' in headers:
pass
elif referer == None:
headers['referer'] = url
else:
headers['referer'] = referer
if not 'Accept-Language' in headers:
headers['Accept-Language'] = 'en-US'
if 'cookie' in headers:
pass
elif not cookie == None:
headers['cookie'] = cookie
request = urllib2.Request(url, data=post, headers=headers)
response = urllib2.urlopen(request, timeout=int(timeout))
if output == 'cookie':
result = []
for c in cookies: result.append('%s=%s' % (c.name, c.value))
result = "; ".join(result)
elif output == 'geturl':
result = response.geturl()
else:
result = response.read()
if close == True:
response.close()
self.result = result
class captcha:
def worker(self, data):
self.captcha = {}
self.solvemedia(data)
if not self.type == None: return self.captcha
self.recaptcha(data)
if not self.type == None: return self.captcha
self.capimage(data)
if not self.type == None: return self.captcha
self.numeric(data)
if not self.type == None: return self.captcha
def solvemedia(self, data):
try:
url = common.parseDOM(data, "iframe", ret="src")
url = [i for i in url if 'api.solvemedia.com' in i]
if len(url) > 0: self.type = 'solvemedia'
else: self.type = None ; return
result = getUrl(url[0], referer='').result
response = common.parseDOM(result, "iframe", ret="src")
response += common.parseDOM(result, "img", ret="src")
response = [i for i in response if '/papi/media' in i][0]
response = 'http://api.solvemedia.com' + response
response = self.keyboard(response)
post = {}
f = common.parseDOM(result, "form", attrs = { "action": "verify.noscript" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'adcopy_response': response})
getUrl('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post)).result
self.captcha.update({'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'})
except:
pass
def recaptcha(self, data):
try:
url = []
if data.startswith('http://www.google.com'): url += [data]
url += common.parseDOM(data, "script", ret="src", attrs = { "type": "text/javascript" })
url = [i for i in url if 'http://www.google.com' in i]
if len(url) > 0: self.type = 'recaptcha'
else: self.type = None ; return
result = getUrl(url[0]).result
challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(result)[0]
response = 'http://www.google.com/recaptcha/api/image?c=' + challenge
response = self.keyboard(response)
self.captcha.update({'recaptcha_challenge_field': challenge, 'recaptcha_challenge': challenge, 'recaptcha_response_field': response, 'recaptcha_response': response})
except:
pass
def capimage(self, data):
try:
url = common.parseDOM(data, "img", ret="src")
url = [i for i in url if 'captcha' in i]
if len(url) > 0: self.type = 'capimage'
else: self.type = None ; return
response = self.keyboard(url[0])
self.captcha.update({'code': response})
except:
pass
def numeric(self, data):
try:
url = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(data)
if len(url) > 0: self.type = 'numeric'
else: self.type = None ; return
result = sorted(url[0], key=lambda ltr: int(ltr[0]))
response = ''.join(str(int(num[1])-48) for num in result)
self.captcha.update({'code': response})
except:
pass
def keyboard(self, response):
try:
dataPath = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo("profile"))
i = os.path.join(dataPath.decode("utf-8"),'img')
f = xbmcvfs.File(i, 'w')
f.write(getUrl(response).result)
f.close()
f = xbmcgui.ControlImage(450,5,375,115, i)
d = xbmcgui.WindowDialog()
d.addControl(f)
xbmcvfs.delete(i)
d.show()
xbmc.sleep(3000)
t = 'Type the letters in the image'
c = common.getUserInput(t, '')
d.close()
return c
except:
return
class regex:
def worker(self, data):
try:
data = str(data).replace('\r','').replace('\n','').replace('\t','')
url = re.compile('(.+?)<regex>').findall(data)[0]
regex = re.compile('<regex>(.+?)</regex>').findall(data)
except:
return
for x in regex:
try:
name = re.compile('<name>(.+?)</name>').findall(x)[0]
expres = re.compile('<expres>(.+?)</expres>').findall(x)[0]
referer = re.compile('<referer>(.+?)</referer>').findall(x)[0]
referer = urllib.unquote_plus(referer)
referer = common.replaceHTMLCodes(referer)
referer = referer.encode('utf-8')
page = re.compile('<page>(.+?)</page>').findall(x)[0]
page = urllib.unquote_plus(page)
page = common.replaceHTMLCodes(page)
page = page.encode('utf-8')
result = getUrl(page, referer=referer).result
result = str(result).replace('\r','').replace('\n','').replace('\t','')
result = str(result).replace('\/','/')
r = re.compile(expres).findall(result)[0]
url = url.replace('$doregex[%s]' % name, r)
except:
pass
url = common.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
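# A hedged sketch of the payload regex.worker expects (tag names are taken
# from the patterns above; the URL, expression and pages are hypothetical,
# with referer and page URL-quoted as the code requires):
#
#   http://example.com/play?v=$doregex[vid]<regex><name>vid</name>
#   <expres>file: "(.+?)"</expres><referer>http%3A%2F%2Fexample.com%2F</referer>
#   <page>http%3A%2F%2Fexample.com%2Fembed</page></regex>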
class unwise:
def worker(self, str_eval):
page_value=""
try:
ss="w,i,s,e=("+str_eval+')'
exec (ss)
page_value=self.__unwise(w,i,s,e)
except: return
return page_value
def __unwise(self, w, i, s, e):
lIll = 0;
ll1I = 0;
Il1l = 0;
ll1l = [];
l1lI = [];
while True:
if (lIll < 5):
l1lI.append(w[lIll])
elif (lIll < len(w)):
ll1l.append(w[lIll]);
lIll+=1;
if (ll1I < 5):
l1lI.append(i[ll1I])
elif (ll1I < len(i)):
ll1l.append(i[ll1I])
ll1I+=1;
if (Il1l < 5):
l1lI.append(s[Il1l])
elif (Il1l < len(s)):
ll1l.append(s[Il1l]);
Il1l+=1;
if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
break;
lI1l = ''.join(ll1l)
I1lI = ''.join(l1lI)
ll1I = 0;
l1ll = [];
for lIll in range(0,len(ll1l),2):
ll11 = -1;
if ( ord(I1lI[ll1I]) % 2):
ll11 = 1;
l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
ll1I+=1;
if (ll1I >= len(l1lI)):
ll1I = 0;
ret=''.join(l1ll)
if 'eval(function(w,i,s,e)' in ret:
ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
return self.worker(ret)
else:
return ret
class js:
def worker(self, script):
aSplit = script.split(";',")
p = str(aSplit[0])
aSplit = aSplit[1].split(",")
a = int(aSplit[0])
c = int(aSplit[1])
k = aSplit[2].split(".")[0].replace("'", '').split('|')
e = ''
d = ''
sUnpacked = str(self.__unpack(p, a, c, k, e, d))
sUnpacked = sUnpacked.replace('\\', '')
url = self.__parse(sUnpacked)
return url
def __unpack(self, p, a, c, k, e, d):
while (c > 1):
c = c -1
if (k[c]):
p = re.sub('\\b' + str(self.__itoa(c, a)) +'\\b', k[c], p)
return p
def __itoa(self, num, radix):
result = ""
while num > 0:
result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
num /= radix
return result
def __parse(self, sUnpacked):
url = re.compile("'file' *, *'(.+?)'").findall(sUnpacked)
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(sUnpacked)
url += re.compile("playlist=(.+?)&").findall(sUnpacked)
url += common.parseDOM(sUnpacked, "embed", ret="src")
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[-1].split('://', 1)[-1]
return url
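# The worker above unpacks Dean Edwards style P.A.C.K.E.R. output: p is the
# packed source, a the radix, c the symbol count, and k the '|'-separated
# symbol table from eval(function(p,a,c,k,e,d){...}); each base-a token in p
# is substituted with its entry in k before the file URL is parsed out.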
class premiumize:
def __init__(self):
self.user = xbmcaddon.Addon().getSetting("premiumize_user")
self.password = xbmcaddon.Addon().getSetting("premiumize_password")
def info(self):
return {
'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
'quality': 'High',
'captcha': False,
'a/c': True
}
def status(self):
if (self.user == '' or self.password == ''): return False
else: return True
def hosts(self):
try:
if self.status() == False: raise Exception()
            url = 'http://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s' % (self.user, self.password)
result = getUrl(url).result
pz = json.loads(result)['result']['hosterlist']
pz = [i.rsplit('.' ,1)[0].lower() for i in pz]
return pz
except:
return
def resolve(self, url):
try:
if self.status() == False: raise Exception()
            url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s' % (self.user, self.password, urllib.quote_plus(url))
result = getUrl(url, close=False).result
url = json.loads(result)['result']['location']
return url
except:
return
class realdebrid:
def __init__(self):
self.user = xbmcaddon.Addon().getSetting("realdedrid_user")
self.password = xbmcaddon.Addon().getSetting("realdedrid_password")
def info(self):
return {
'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
'quality': 'High',
'captcha': False,
'a/c': True
}
def status(self):
if (self.user == '' or self.password == ''): return False
else: return True
def hosts(self):
try:
if self.status() == False: raise Exception()
url = 'http://real-debrid.com/api/hosters.php'
result = getUrl(url).result
rd = json.loads('[%s]' % result)
rd = [i.rsplit('.' ,1)[0].lower() for i in rd]
return rd
except:
return
def resolve(self, url):
try:
if self.status() == False: raise Exception()
login_data = urllib.urlencode({'user' : self.user, 'pass' : self.password})
login_link = 'http://real-debrid.com/ajax/login.php?%s' % login_data
result = getUrl(login_link, close=False).result
result = json.loads(result)
error = result['error']
if not error == 0: raise Exception()
url = 'http://real-debrid.com/ajax/unrestrict.php?link=%s' % url
url = url.replace('filefactory.com/stream/', 'filefactory.com/file/')
result = getUrl(url).result
result = json.loads(result)
url = result['generated_links'][0][-1]
return url
except:
return
class _180upload:
def info(self):
return {
'netloc': ['180upload.com'],
'host': ['180upload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://180upload.com/embed-%s.html' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "id": "captchaForm" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class allmyvideos:
def info(self):
return {
'netloc': ['allmyvideos.net'],
'host': ['Allmyvideos'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://allmyvideos.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('"file" *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class bestreams:
def info(self):
return {
'netloc': ['bestreams.net'],
'host': ['Bestreams'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://bestreams.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class clicknupload:
def info(self):
return {
'netloc': ['clicknupload.com'],
'host': ['Clicknupload'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="onClick")
url = [i for i in url if i.startswith('window.open')][0]
url = re.compile('[\'|\"](.+?)[\'|\"]').findall(url)[0]
return url
except:
return
class cloudzilla:
def info(self):
return {
'netloc': ['cloudzilla.to'],
'host': ['Cloudzilla'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/share/file/', '/embed/')
result = getUrl(url).result
url = re.compile('var\s+vurl *= *"(http.+?)"').findall(result)[0]
return url
except:
return
class coolcdn:
def info(self):
return {
'netloc': ['movshare.net', 'novamov.com', 'nowvideo.sx', 'videoweed.es'],
'host': ['Movshare', 'Novamov', 'Nowvideo', 'Videoweed'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
netloc = urlparse.urlparse(url).netloc
netloc = netloc.replace('www.', '').replace('embed.', '')
netloc = netloc.lower()
id = re.compile('//.+?/.+?/([\w]+)').findall(url)
id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
id = id[0]
url = 'http://embed.%s/embed.php?v=%s' % (netloc, id)
result = getUrl(url).result
key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
except: pass
url = 'http://www.%s/api/player.api.php?key=%s&file=%s' % (netloc, key, id)
result = getUrl(url).result
url = re.compile('url=(.+?)&').findall(result)[0]
return url
except:
return
class daclips:
def info(self):
return {
'netloc': ['daclips.in'],
'host': ['Daclips'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class datemule:
def info(self):
return {
'netloc': ['datemule.com']
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
class fastvideo:
def info(self):
return {
'netloc': ['fastvideo.in', 'faststream.in'],
'host': ['Fastvideo', 'Faststream'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://fastvideo.in/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class filehoot:
def info(self):
return {
'netloc': ['filehoot.com'],
'host': ['Filehoot'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://filehoot.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
class filenuke:
def info(self):
return {
'netloc': ['filenuke.com', 'sharesix.com'],
'host': ['Filenuke', 'Sharesix'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
try: f = common.parseDOM(result, "form", attrs = { "method": "POST" })[0]
except: f = ''
k = common.parseDOM(f, "input", ret="name")
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile("var\s+lnk\d* *= *'(http.+?)'").findall(result)[0]
return url
except:
return
class googledocs:
def info(self):
return {
'netloc': ['docs.google.com', 'drive.google.com']
}
def resolve(self, url):
try:
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
result = getUrl(url).result
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
u = json.loads(result)
u = [i.split('|')[-1] for i in u.split(',')]
u = sum([self.tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(self, url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
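# The chained elif in tag() above is equivalent to a single table lookup.
# A sketch of the same mapping (itag lists copied from tag() above;
# _QUALITY_BY_ITAG is a hypothetical name, not used elsewhere in this file):
_QUALITY_BY_ITAG = {}
for _q, _itags in [
    ('1080p', ['37', '137', '299', '96', '248', '303', '46']),
    ('HD', ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']),
    ('SD', ['35', '44', '135', '244', '94',
            '18', '34', '43', '82', '100', '101', '134', '243', '93',
            '5', '6', '36', '83', '133', '242', '92', '132']),
]:
    for _i in _itags:
        _QUALITY_BY_ITAG[_i] = _q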
class googleplus:
def info(self):
return {
'netloc': ['plus.google.com', 'picasaweb.google.com']
}
def resolve(self, url):
try:
if 'picasaweb' in url.lower():
result = getUrl(url).result
aid = re.compile('aid=(\d*)').findall(result)[0]
pid = urlparse.urlparse(url).fragment
oid = re.compile('/(\d*)/').findall(urlparse.urlparse(url).path)[0]
key = urlparse.parse_qs(urlparse.urlparse(url).query)['authkey'][0]
url = 'http://plus.google.com/photos/%s/albums/%s/%s?authkey=%s' % (oid, aid, pid, key)
result = getUrl(url, mobile=True).result
u = re.compile('"(http[s]*://.+?videoplayback[?].+?)"').findall(result)[::-1]
u = [i.replace('\\u003d','=').replace('\\u0026','&') for i in u]
u = sum([self.tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(self, url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
class gorillavid:
def info(self):
return {
'netloc': ['gorillavid.com', 'gorillavid.in'],
'host': ['Gorillavid'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://gorillavid.in/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=30)
response.close()
            content_type = str(response.info()["Content-Type"])
            if content_type == 'text/html': raise Exception()
return url
except:
return
class grifthost:
def info(self):
return {
'netloc': ['grifthost.com'],
'host': ['Grifthost'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://grifthost.com/embed-%s.html' % url
result = getUrl(url).result
try:
post = {}
f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
f = f.replace('"submit"', '"hidden"')
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
except:
pass
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class hugefiles:
def info(self):
return {
'netloc': ['hugefiles.net'],
'host': ['Hugefiles'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
f += common.parseDOM(result, "form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile('fileUrl\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
return url
except:
return
class ipithos:
def info(self):
return {
'netloc': ['ipithos.to'],
'host': ['Ipithos'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://ipithos.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class ishared:
def info(self):
return {
'netloc': ['ishared.eu'],
'host': ['iShared'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = re.compile('path *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class kingfiles:
def info(self):
return {
'netloc': ['kingfiles.net'],
'host': ['Kingfiles'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': ' '})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': ' '})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile("var\s+download_url *= *'(.+?)'").findall(result)[0]
return url
except:
return
class mailru:
def info(self):
return {
'netloc': ['mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru']
}
def resolve(self, url):
try:
usr = re.compile('/mail/(.+?)/').findall(url)[0]
vid = re.compile('(\d*)[.]html').findall(url)[0]
url = 'http://videoapi.my.mail.ru/videos/mail/%s/_myvideo/%s.json?ver=0.2.60' % (usr, vid)
            import requests
            # fetch once and reuse the response, so the cookie belongs to the
            # same response as the JSON body (the original fetched twice)
            r = requests.get(url)
            result = r.content
            cookie = r.headers['Set-Cookie']
u = json.loads(result)['videos']
h = "|Cookie=%s" % urllib.quote(cookie)
url = []
try: url += [[{'quality': '1080p', 'url': i['url'] + h} for i in u if i['key'] == '1080p'][0]]
except: pass
try: url += [[{'quality': 'HD', 'url': i['url'] + h} for i in u if i['key'] == '720p'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i['url'] + h} for i in u if not (i['key'] == '1080p' or i ['key'] == '720p')][0]]
except: pass
if url == []: return
return url
except:
return
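# The "|Cookie=..." suffix built in mailru.resolve above follows Kodi's
# convention of appending HTTP headers to a playback URL after a pipe,
# e.g. 'http://host/video.mp4|Cookie=key%3Dvalue'; the player then sends
# those headers with every request for the stream.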
class mightyupload:
def info(self):
return {
'netloc': ['mightyupload.com'],
'host': ['Mightyupload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://www.mightyupload.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("file *: *'(.+?)'").findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class mooshare:
def info(self):
return {
'netloc': ['mooshare.biz'],
'host': ['Mooshare'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://mooshare.biz/embed-%s.html?play=1&confirm=Close+Ad+and+Watch+as+Free+User' % url
result = getUrl(url).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class movdivx:
def info(self):
return {
'netloc': ['movdivx.com'],
'host': ['Movdivx'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://www.movdivx.com/%s' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class movpod:
def info(self):
return {
'netloc': ['movpod.net', 'movpod.in'],
'host': ['Movpod'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = url.replace('/vid/', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://movpod.in/embed-%s.html' % url
result = getUrl(url).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=30)
response.close()
            content_type = str(response.info()["Content-Type"])
            if content_type == 'text/html': raise Exception()
return url
except:
return
class movreel:
def info(self):
return {
'netloc': ['movreel.com'],
'host': ['Movreel'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
user = xbmcaddon.Addon().getSetting("movreel_user")
password = xbmcaddon.Addon().getSetting("movreel_password")
login = 'http://movreel.com/login.html'
post = {'op': 'login', 'login': user, 'password': password, 'redirect': url}
post = urllib.urlencode(post)
result = getUrl(url, close=False).result
result += getUrl(login, post=post, close=False).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
import time
request = urllib2.Request(url, post)
for i in range(0, 3):
try:
response = urllib2.urlopen(request, timeout=10)
result = response.read()
response.close()
url = re.compile('(<a .+?</a>)').findall(result)
url = [i for i in url if 'Download Link' in i][-1]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
time.sleep(1)
except:
return
class mrfile:
def info(self):
return {
'netloc': ['mrfile.me'],
'host': ['Mrfile'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile('(<a\s+href=.+?>Download\s+.+?</a>)').findall(result)[-1]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
return
class mybeststream:
def info(self):
return {
'netloc': ['mybeststream.xyz']
}
def resolve(self, url):
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')
result = getUrl(url, referer=referer).result
result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
result = unwise().worker(result)
strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
strm = [i for i in strm if i.startswith('rtmp')][0]
url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
return url
except:
return
class nosvideo:
def info(self):
return {
'netloc': ['nosvideo.com'],
'host': ['Nosvideo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[0]
url = js().worker(result)
result = getUrl(url).result
url = common.parseDOM(result, "file")[0]
return url
except:
return
class openload:
def info(self):
return {
'netloc': ['openload.io'],
'host': ['Openload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = common.parseDOM(result, "span", attrs = { "id": "realdownload" })[0]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
return
class played:
def info(self):
return {
'netloc': ['played.to'],
'host': ['Played'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = url.replace('//', '/')
url = re.compile('/.+?/([\w]+)').findall(url)[0]
url = 'http://played.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class primeshare:
def info(self):
return {
'netloc': ['primeshare.tv'],
'host': ['Primeshare'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "video")[0]
url = common.parseDOM(url, "source", ret="src", attrs = { "type": ".+?" })[0]
return url
except:
return
class sharerepo:
def info(self):
return {
'netloc': ['sharerepo.com'],
'host': ['Sharerepo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = re.compile("file *: *'(http.+?)'").findall(result)[-1]
return url
except:
return
class stagevu:
def info(self):
return {
'netloc': ['stagevu.com'],
'host': ['StageVu'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = common.parseDOM(result, "embed", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class streamcloud:
def info(self):
return {
'netloc': ['streamcloud.eu'],
'host': ['Streamcloud'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://streamcloud.eu/%s' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "class": "proform" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
post = post.replace('op=download1', 'op=download2')
result = getUrl(url, post=post).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class streamin:
def info(self):
return {
'netloc': ['streamin.to'],
'host': ['Streamin'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://streamin.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(result)[-1]
return url
except:
return
class thefile:
def info(self):
return {
'netloc': ['thefile.me'],
'host': ['Thefile'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thefile.me/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class thevideo:
def info(self):
return {
'netloc': ['thevideo.me'],
'host': ['Thevideo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thevideo.me/embed-%s.html' % url
result = getUrl(url).result
result = result.replace('\n','')
import ast
url = re.compile("'sources' *: *(\[.+?\])").findall(result)[-1]
url = ast.literal_eval(url)
url = url[-1]['file']
return url
except:
return
class tusfiles:
def info(self):
return {
'netloc': ['tusfiles.net'],
'host': ['Tusfiles'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class uploadc:
def info(self):
return {
'netloc': ['uploadc.com', 'zalaa.com'],
'host': ['Uploadc', 'Zalaa'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://uploadc.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("'file' *, *'(.+?)'").findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class uploadrocket:
def info(self):
return {
'netloc': ['uploadrocket.net'],
'host': ['Uploadrocket'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
result = result.decode('iso-8859-1').encode('utf-8')
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "freeorpremium" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_isfree': 'Click for Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = result.decode('iso-8859-1').encode('utf-8')
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = result.decode('iso-8859-1').encode('utf-8')
url = common.parseDOM(result, "a", ret="href", attrs = { "onclick": "DL.+?" })[0]
return url
except:
return
class uptobox:
def info(self):
return {
'netloc': ['uptobox.com'],
'host': ['Uptobox'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "div", attrs = { "align": ".+?" })
url = [i for i in url if 'button_upload' in i][0]
url = common.parseDOM(url, "a", ret="href")[0]
url = ['http' + i for i in url.split('http') if 'uptobox.com' in i][0]
return url
except:
return
class v_vids:
def info(self):
return {
'netloc': ['v-vids.com'],
'host': ['V-vids'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="href", attrs = { "id": "downloadbutton" })[0]
return url
except:
return
class veehd:
def info(self):
return {
'netloc': ['veehd.com'],
}
def resolve(self, url):
try:
result = getUrl(url, close=False).result
result = result.replace('\n','')
url = re.compile('function\s*load_download.+?src\s*:\s*"(.+?)"').findall(result)[0]
url = urlparse.urljoin('http://veehd.com', url)
result = getUrl(url, close=False).result
i = common.parseDOM(result, "iframe", ret="src")
if len(i) > 0:
i = urlparse.urljoin('http://veehd.com', i[0])
getUrl(i, close=False).result
result = getUrl(url).result
url = re.compile('href *= *"([^"]+(?:mkv|mp4|avi))"').findall(result)
url += re.compile('src *= *"([^"]+(?:divx|avi))"').findall(result)
url += re.compile('"url" *: *"(.+?)"').findall(result)
url = urllib.unquote(url[0])
return url
except:
return
class vidbull:
def info(self):
return {
'netloc': ['vidbull.com'],
'host': ['Vidbull'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class videomega:
def info(self):
return {
'netloc': ['videomega.tv']
}
def resolve(self, url):
try:
url = urlparse.urlparse(url).query
url = urlparse.parse_qsl(url)[0][1]
url = 'http://videomega.tv/cdn.php?ref=%s' % url
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class vidplay:
def info(self):
return {
'netloc': ['vidplay.net'],
'host': ['Vidplay'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
u = 'http://vidplay.net/vidembed-%s' % url
url = getUrl(u, output='geturl').result
if u == url: raise Exception()
return url
except:
return
class vidspot:
def info(self):
return {
'netloc': ['vidspot.net'],
'host': ['Vidspot'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidspot.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('"file" *: *"(http.+?)"').findall(result)[-1]
query = urlparse.urlparse(url).query
url = url[:url.find('?')]
url = '%s?%s&direct=false' % (url, query)
return url
except:
return
class vidto:
def info(self):
return {
'netloc': ['vidto.me'],
'host': ['Vidto'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidto.me/embed-%s.html' % url
result = getUrl(url).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = re.sub(r'(\',\d*,\d*,)', r';\1', result)
url = js().worker(result)
return url
except:
return
class vidzi:
def info(self):
return {
'netloc': ['vidzi.tv'],
'host': ['Vidzi'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
result = result.replace('\n','')
result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
result = re.compile('file *: *"(http.+?)"').findall(result)
url = [i for i in result if '.m3u8' in i]
if len(url) > 0: return url[0]
            url = [i for i in result if '.m3u8' not in i]
if len(url) > 0: return url[0]
except:
return
class vimeo:
def info(self):
return {
'netloc': ['vimeo.com']
}
def resolve(self, url):
try:
url = [i for i in url.split('/') if i.isdigit()][-1]
url = 'http://player.vimeo.com/video/%s/config' % url
result = getUrl(url).result
result = json.loads(result)
u = result['request']['files']['h264']
url = None
try: url = u['hd']['url']
except: pass
try: url = u['sd']['url']
except: pass
return url
except:
return
class vk:
def info(self):
return {
'netloc': ['vk.com']
}
def resolve(self, url):
try:
url = url.replace('https://', 'http://')
result = getUrl(url).result
u = re.compile('url(720|540|480|360|240)=(.+?)&').findall(result)
url = []
try: url += [[{'quality': 'HD', 'url': i[1]} for i in u if i[0] == '720'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '540'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '480'][0]]
except: pass
if not url == []: return url
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '360'][0]]
except: pass
if not url == []: return url
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '240'][0]]
except: pass
if url == []: return
return url
except:
return
class vodlocker:
def info(self):
return {
'netloc': ['vodlocker.com'],
'host': ['Vodlocker'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vodlocker.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class xfileload:
def info(self):
return {
'netloc': ['xfileload.com'],
'host': ['Xfileload'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, close=False).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
import time
request = urllib2.Request(url, post)
for i in range(0, 5):
try:
response = urllib2.urlopen(request, timeout=10)
result = response.read()
response.close()
if 'download2' in result: raise Exception()
url = common.parseDOM(result, "a", ret="href", attrs = { "target": "" })[0]
return url
except:
time.sleep(1)
except:
return
class xvidstage:
def info(self):
return {
'netloc': ['xvidstage.com'],
'host': ['Xvidstage'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://xvidstage.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class youtube:
def info(self):
return {
'netloc': ['youtube.com'],
'host': ['Youtube'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
result = getUrl('http://www.youtube.com/watch?v=%s' % id).result
message = common.parseDOM(result, "div", attrs = { "id": "unavailable-submessage" })
message = ''.join(message)
alert = common.parseDOM(result, "div", attrs = { "id": "watch7-notification-area" })
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id
return url
except:
return
class zettahost:
def info(self):
return {
'netloc': ['zettahost.tv'],
'host': ['Zettahost'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://zettahost.tv/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
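# A minimal sketch of how a caller might pick one of the resolver classes
# above by hostname (illustration only; _resolve_demo and the resolver list
# are hypothetical, and the addon's real dispatcher is not part of this file):
def _resolve_demo(url):
    netloc = urlparse.urlparse(url).netloc.replace('www.', '')
    for resolver in [allmyvideos, vodlocker, streamcloud]:
        if netloc in resolver().info().get('netloc', []):
            return resolver().resolve(url)
    return None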
|
wyojustin/clockthreesr
|
refs/heads/master
|
arduino/libraries/Time_X/Examples/pps/cumulative.py
|
11
|
from scipy import *
from pylab import *
from numpy import *
f = open('uSec.csv')
f.readline()  # skip the header row
for l in f.readlines():
    try:
        x, y = l.split(',')
        float(x)
        float(y)
    except:
        print l  # report the malformed row before re-raising
        raise
f.close()
dat = loadtxt('uSec.csv', skiprows=1, delimiter=",")
# plot(dat[:,0])
# plot(dat[:,1])
# show()
# cumulative clock difference in microseconds (note: this shadows numpy's
# diff() function, which the script never calls)
diff = cumsum(dat[:,0]) - cumsum(dat[:,1])
diff -= mean(diff)
# the counter wraps every 1000 us: take it modulo 1000, then unwrap the
# phase so the drift accumulates continuously instead of sawtoothing
diff %= 1000
diff = unwrap(diff * 2 * pi / 1000) * 1000 / (2 * pi)
# diff -= 1e6 + 19
A = ones((len(diff), 2))
A[:,0] = range(len(diff))
m, b = dot(linalg.inv(dot(A.T, A)), dot(A.T, diff))
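# the two lines above fit a straight line by solving the normal equations
# (A^T A) x = A^T diff for x = [slope, intercept]; linalg.lstsq(A, diff)[0]
# computes the same fit with better numerical conditioning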
i_s = arange(len(diff))
y = m * i_s + b
plot(diff)
# i_s = where(greater(diff, y))[0]
plot(i_s, diff[i_s])
N = len(i_s)
A = ones((N, 2))
A[:,0] = i_s
m, b = dot(linalg.inv(dot(A.T, A)), dot(A.T, diff[i_s]))
y = m * i_s + b
plot(i_s, y, '--')
print m, 'us/s =', m * 86400 * 365.25/1e6, 's/year'
print m*1000, 'ns/s'
print 1000/m / 3600., ' h/ms'
xlabel('Seconds')
ylabel('uSec')
text(10000, -120, 'drift: %.2f ns/s' % (m*1000), rotation=-36)
show()
|
gdimitris/ChessPuzzler
|
refs/heads/master
|
Virtual_Environment/lib/python2.7/site-packages/werkzeug/_reloader.py
|
116
|
import os
import sys
import time
import subprocess
import threading
from itertools import chain
from werkzeug._internal import _log
from werkzeug._compat import PY2, iteritems, text_type
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _find_observable_paths(extra_files=None):
"""Finds all paths that should be observed."""
rv = set(os.path.abspath(x) for x in sys.path)
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, '__file__', None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv)
def _find_common_roots(paths):
"""Out of some paths it finds the common roots that need monitoring."""
paths = [x.split(os.path.sep) for x in paths]
root = {}
for chunks in sorted(paths, key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in iteritems(node):
_walk(child, path + (prefix,))
if not node:
rv.add('/'.join(path))
_walk(root, ())
return rv
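# Example: _find_common_roots({'/a/b/c', '/a/b', '/a/d'}) collapses the
# subtree '/a/b/c' into its parent and returns {'/a/b', '/a/d'}.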
class ReloaderLoop(object):
name = None
    # Monkeypatched by the test suite. Wrapping with `staticmethod` is
    # required in case time.sleep has been replaced by a non-C function
    # (e.g. by `eventlet.monkey_patch`) before we get here.
_sleep = staticmethod(time.sleep)
def __init__(self, extra_files=None, interval=1):
self.extra_files = set(os.path.abspath(x)
for x in extra_files or ())
self.interval = interval
def run(self):
pass
def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with %s' % self.name)
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
            # A weird bug on Windows: sometimes unicode strings end up in
            # the environment and subprocess.call does not like this;
            # encode them to latin1 and continue.
if os.name == 'nt' and PY2:
for key, value in iteritems(new_environ):
if isinstance(value, text_type):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename):
filename = os.path.abspath(filename)
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
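# Restart protocol: the parent process loops in restart_with_reloader(),
# re-executing this script with WERKZEUG_RUN_MAIN=true; the child watches
# for changes and calls sys.exit(3) when one is detected. Any exit code
# other than 3 stops the loop and becomes the parent's own exit code.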
class StatReloaderLoop(ReloaderLoop):
name = 'stat'
def run(self):
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), self.extra_files):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
self.trigger_reload(filename)
self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
def __init__(self, *args, **kwargs):
ReloaderLoop.__init__(self, *args, **kwargs)
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
self.observable_paths = set()
def _check_modification(filename):
if filename in self.extra_files:
self.trigger_reload(filename)
dirname = os.path.dirname(filename)
if dirname.startswith(tuple(self.observable_paths)):
if filename.endswith(('.pyc', '.pyo')):
self.trigger_reload(filename[:-1])
elif filename.endswith('.py'):
self.trigger_reload(filename)
class _CustomHandler(FileSystemEventHandler):
def on_created(self, event):
_check_modification(event.src_path)
def on_modified(self, event):
_check_modification(event.src_path)
reloader_name = Observer.__name__.lower()
if reloader_name.endswith('observer'):
reloader_name = reloader_name[:-8]
reloader_name += ' reloader'
self.name = reloader_name
self.observer_class = Observer
self.event_handler = _CustomHandler()
self.should_reload = False
def trigger_reload(self, filename):
# This is called inside an event handler, which means we can't throw
# SystemExit here. https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload = True
ReloaderLoop.trigger_reload(self, filename)
def run(self):
watches = {}
observer = self.observer_class()
observer.start()
while not self.should_reload:
to_delete = set(watches)
paths = _find_observable_paths(self.extra_files)
for path in paths:
if path not in watches:
try:
watches[path] = observer.schedule(
self.event_handler, path, recursive=True)
except OSError:
# "Path is not a directory". We could filter out
# those paths beforehand, but that would cause
# additional stat calls.
watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = watches.pop(path, None)
if watch is not None:
observer.unschedule(watch)
self.observable_paths = paths
self._sleep(self.interval)
sys.exit(3)
reloader_loops = {
'stat': StatReloaderLoop,
'watchdog': WatchdogReloaderLoop,
}
try:
__import__('watchdog.observers')
except ImportError:
reloader_loops['auto'] = reloader_loops['stat']
else:
reloader_loops['auto'] = reloader_loops['watchdog']
def run_with_reloader(main_func, extra_files=None, interval=1,
reloader_type='auto'):
"""Run the given function in an independent python interpreter."""
import signal
reloader = reloader_loops[reloader_type](extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
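# Example usage (illustration only; `serve` is a hypothetical stand-in for
# whatever long-running callable should be supervised):
#
#     def serve():
#         while True:
#             time.sleep(1)
#
#     run_with_reloader(serve, extra_files=['config.ini'], reloader_type='auto')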
|
gcallah/Indra
|
refs/heads/master
|
indraV1/models/standing_ovation_run.py
|
1
|
#!/usr/bin/env python
"""
This file runs the standing_ovation model.
"""
import indra.prop_args2 as props
MODEL_NM = "standing_ovation"
def run(prop_dict=None):
pa = props.PropArgs.create_props(MODEL_NM, prop_dict)
import indra.utils as utils
import models.standing_ovation as wsm
(prog_file, log_file, prop_file, results_file) = utils.gen_file_names(MODEL_NM)
env = wsm.Auditorium("Auditorium",
pa["grid_width"],
pa["grid_height"],
model_nm=MODEL_NM,
preact=True,
props=pa)
num_agents = int(pa["grid_width"] * pa["grid_height"])
for i in range(num_agents):
env.add_agent(wsm.Member("member" + str(i), "Enjoying the show", pa["noise_level"]))
return utils.run_model(env, prog_file, results_file)
if __name__ == "__main__":
run()
|
eric-stanley/robotframework
|
refs/heads/master
|
atest/testdata/variables/extended_assign_vars.py
|
38
|
__all__ = ['VAR', 'JVAR']
class Demeter(object):
loves = ''
@property
def hates(self):
return self.loves.upper()
class Variable(object):
attr = 'value'
_attr2 = 'v2'
attr2 = property(lambda self: self._attr2,
lambda self, value: setattr(self, '_attr2', value.upper()))
demeter = Demeter()
@property
def not_settable(self):
return None
VAR = Variable()
try:
import JavaClass
except ImportError:
JVAR = None
else:
JVAR = JavaClass()
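# Example of the uppercasing setter defined on Variable.attr2 above
# (illustration only, not part of the test data):
#
#     VAR.attr2 = 'new'          # stored through the property setter
#     assert VAR.attr2 == 'NEW'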
|
OriHoch/Open-Knesset
|
refs/heads/master
|
laws/migrations/0015_add_bill_content_html.py
|
15
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PrivateProposal.content_html'
db.add_column('laws_privateproposal', 'content_html', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
# Adding field 'KnessetProposal.content_html'
db.add_column('laws_knessetproposal', 'content_html', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
# Adding field 'GovProposal.content_html'
db.add_column('laws_govproposal', 'content_html', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'PrivateProposal.content_html'
db.delete_column('laws_privateproposal', 'content_html')
# Deleting field 'KnessetProposal.content_html'
db.delete_column('laws_knessetproposal', 'content_html')
# Deleting field 'GovProposal.content_html'
db.delete_column('laws_govproposal', 'content_html')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'committees.committeemeeting': {
'Meta': {'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'laws.bill': {
'Meta': {'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.govlegislationcommitteedecision': {
'Meta': {'object_name': 'GovLegislationCommitteeDecision'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gov_decisions'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stand': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.govproposal': {
'Meta': {'object_name': 'GovProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'gov_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'knesset_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': "orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['laws']
|
zding5/Microblog-Flask
|
refs/heads/master
|
flask/lib/python2.7/site-packages/sqlalchemy/sql/naming.py
|
21
|
# sqlalchemy/naming.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Establish constraint and index naming conventions.
"""
from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \
UniqueConstraint, CheckConstraint, Index, Table, Column
from .. import event, events
from .. import exc
from .elements import _truncated_label, _defer_name, _defer_none_name, conv
import re
class ConventionDict(object):
def __init__(self, const, table, convention):
self.const = const
self._is_fk = isinstance(const, ForeignKeyConstraint)
self.table = table
self.convention = convention
self._const_name = const.name
def _key_table_name(self):
return self.table.name
def _column_X(self, idx):
if self._is_fk:
fk = self.const.elements[idx]
return fk.parent
else:
return list(self.const.columns)[idx]
def _key_constraint_name(self):
if isinstance(self._const_name, (type(None), _defer_none_name)):
raise exc.InvalidRequestError(
"Naming convention including "
"%(constraint_name)s token requires that "
"constraint is explicitly named."
)
if not isinstance(self._const_name, conv):
self.const.name = None
return self._const_name
def _key_column_X_name(self, idx):
return self._column_X(idx).name
def _key_column_X_label(self, idx):
return self._column_X(idx)._label
def _key_referred_table_name(self):
fk = self.const.elements[0]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return reftable
def _key_referred_column_X_name(self, idx):
fk = self.const.elements[idx]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return refcol
def __getitem__(self, key):
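        # lookup order: a callable registered in the convention dict wins,
        # then a _key_<token> method on this class; numbered tokens such as
        # "column_0_name" are matched by the regex fallback below.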
if key in self.convention:
return self.convention[key](self.const, self.table)
elif hasattr(self, '_key_%s' % key):
return getattr(self, '_key_%s' % key)()
else:
col_template = re.match(r".*_?column_(\d+)_.+", key)
if col_template:
idx = col_template.group(1)
attr = "_key_" + key.replace(idx, "X")
idx = int(idx)
if hasattr(self, attr):
return getattr(self, attr)(idx)
raise KeyError(key)
_prefix_dict = {
Index: "ix",
PrimaryKeyConstraint: "pk",
CheckConstraint: "ck",
UniqueConstraint: "uq",
ForeignKeyConstraint: "fk"
}
def _get_convention(dict_, key):
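    # walk the constraint class's MRO, checking the convention dict both
    # under the short prefix ("fk", "pk", ...) and under the class itself.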
for super_ in key.__mro__:
if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
return dict_[_prefix_dict[super_]]
elif super_ in dict_:
return dict_[super_]
else:
return None
def _constraint_name_for_table(const, table):
metadata = table.metadata
convention = _get_convention(metadata.naming_convention, type(const))
if isinstance(const.name, conv):
return const.name
elif convention is not None and (
const.name is None or not isinstance(const.name, conv) and
"constraint_name" in convention
):
return conv(
convention % ConventionDict(const, table,
metadata.naming_convention)
)
elif isinstance(convention, _defer_none_name):
return None
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
if isinstance(table, Column):
        # for a column-attached constraint, register another event that
        # fires once the column itself is attached to a table, so the
        # constraint can then be named against that table.
event.listen(table, "after_parent_attach",
lambda col, table: _constraint_name(const, table)
)
elif isinstance(table, Table):
if isinstance(const.name, (conv, _defer_name)):
return
newname = _constraint_name_for_table(const, table)
if newname is not None:
const.name = newname
|
Frank-Wu/RamCloud
|
refs/heads/master
|
ft/server.py
|
20
|
# Copyright (c) 2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
from driver import UDPDriver as Driver
from transport import Transport, TEST_ADDRESS
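# Minimal echo server used by the transport tests: each received RPC's
# payload is copied into the reply and sent back, and d.stat() is invoked
# on the driver after each request.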
def main():
d = Driver(TEST_ADDRESS)
t = Transport(d, isServer=True)
while True:
r = t.serverRecv()
r.replyPayload.extend(r.recvPayload)
r.sendReply()
d.stat()
if __name__ == '__main__':
main()
|
bixbydev/Bixby
|
refs/heads/master
|
google/gdata-2.0.18/tests/gdata_tests/apps/emailsettings/live_client_test.py
|
23
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Claudio Cherubino <ccherubino@google.com>'
import unittest
import gdata.apps.emailsettings.client
import gdata.apps.emailsettings.data
import gdata.client
import gdata.data
import gdata.gauth
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class EmailSettingsClientTest(unittest.TestCase):
def setUp(self):
self.client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain=conf.options.get_value('appsdomain'))
if conf.options.get_value('ssl') == 'true':
self.client.ssl = True
conf.configure_client(self.client, 'EmailSettingsClientTest',
self.client.auth_service, True)
self.username = conf.options.get_value('appsusername').split('@')[0]
def tearDown(self):
conf.close_client(self.client)
def testClientConfiguration(self):
self.assertEqual('apps-apis.google.com', self.client.host)
self.assertEqual('2.0', self.client.api_version)
self.assertEqual('apps', self.client.auth_service)
if conf.options.get_value('runlive') == 'true':
self.assertEqual(self.client.domain, conf.options.get_value('appsdomain'))
else:
self.assertEqual(self.client.domain, 'example.com')
def testMakeEmailSettingsUri(self):
self.assertEqual('/a/feeds/emailsettings/2.0/%s/%s/%s' % (self.client.domain,
'abc', 'label'),
self.client.MakeEmailSettingsUri('abc', 'label'))
def testCreateDeleteLabel(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateLabel')
new_label = self.client.CreateLabel(
username=conf.options.get_value('targetusername'),
name='status updates')
self.assert_(isinstance(new_label,
gdata.apps.emailsettings.data.EmailSettingsLabel))
self.assertEqual(new_label.name, 'status updates')
self.client.DeleteLabel(
username=conf.options.get_value('targetusername'),
label='status updates')
def testCreateFilter(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateFilter')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
from_address='alice@gmail.com',
has_the_word='project proposal', mark_as_read=True)
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.from_address, 'alice@gmail.com')
self.assertEqual(new_filter.has_the_word, 'project proposal')
self.assertEqual(new_filter.mark_as_read, 'True')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
to_address='announcements@example.com',
label="announcements")
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.to_address, 'announcements@example.com')
self.assertEqual(new_filter.label, 'announcements')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
subject='urgent',
does_not_have_the_word='spam',
has_attachments=True,
archive=True)
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.subject, 'urgent')
self.assertEqual(new_filter.does_not_have_the_word, 'spam')
self.assertEqual(new_filter.has_attachments, 'True')
self.assertEqual(new_filter.archive, 'True')
def testCreateSendAs(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateSendAs')
new_sendas = self.client.CreateSendAs(
username=conf.options.get_value('targetusername'),
name='Sales', address=conf.options.get_value('appsusername'),
reply_to='abc@gmail.com',
make_default=True)
self.assert_(isinstance(new_sendas,
gdata.apps.emailsettings.data.EmailSettingsSendAsAlias))
self.assertEqual(new_sendas.name, 'Sales')
self.assertEqual(new_sendas.address,
conf.options.get_value('appsusername'))
self.assertEqual(new_sendas.reply_to, 'abc@gmail.com')
self.assertEqual(new_sendas.make_default, 'True')
def testUpdateWebclip(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateWebclip')
new_webclip = self.client.UpdateWebclip(
username=conf.options.get_value('targetusername'),
enable=True)
self.assert_(isinstance(new_webclip,
gdata.apps.emailsettings.data.EmailSettingsWebClip))
self.assertEqual(new_webclip.enable, 'True')
new_webclip = self.client.UpdateWebclip(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_webclip,
gdata.apps.emailsettings.data.EmailSettingsWebClip))
self.assertEqual(new_webclip.enable, 'False')
def testUpdateForwarding(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateForwarding')
new_forwarding = self.client.UpdateForwarding(
username=conf.options.get_value('targetusername'),
enable=True,
forward_to=conf.options.get_value('appsusername'),
action='KEEP')
self.assert_(isinstance(new_forwarding,
gdata.apps.emailsettings.data.EmailSettingsForwarding))
self.assertEqual(new_forwarding.enable, 'True')
self.assertEqual(new_forwarding.forward_to,
conf.options.get_value('appsusername'))
self.assertEqual(new_forwarding.action, 'KEEP')
new_forwarding = self.client.UpdateForwarding(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_forwarding,
gdata.apps.emailsettings.data.EmailSettingsForwarding))
self.assertEqual(new_forwarding.enable, 'False')
def testUpdatePop(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdatePop')
new_pop = self.client.UpdatePop(
username=conf.options.get_value('targetusername'),
enable=True, enable_for='MAIL_FROM_NOW_ON', action='KEEP')
self.assert_(isinstance(new_pop,
gdata.apps.emailsettings.data.EmailSettingsPop))
self.assertEqual(new_pop.enable, 'True')
self.assertEqual(new_pop.enable_for, 'MAIL_FROM_NOW_ON')
self.assertEqual(new_pop.action, 'KEEP')
new_pop = self.client.UpdatePop(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_pop,
gdata.apps.emailsettings.data.EmailSettingsPop))
self.assertEqual(new_pop.enable, 'False')
def testUpdateImap(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateImap')
new_imap = self.client.UpdateImap(
username=conf.options.get_value('targetusername'),
enable=True)
self.assert_(isinstance(new_imap,
gdata.apps.emailsettings.data.EmailSettingsImap))
self.assertEqual(new_imap.enable, 'True')
new_imap = self.client.UpdateImap(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_imap,
gdata.apps.emailsettings.data.EmailSettingsImap))
self.assertEqual(new_imap.enable, 'False')
def testUpdateVacation(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateVacation')
new_vacation = self.client.UpdateVacation(
username=conf.options.get_value('targetusername'),
enable=True, subject='Out of office',
message='If urgent call me at 555-5555.',
start_date='2011-12-05', end_date='2011-12-06',
contacts_only=True, domain_only=False)
self.assert_(isinstance(new_vacation,
gdata.apps.emailsettings.data.EmailSettingsVacationResponder))
self.assertEqual(new_vacation.enable, 'True')
self.assertEqual(new_vacation.subject, 'Out of office')
self.assertEqual(new_vacation.message, 'If urgent call me at 555-5555.')
self.assertEqual(new_vacation.start_date, '2011-12-05')
self.assertEqual(new_vacation.end_date, '2011-12-06')
self.assertEqual(new_vacation.contacts_only, 'True')
self.assertEqual(new_vacation.domain_only, 'False')
new_vacation = self.client.UpdateVacation(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_vacation,
gdata.apps.emailsettings.data.EmailSettingsVacationResponder))
self.assertEqual(new_vacation.enable, 'False')
def testUpdateSignature(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateSignature')
new_signature = self.client.UpdateSignature(
username=conf.options.get_value('targetusername'),
signature='Regards, Joe')
self.assert_(isinstance(new_signature,
gdata.apps.emailsettings.data.EmailSettingsSignature))
self.assertEqual(new_signature.signature_value, 'Regards, Joe')
new_signature = self.client.UpdateSignature(
username=conf.options.get_value('targetusername'),
signature='')
self.assert_(isinstance(new_signature,
gdata.apps.emailsettings.data.EmailSettingsSignature))
self.assertEqual(new_signature.signature_value, '')
def testUpdateLanguage(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateLanguage')
new_language = self.client.UpdateLanguage(
username=conf.options.get_value('targetusername'),
language='es')
self.assert_(isinstance(new_language,
gdata.apps.emailsettings.data.EmailSettingsLanguage))
self.assertEqual(new_language.language_tag, 'es')
def testUpdateGeneral(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateGeneral')
new_general = self.client.UpdateGeneralSettings(
username=conf.options.get_value('targetusername'),
page_size=25, arrows=True)
self.assert_(isinstance(new_general,
gdata.apps.emailsettings.data.EmailSettingsGeneral))
self.assertEqual(new_general.page_size, '25')
self.assertEqual(new_general.arrows, 'True')
new_general = self.client.UpdateGeneralSettings(
username=conf.options.get_value('targetusername'),
shortcuts=False, snippets=True, use_unicode=False)
self.assert_(isinstance(new_general,
gdata.apps.emailsettings.data.EmailSettingsGeneral))
self.assertEqual(new_general.shortcuts, 'False')
self.assertEqual(new_general.snippets, 'True')
self.assertEqual(new_general.use_unicode, 'False')
def suite():
return conf.build_suite([EmailSettingsClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
kubeflow/kfp-tekton-backend
|
refs/heads/master
|
samples/tutorials/Data passing in python components/Data passing in python components - Files.py
|
2
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# %% [markdown]
# # Data passing tutorial
# Data passing is the most important aspect of Pipelines.
#
# In Kubeflow Pipelines, pipeline authors compose pipelines by creating component instances (tasks) and connecting them together.
#
# Components have inputs and outputs. They can consume and produce arbitrary data.
#
# Pipeline authors establish connections between component tasks by connecting their data inputs and outputs: the output of one task is passed as an argument to another task's input.
#
# The system takes care of storing the data produced by components and later passing that data to other components for consumption as instructed by the pipeline.
#
# This tutorial shows how to create python components that produce, consume and transform data.
# It shows how to create data passing pipelines by instantiating components and connecting them together.
# %%
from typing import NamedTuple
import kfp
from kfp.components import func_to_container_op, InputPath, OutputPath
# %% [markdown]
# ## Small data
#
# Small data is the data that you'd be comfortable passing as a program's command-line argument. Small data should not exceed a few kilobytes.
#
# Typical examples of small data are: a number, a URL, a small string (e.g. a column name).
#
# Small lists, dictionaries and JSON structures are fine, but keep an eye on the size and consider switching to file-based data passing methods that are more suitable for bigger data (more than several kilobytes) or binary data.
#
# All small data outputs will at some point be serialized to strings and all small data input values will at some point be deserialized from strings (passed as command-line arguments). There are built-in serializers and deserializers for several common types (e.g. `str`, `int`, `float`, `bool`, `list`, `dict`). All other types of data need to be serialized manually before returning the data. Make sure to properly specify type annotations; otherwise there will be no automatic deserialization and the component function will receive strings instead of deserialized objects.
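# %%
# A minimal sketch (editor's addition, not part of the original tutorial):
# small data passed through plain typed parameters and a typed return value,
# relying on the built-in serializers mentioned above.
@func_to_container_op
def add_numbers(a: int, b: int = 5) -> int:
    '''Return the sum of two small integer inputs.'''
    return a + b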
# %% [markdown]
# ## Bigger data (files)
#
# Bigger data should be read from files and written to files.
#
# The paths for the input and output files are chosen by the system and are passed into the function (as strings).
#
# Use the `InputPath` parameter annotation to tell the system that the function wants to consume the corresponding input data as a file. The system will download the data, write it to a local file and then pass the **path** of that file to the function.
#
# Use the `OutputPath` parameter annotation to tell the system that the function wants to produce the corresponding output data as a file. The system will prepare and pass the **path** of a file where the function should write the output data. After the function exits, the system will upload the data to the storage system so that it can be passed to downstream components.
#
# You can specify the type of the consumed/produced data by passing a type argument to `InputPath` and `OutputPath`. The type can be a python type or an arbitrary type name string. `OutputPath('TFModel')` means that the function states that the data it has written to a file has type 'TFModel'. `InputPath('TFModel')` means that the function states that it expects the data it reads from a file to have type 'TFModel'. When the pipeline author connects inputs to outputs, the system checks whether the types match.
#
# Note on input/output names: When the function is converted to component, the input and output names generally follow the parameter names, but the "\_path" and "\_file" suffixes are stripped from file/path inputs and outputs. E.g. the `number_file_path: InputPath(int)` parameter becomes the `number: int` input. This makes the argument passing look more natural: `number=42` instead of `number_file_path=42`.
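# %%
# A minimal sketch (editor's addition, not part of the original tutorial):
# a typed file output wired to a typed file input. 'TFModel' is an arbitrary
# type name; the system only checks that connected ends match. Note that the
# "_path" suffix is stripped, so the output below is simply named 'model'.
@func_to_container_op
def produce_model(model_path: OutputPath('TFModel')):
    with open(model_path, 'w') as writer:
        writer.write('weights')  # placeholder payload
@func_to_container_op
def consume_model(model_path: InputPath('TFModel')):
    with open(model_path, 'r') as reader:
        print(reader.read())
def model_passing_pipeline():
    produce_task = produce_model()
    consume_model(produce_task.outputs['model'])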
# %% [markdown]
#
# ### Writing and reading bigger data
# %%
# Writing bigger data
@func_to_container_op
def repeat_line(line: str, output_text_path: OutputPath(str), count: int = 10):
'''Repeat the line specified number of times'''
with open(output_text_path, 'w') as writer:
for i in range(count):
writer.write(line + '\n')
# Reading bigger data
@func_to_container_op
def print_text(text_path: InputPath()): # The "text" input is untyped so that any data can be printed
'''Print text'''
with open(text_path, 'r') as reader:
for line in reader:
print(line, end = '')
def print_repeating_lines_pipeline():
repeat_lines_task = repeat_line(line='Hello', count=5000)
print_text(repeat_lines_task.output) # Don't forget .output !
# Submit the pipeline for execution:
#kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(print_repeating_lines_pipeline, arguments={})
# %% [markdown]
# ### Processing bigger data
# %%
@func_to_container_op
def split_text_lines(source_path: InputPath(str), odd_lines_path: OutputPath(str), even_lines_path: OutputPath(str)):
with open(source_path, 'r') as reader:
with open(odd_lines_path, 'w') as odd_writer:
with open(even_lines_path, 'w') as even_writer:
while True:
line = reader.readline()
if line == "":
break
odd_writer.write(line)
line = reader.readline()
if line == "":
break
even_writer.write(line)
def text_splitting_pipeline():
text = '\n'.join(['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten'])
split_text_task = split_text_lines(text)
print_text(split_text_task.outputs['odd_lines'])
print_text(split_text_task.outputs['even_lines'])
# Submit the pipeline for execution:
#kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(text_splitting_pipeline, arguments={})
# %% [markdown]
# ### Example: Pipeline that generates then sums many numbers
# %%
# Writing many numbers
@func_to_container_op
def write_numbers(numbers_path: OutputPath(str), start: int = 0, count: int = 10):
with open(numbers_path, 'w') as writer:
for i in range(start, count):
writer.write(str(i) + '\n')
# Reading and summing many numbers
@func_to_container_op
def sum_numbers(numbers_path: InputPath(str)) -> int:
sum = 0
with open(numbers_path, 'r') as reader:
for line in reader:
sum = sum + int(line)
return sum
# Pipeline to sum 100000 numbers
def sum_pipeline(count: int = 100000):
numbers_task = write_numbers(count=count)
print_text(numbers_task.output)
sum_task = sum_numbers(numbers_task.outputs['numbers'])
print_text(sum_task.output)
# Submit the pipeline for execution:
#kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(sum_pipeline, arguments={})
# Combining all pipelines together in a single pipeline
def file_passing_pipelines():
print_repeating_lines_pipeline()
text_splitting_pipeline()
sum_pipeline()
if __name__ == '__main__':
# Compiling the pipeline
kfp.compiler.Compiler().compile(file_passing_pipelines, __file__ + '.yaml')
|
poiesisconsulting/openerp-restaurant
|
refs/heads/master
|
hr_timesheet_invoice/report/hr_timesheet_invoice_report.py
|
40
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
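# Every model below is a read-only reporting model backed by a SQL view
# (_auto = False): init() drops and re-creates the view, so rows are
# aggregated from account_analytic_line at query time.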
class report_timesheet_line(osv.osv):
_name = "report.timesheet.line"
_description = "Timesheet Line"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id': fields.many2one('res.users', 'User', readonly=True),
'date': fields.date('Date', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'product_id': fields.many2one('product.product', 'Product',readonly=True),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'general_account_id': fields.many2one('account.account', 'General Account', readonly=True),
'invoice_id': fields.many2one('account.invoice', 'Invoiced', readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_line')
cr.execute("""
create or replace view report_timesheet_line as (
select
min(l.id) as id,
l.date as date,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
to_char(l.date, 'YYYY-MM-DD') as day,
l.invoice_id,
l.product_id,
l.account_id,
l.general_account_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
l.user_id is not null
group by
l.date,
l.user_id,
l.product_id,
l.account_id,
l.general_account_id,
l.invoice_id
)
""")
class report_timesheet_user(osv.osv):
_name = "report_timesheet.user"
_description = "Timesheet per day"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_user')
cr.execute("""
create or replace view report_timesheet_user as (
select
min(l.id) as id,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
user_id is not null
group by l.date, to_char(l.date,'YYYY'),to_char(l.date,'MM'), l.user_id
)
""")
class report_timesheet_account(osv.osv):
_name = "report_timesheet.account"
_description = "Timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account')
cr.execute("""
create or replace view report_timesheet_account as (
select
min(id) as id,
to_char(create_date, 'YYYY') as name,
to_char(create_date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(create_date, 'YYYY'),to_char(create_date, 'MM'), user_id, account_id
)
""")
class report_timesheet_account_date(osv.osv):
_name = "report_timesheet.account.date"
_description = "Daily timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year',size=64,required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account_date')
cr.execute("""
create or replace view report_timesheet_account_date as (
select
min(id) as id,
to_char(date,'YYYY') as name,
to_char(date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(date,'YYYY'),to_char(date,'MM'), user_id, account_id
)
""")
class report_timesheet_invoice(osv.osv):
_name = "report_timesheet.invoice"
_description = "Costs to invoice"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Project', readonly=True),
'manager_id':fields.many2one('res.users', 'Manager', readonly=True),
'quantity': fields.float('Time', readonly=True),
'amount_invoice': fields.float('To invoice', readonly=True)
}
_rec_name = 'user_id'
_order = 'user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_invoice')
cr.execute("""
create or replace view report_timesheet_invoice as (
select
min(l.id) as id,
l.user_id as user_id,
l.account_id as account_id,
a.user_id as manager_id,
sum(l.unit_amount) as quantity,
sum(l.unit_amount * t.list_price) as amount_invoice
from account_analytic_line l
left join hr_timesheet_invoice_factor f on (l.to_invoice=f.id)
left join account_analytic_account a on (l.account_id=a.id)
                left join product_product p on (l.product_id=p.id)
                left join product_template t on (p.product_tmpl_id=t.id)
where
l.to_invoice is not null and
l.invoice_id is null
group by
l.user_id,
l.account_id,
a.user_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rjschwei/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/models/load_balancer.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LoadBalancer(Resource):
"""LoadBalancer resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param frontend_ip_configurations: Object representing the frontend IPs to
be used for the load balancer
:type frontend_ip_configurations: list of :class:`FrontendIPConfiguration
<azure.mgmt.network.models.FrontendIPConfiguration>`
:param backend_address_pools: Collection of backend address pools used by
a load balancer
:type backend_address_pools: list of :class:`BackendAddressPool
<azure.mgmt.network.models.BackendAddressPool>`
    :param load_balancing_rules: Object collection representing the load
     balancing rules of the load balancer
:type load_balancing_rules: list of :class:`LoadBalancingRule
<azure.mgmt.network.models.LoadBalancingRule>`
:param probes: Collection of probe objects used in the load balancer
:type probes: list of :class:`Probe <azure.mgmt.network.models.Probe>`
:param inbound_nat_rules: Collection of inbound NAT Rules used by a load
balancer. Defining inbound NAT rules on your load balancer is mutually
exclusive with defining an inbound NAT pool. Inbound NAT pools are
referenced from virtual machine scale sets. NICs that are associated with
individual virtual machines cannot reference an Inbound NAT pool. They
have to reference individual inbound NAT rules.
:type inbound_nat_rules: list of :class:`InboundNatRule
<azure.mgmt.network.models.InboundNatRule>`
:param inbound_nat_pools: Defines an external port range for inbound NAT
to a single backend port on NICs associated with a load balancer. Inbound
NAT rules are created automatically for each NIC associated with the Load
Balancer using an external port from this range. Defining an Inbound NAT
pool on your Load Balancer is mutually exclusive with defining inbound Nat
rules. Inbound NAT pools are referenced from virtual machine scale sets.
NICs that are associated with individual virtual machines cannot reference
an inbound NAT pool. They have to reference individual inbound NAT rules.
:type inbound_nat_pools: list of :class:`InboundNatPool
<azure.mgmt.network.models.InboundNatPool>`
:param outbound_nat_rules: The outbound NAT rules.
:type outbound_nat_rules: list of :class:`OutboundNatRule
<azure.mgmt.network.models.OutboundNatRule>`
:param resource_guid: The resource GUID property of the load balancer
resource.
:type resource_guid: str
    :param provisioning_state: Gets the provisioning state of the load
     balancer resource. Possible values are: 'Updating', 'Deleting', and
     'Failed'.
:type provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[FrontendIPConfiguration]'},
'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[BackendAddressPool]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[LoadBalancingRule]'},
'probes': {'key': 'properties.probes', 'type': '[Probe]'},
'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[InboundNatRule]'},
'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[InboundNatPool]'},
'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[OutboundNatRule]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, frontend_ip_configurations=None, backend_address_pools=None, load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None, outbound_nat_rules=None, resource_guid=None, provisioning_state=None, etag=None):
super(LoadBalancer, self).__init__(id=id, location=location, tags=tags)
self.frontend_ip_configurations = frontend_ip_configurations
self.backend_address_pools = backend_address_pools
self.load_balancing_rules = load_balancing_rules
self.probes = probes
self.inbound_nat_rules = inbound_nat_rules
self.inbound_nat_pools = inbound_nat_pools
self.outbound_nat_rules = outbound_nat_rules
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
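# A minimal construction sketch (editor's addition, not generated code):
# keyword arguments mirror the _attribute_map above, while server-populated
# read-only fields ('name', 'type') are left unset.
#
#   lb = LoadBalancer(location='westus', tags={'env': 'test'})
#   lb.probes = []  # collections can be populated before sending the request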
|
bfirsh/django-old
|
refs/heads/master
|
django/contrib/contenttypes/__init__.py
|
12133432
| |
andymckay/addons-server
|
refs/heads/master
|
src/olympia/tags/management/__init__.py
|
12133432
| |
jank3/django
|
refs/heads/master
|
tests/model_regress/__init__.py
|
12133432
| |
napkindrawing/ansible
|
refs/heads/devel
|
test/units/modules/network/eos/__init__.py
|
12133432
| |
gilt/nova
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
vvv1559/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorFallbackResolveResultForPositionalChunk.py
|
31
|
numbers = ('eins', 'zwei', 'drei')
'%s %s %s' % nu<caret>mbers
|
ArcherSys/ArcherSys
|
refs/heads/master
|
Lib/site-packages/django/contrib/sitemaps/apps.py
|
590
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SiteMapsConfig(AppConfig):
name = 'django.contrib.sitemaps'
verbose_name = _("Site Maps")
|
RachitKansal/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_kernel_approximation.py
|
244
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
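# rows are normalized to unit sum: the chi^2-based kernels below are
# defined for non-negative input data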
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
technologiescollege/s2a_fr
|
refs/heads/portable
|
Snap!Files/Snap!Mobile/arduino/serial/rfc2217.py
|
141
|
#! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements an RFC 2217 compatible client. RFC 2217 describes a
# protocol to access serial ports over TCP/IP and allows setting the baud rate,
# modem control lines etc.
#
# (C) 2001-2013 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# TODO:
# - setting control line -> answer is not checked (had problems with one of the
#   servers). Consider implementing a compatibility mode flag to make the check
# conditional
# - write timeout not implemented at all
##############################################################################
# observations and issues with servers
#=============================================================================
# sredird V2.2.1
# - http://www.ibiblio.org/pub/Linux/system/serial/ sredird-2.2.2.tar.gz
# - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding
# [105 1] instead of the actual value.
# - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger
# numbers than 2**32?
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done
#=============================================================================
# telnetcpcd (untested)
# - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz
# - To get the signature [COM_PORT_OPTION] w/o data has to be sent.
#=============================================================================
# ser2net
# - does not negotiate BINARY or COM_PORT_OPTION for his side but at least
# acknowledges that the client activates these options
# - The configuration may be that the server prints a banner. As this client
# implementation does a flushInput on connect, this banner is hidden from
# the user application.
# - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one
# second.
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: run ser2net daemon, in /etc/ser2net.conf:
# 2000:telnet:0:/dev/ttyS0:9600 remctl banner
##############################################################################
# How to identify ports? pySerial might want to support other protocols in the
# future, so let's use a URL scheme.
# for RFC2217 compliant servers we will use this:
# rfc2217://<host>:<port>[/option[/option...]]
#
# options:
# - "debug" print diagnostic messages
# - "ign_set_control": do not look at the answers to SET_CONTROL
# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
# Without this option it expects that the server sends notifications
# automatically on change (which most servers do and is according to the
# RFC).
# the order of the options is not relevant
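# Example (editor's sketch, assuming a reachable server): RFC 2217 URLs are
# normally opened through serial.serial_for_url rather than by using this
# module directly:
#   import serial
#   s = serial.serial_for_url('rfc2217://localhost:7000', baudrate=9600)
#   s.write(b'hello')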
from serial.serialutil import *
import time
import struct
import socket
import threading
import Queue
import logging
# port string is expected to be something like this:
# rfc2217://host:port
# host may be an IP or including domain, whatever.
# port is 0...65535
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
# telnet protocol characters
IAC = to_bytes([255]) # Interpret As Command
DONT = to_bytes([254])
DO = to_bytes([253])
WONT = to_bytes([252])
WILL = to_bytes([251])
IAC_DOUBLED = to_bytes([IAC, IAC])
SE = to_bytes([240]) # Subnegotiation End
NOP = to_bytes([241]) # No Operation
DM = to_bytes([242]) # Data Mark
BRK = to_bytes([243]) # Break
IP = to_bytes([244]) # Interrupt process
AO = to_bytes([245]) # Abort output
AYT = to_bytes([246]) # Are You There
EC = to_bytes([247]) # Erase Character
EL = to_bytes([248]) # Erase Line
GA = to_bytes([249]) # Go Ahead
SB = to_bytes([250]) # Subnegotiation Begin
# selected telnet options
BINARY = to_bytes([0]) # 8-bit data path
ECHO = to_bytes([1]) # echo
SGA = to_bytes([3]) # suppress go ahead
# RFC2217
COM_PORT_OPTION = to_bytes([44])
# Client to Access Server
SET_BAUDRATE = to_bytes([1])
SET_DATASIZE = to_bytes([2])
SET_PARITY = to_bytes([3])
SET_STOPSIZE = to_bytes([4])
SET_CONTROL = to_bytes([5])
NOTIFY_LINESTATE = to_bytes([6])
NOTIFY_MODEMSTATE = to_bytes([7])
FLOWCONTROL_SUSPEND = to_bytes([8])
FLOWCONTROL_RESUME = to_bytes([9])
SET_LINESTATE_MASK = to_bytes([10])
SET_MODEMSTATE_MASK = to_bytes([11])
PURGE_DATA = to_bytes([12])
SERVER_SET_BAUDRATE = to_bytes([101])
SERVER_SET_DATASIZE = to_bytes([102])
SERVER_SET_PARITY = to_bytes([103])
SERVER_SET_STOPSIZE = to_bytes([104])
SERVER_SET_CONTROL = to_bytes([105])
SERVER_NOTIFY_LINESTATE = to_bytes([106])
SERVER_NOTIFY_MODEMSTATE = to_bytes([107])
SERVER_FLOWCONTROL_SUSPEND = to_bytes([108])
SERVER_FLOWCONTROL_RESUME = to_bytes([109])
SERVER_SET_LINESTATE_MASK = to_bytes([110])
SERVER_SET_MODEMSTATE_MASK = to_bytes([111])
SERVER_PURGE_DATA = to_bytes([112])
RFC2217_ANSWER_MAP = {
SET_BAUDRATE: SERVER_SET_BAUDRATE,
SET_DATASIZE: SERVER_SET_DATASIZE,
SET_PARITY: SERVER_SET_PARITY,
SET_STOPSIZE: SERVER_SET_STOPSIZE,
SET_CONTROL: SERVER_SET_CONTROL,
NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
PURGE_DATA: SERVER_PURGE_DATA,
}
SET_CONTROL_REQ_FLOW_SETTING = to_bytes([0]) # Request Com Port Flow Control Setting (outbound/both)
SET_CONTROL_USE_NO_FLOW_CONTROL = to_bytes([1]) # Use No Flow Control (outbound/both)
SET_CONTROL_USE_SW_FLOW_CONTROL = to_bytes([2]) # Use XON/XOFF Flow Control (outbound/both)
SET_CONTROL_USE_HW_FLOW_CONTROL = to_bytes([3]) # Use HARDWARE Flow Control (outbound/both)
SET_CONTROL_REQ_BREAK_STATE = to_bytes([4]) # Request BREAK State
SET_CONTROL_BREAK_ON = to_bytes([5]) # Set BREAK State ON
SET_CONTROL_BREAK_OFF = to_bytes([6]) # Set BREAK State OFF
SET_CONTROL_REQ_DTR = to_bytes([7]) # Request DTR Signal State
SET_CONTROL_DTR_ON = to_bytes([8]) # Set DTR Signal State ON
SET_CONTROL_DTR_OFF = to_bytes([9]) # Set DTR Signal State OFF
SET_CONTROL_REQ_RTS = to_bytes([10]) # Request RTS Signal State
SET_CONTROL_RTS_ON = to_bytes([11]) # Set RTS Signal State ON
SET_CONTROL_RTS_OFF = to_bytes([12]) # Set RTS Signal State OFF
SET_CONTROL_REQ_FLOW_SETTING_IN = to_bytes([13]) # Request Com Port Flow Control Setting (inbound)
SET_CONTROL_USE_NO_FLOW_CONTROL_IN = to_bytes([14]) # Use No Flow Control (inbound)
SET_CONTROL_USE_SW_FLOW_CONTROL_IN = to_bytes([15]) # Use XON/XOFF Flow Control (inbound)
SET_CONTROL_USE_HW_FLOW_CONTROL_IN = to_bytes([16]) # Use HARDWARE Flow Control (inbound)
SET_CONTROL_USE_DCD_FLOW_CONTROL = to_bytes([17]) # Use DCD Flow Control (outbound/both)
SET_CONTROL_USE_DTR_FLOW_CONTROL = to_bytes([18]) # Use DTR Flow Control (inbound)
SET_CONTROL_USE_DSR_FLOW_CONTROL = to_bytes([19]) # Use DSR Flow Control (outbound/both)
LINESTATE_MASK_TIMEOUT = 128 # Time-out Error
LINESTATE_MASK_SHIFTREG_EMPTY = 64 # Transfer Shift Register Empty
LINESTATE_MASK_TRANSREG_EMPTY = 32 # Transfer Holding Register Empty
LINESTATE_MASK_BREAK_DETECT = 16 # Break-detect Error
LINESTATE_MASK_FRAMING_ERROR = 8 # Framing Error
LINESTATE_MASK_PARITY_ERROR = 4 # Parity Error
LINESTATE_MASK_OVERRUN_ERROR = 2 # Overrun Error
LINESTATE_MASK_DATA_READY = 1 # Data Ready
MODEMSTATE_MASK_CD = 128 # Receive Line Signal Detect (also known as Carrier Detect)
MODEMSTATE_MASK_RI = 64 # Ring Indicator
MODEMSTATE_MASK_DSR = 32 # Data-Set-Ready Signal State
MODEMSTATE_MASK_CTS = 16 # Clear-To-Send Signal State
MODEMSTATE_MASK_CD_CHANGE = 8 # Delta Receive Line Signal Detect
MODEMSTATE_MASK_RI_CHANGE = 4 # Trailing-edge Ring Detector
MODEMSTATE_MASK_DSR_CHANGE = 2 # Delta Data-Set-Ready
MODEMSTATE_MASK_CTS_CHANGE = 1 # Delta Clear-To-Send
PURGE_RECEIVE_BUFFER = to_bytes([1]) # Purge access server receive data buffer
PURGE_TRANSMIT_BUFFER = to_bytes([2]) # Purge access server transmit data buffer
PURGE_BOTH_BUFFERS = to_bytes([3]) # Purge both the access server receive data buffer and the access server transmit data buffer
RFC2217_PARITY_MAP = {
PARITY_NONE: 1,
PARITY_ODD: 2,
PARITY_EVEN: 3,
PARITY_MARK: 4,
PARITY_SPACE: 5,
}
RFC2217_REVERSE_PARITY_MAP = dict((v,k) for k,v in RFC2217_PARITY_MAP.items())
RFC2217_STOPBIT_MAP = {
STOPBITS_ONE: 1,
STOPBITS_ONE_POINT_FIVE: 3,
STOPBITS_TWO: 2,
}
RFC2217_REVERSE_STOPBIT_MAP = dict((v,k) for k,v in RFC2217_STOPBIT_MAP.items())
# Telnet filter states
M_NORMAL = 0
M_IAC_SEEN = 1
M_NEGOTIATE = 2
# TelnetOption and TelnetSubnegotiation states
REQUESTED = 'REQUESTED'
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
REALLY_INACTIVE = 'REALLY_INACTIVE'
class TelnetOption(object):
"""Manage a single telnet option, keeps track of DO/DONT WILL/WONT."""
def __init__(self, connection, name, option, send_yes, send_no, ack_yes, ack_no, initial_state, activation_callback=None):
"""\
Initialize option.
:param connection: connection used to transmit answers
:param name: a readable name for debug outputs
:param send_yes: what to send when option is to be enabled.
:param send_no: what to send when option is to be disabled.
:param ack_yes: what to expect when remote agrees on option.
:param ack_no: what to expect when remote disagrees on option.
:param initial_state: options initialized with REQUESTED are requested
on startup. use INACTIVE for all others.
"""
self.connection = connection
self.name = name
self.option = option
self.send_yes = send_yes
self.send_no = send_no
self.ack_yes = ack_yes
self.ack_no = ack_no
self.state = initial_state
self.active = False
self.activation_callback = activation_callback
def __repr__(self):
"""String for debug outputs"""
return "%s:%s(%s)" % (self.name, self.active, self.state)
def process_incoming(self, command):
"""A DO/DONT/WILL/WONT was received for this option, update state and
answer when needed."""
if command == self.ack_yes:
if self.state is REQUESTED:
self.state = ACTIVE
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is ACTIVE:
pass
elif self.state is INACTIVE:
self.state = ACTIVE
self.connection.telnetSendOption(self.send_yes, self.option)
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is REALLY_INACTIVE:
self.connection.telnetSendOption(self.send_no, self.option)
else:
raise ValueError('option in illegal state %r' % self)
elif command == self.ack_no:
if self.state is REQUESTED:
self.state = INACTIVE
self.active = False
elif self.state is ACTIVE:
self.state = INACTIVE
self.connection.telnetSendOption(self.send_no, self.option)
self.active = False
elif self.state is INACTIVE:
pass
elif self.state is REALLY_INACTIVE:
pass
else:
raise ValueError('option in illegal state %r' % self)
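# A minimal usage sketch for TelnetOption (mirroring the instances created
# in RFC2217Serial.open below): a REQUESTED option is announced on startup
# and process_incoming updates its state when the remote answers.
#~ opt = TelnetOption(connection, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, REQUESTED)
#~ connection.telnetSendOption(opt.send_yes, opt.option) # announce WILL BINARY
#~ opt.process_incoming(DO) # remote agreed -> opt.active becomes True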
class TelnetSubnegotiation(object):
"""\
An object to handle subnegotiation of options; in this case, the
sub-options of RFC 2217's COM_PORT_OPTION. It is used to track com port options.
"""
def __init__(self, connection, name, option, ack_option=None):
if ack_option is None: ack_option = option
self.connection = connection
self.name = name
self.option = option
self.value = None
self.ack_option = ack_option
self.state = INACTIVE
def __repr__(self):
"""String for debug outputs."""
return "%s:%s" % (self.name, self.state)
def set(self, value):
"""\
request a change of the value. a request is sent to the server. if
the caller needs to know whether the change was performed, it has to
check the state of this object.
"""
self.value = value
self.state = REQUESTED
self.connection.rfc2217SendSubnegotiation(self.option, self.value)
if self.connection.logger:
self.connection.logger.debug("SB Requesting %s -> %r" % (self.name, self.value))
def isReady(self):
"""\
check if the answer from the server has been received. when the
server rejects the change, a ValueError is raised.
"""
if self.state == REALLY_INACTIVE:
raise ValueError("remote rejected value for option %r" % (self.name))
return self.state == ACTIVE
# add property to have a similar interface as TelnetOption
active = property(isReady)
def wait(self, timeout=3):
"""\
wait until the subnegotiation has been acknowledged or the timeout
expires. It can also raise a ValueError when the answer from the server
does not match the value sent.
"""
timeout_time = time.time() + timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if self.isReady():
break
else:
raise SerialException("timeout while waiting for option %r" % (self.name))
def checkAnswer(self, suboption):
"""\
check an incoming subnegotiation block. the parameter is expected to
have the header (sub option number and com port option value) already cut off.
"""
if self.value == suboption[:len(self.value)]:
self.state = ACTIVE
else:
# error propagation done in isReady
self.state = REALLY_INACTIVE
if self.connection.logger:
self.connection.logger.debug("SB Answer %s -> %r -> %s" % (self.name, suboption, self.state))
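# A minimal sketch of the request/acknowledge pattern implemented by
# TelnetSubnegotiation (this is what rfc2217SendPurge and rfc2217SetControl
# below do):
#~ item = TelnetSubnegotiation(connection, 'purge', PURGE_DATA, SERVER_PURGE_DATA)
#~ item.set(PURGE_BOTH_BUFFERS) # transmit the request to the server
#~ item.wait(3) # block until acknowledged, raise SerialException on timeout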
class RFC2217Serial(SerialBase):
"""Serial port implementation for RFC 2217 remote serial ports."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may raise a SerialException
if the port cannot be opened.
"""
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect(self.fromURL(self.portstr))
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception, msg:
self._socket = None
raise SerialException("Could not open port %s: %s" % (self.portstr, msg))
self._socket.settimeout(5) # XXX good value?
# use a thread-safe queue as buffer. it also simplifies implementing
# the read timeout
self._read_buffer = Queue.Queue()
# to ensure that user writes do not interfere with internal
# telnet/rfc2217 option negotiation, establish a lock
self._write_lock = threading.Lock()
# name the following separately so that, below, a check can be easily done
mandatory_options = [
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED),
]
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED),
] + mandatory_options
# RFC 2217 specific states
# COM port settings
self._rfc2217_port_settings = {
'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE),
'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE),
'parity': TelnetSubnegotiation(self, 'parity', SET_PARITY, SERVER_SET_PARITY),
'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE),
}
# There are more subnegotiation objects, combine all in one dictionary
# for easy access
self._rfc2217_options = {
'purge': TelnetSubnegotiation(self, 'purge', PURGE_DATA, SERVER_PURGE_DATA),
'control': TelnetSubnegotiation(self, 'control', SET_CONTROL, SERVER_SET_CONTROL),
}
self._rfc2217_options.update(self._rfc2217_port_settings)
# cache for line and modem states that the server sends to us
self._linestate = 0
self._modemstate = None
self._modemstate_expires = 0
# RFC 2217 flow control between server and client
self._remote_suspend_flow = False
self._thread = threading.Thread(target=self._telnetReadLoop)
self._thread.setDaemon(True)
self._thread.setName('pySerial RFC 2217 reader thread for %s' % (self._port,))
self._thread.start()
# negotiate Telnet/RFC 2217 -> send initial requests
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnetSendOption(option.send_yes, option.option)
# now wait until important options are negotiated
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in mandatory_options) == len(mandatory_options):
break
else:
raise SerialException("Remote does not seem to support RFC2217 or BINARY mode %r" % mandatory_options)
if self.logger:
self.logger.info("Negotiated options: %s" % self._telnet_options)
# fine, go on, set RFC 2217 specific things
self._reconfigurePort()
# everything is set up, now get a clean start
self._isOpen = True
if not self._rtscts:
self.setRTS(True)
self.setDTR(True)
self.flushInput()
self.flushOutput()
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if self._socket is None:
raise SerialException("Can only operate on open ports")
# if self._timeout != 0 and self._interCharTimeout is not None:
# XXX
if self._writeTimeout is not None:
raise NotImplementedError('writeTimeout is currently not supported')
# XXX
# Setup the connection
# to get good performance, all parameter changes are sent first...
if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
self._rfc2217_port_settings['baudrate'].set(struct.pack('!I', self._baudrate))
self._rfc2217_port_settings['datasize'].set(struct.pack('!B', self._bytesize))
self._rfc2217_port_settings['parity'].set(struct.pack('!B', RFC2217_PARITY_MAP[self._parity]))
self._rfc2217_port_settings['stopsize'].set(struct.pack('!B', RFC2217_STOPBIT_MAP[self._stopbits]))
# and now wait until parameters are active
items = self._rfc2217_port_settings.values()
if self.logger:
self.logger.debug("Negotiating settings: %s" % (items,))
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in items) == len(items):
break
else:
raise SerialException("Remote does not accept parameter change (RFC2217): %r" % items)
if self.logger:
self.logger.info("Negotiated settings: %s" % (items,))
if self._rtscts and self._xonxoff:
raise ValueError('xonxoff and rtscts together are not supported')
elif self._rtscts:
self.rfc2217SetControl(SET_CONTROL_USE_HW_FLOW_CONTROL)
elif self._xonxoff:
self.rfc2217SetControl(SET_CONTROL_USE_SW_FLOW_CONTROL)
else:
self.rfc2217SetControl(SET_CONTROL_USE_NO_FLOW_CONTROL)
def close(self):
"""Close port"""
if self._isOpen:
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
self._socket = None
if self._thread:
self._thread.join()
self._isOpen = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def makeDeviceName(self, port):
raise SerialException("there is no sensible way to turn numbers into URLs")
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("rfc2217://"): url = url[10:]
try:
# is there a "path" (our options)?
if '/' in url:
# cut away options
url, options = url.split('/', 1)
# process options now, directly altering self
for option in options.split('/'):
if '=' in option:
option, value = option.split('=', 1)
else:
value = None
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.rfc2217')
self.logger.setLevel(LOGGER_LEVELS[value])
self.logger.debug('enabled logging')
elif option == 'ign_set_control':
self._ignore_set_control_answer = True
elif option == 'poll_modem':
self._poll_modem_state = True
elif option == 'timeout':
self._network_timeout = float(value)
else:
raise ValueError('unknown option: %r' % (option,))
# get host and port
host, port = url.split(':', 1) # may raise ValueError because of unpacking
port = int(port) # and this if it's not a number
if not 0 <= port < 65536: raise ValueError("port not in range 0...65535")
except ValueError, e:
raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e)
return (host, port)
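# Examples for fromURL (a sketch; hosts and ports are hypothetical):
#~ self.fromURL('rfc2217://localhost:7000') # -> ('localhost', 7000)
#~ self.fromURL('rfc2217://10.0.0.1:2217/poll_modem/timeout=5')
#~ # -> ('10.0.0.1', 2217), also sets _poll_modem_state and _network_timeout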
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
return self._read_buffer.qsize()
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self._isOpen: raise portNotOpenError
data = bytearray()
try:
while len(data) < size:
if self._thread is None:
raise SerialException('connection failed (reader thread died)')
data.append(self._read_buffer.get(True, self._timeout))
except Queue.Empty: # -> timeout
pass
return bytes(data)
def write(self, data):
"""\
Output the given string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self._isOpen: raise portNotOpenError
self._write_lock.acquire()
try:
try:
self._socket.sendall(to_bytes(data).replace(IAC, IAC_DOUBLED))
except socket.error, e:
raise SerialException("connection failed (socket error): %s" % e) # XXX what exception if socket connection fails
finally:
self._write_lock.release()
return len(data)
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
self.rfc2217SendPurge(PURGE_RECEIVE_BUFFER)
# empty read buffer
while self._read_buffer.qsize():
self._read_buffer.get(False)
def flushOutput(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self._isOpen: raise portNotOpenError
self.rfc2217SendPurge(PURGE_TRANSMIT_BUFFER)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given
duration."""
if not self._isOpen: raise portNotOpenError
self.setBreak(True)
time.sleep(duration)
self.setBreak(False)
def setBreak(self, level=True):
"""\
Set break: Controls TXD. When active, no transmitting is
possible.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set BREAK to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_BREAK_ON)
else:
self.rfc2217SetControl(SET_CONTROL_BREAK_OFF)
def setRTS(self, level=True):
"""Set terminal status line: Request To Send."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set RTS to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_RTS_ON)
else:
self.rfc2217SetControl(SET_CONTROL_RTS_OFF)
def setDTR(self, level=True):
"""Set terminal status line: Data Terminal Ready."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set DTR to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_DTR_ON)
else:
self.rfc2217SetControl(SET_CONTROL_DTR_OFF)
def getCTS(self):
"""Read terminal status line: Clear To Send."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_CTS)
def getDSR(self):
"""Read terminal status line: Data Set Ready."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_DSR)
def getRI(self):
"""Read terminal status line: Ring Indicator."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_RI)
def getCD(self):
"""Read terminal status line: Carrier Detect."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_CD)
# - - - platform specific - - -
# None so far
# - - - RFC2217 specific - - -
def _telnetReadLoop(self):
"""read loop for the socket."""
mode = M_NORMAL
suboption = None
try:
while self._socket is not None:
try:
data = self._socket.recv(1024)
except socket.timeout:
# just need to get out of recv from time to time to check if
# still alive
continue
except socket.error, e:
# connection fails -> terminate loop
if self.logger:
self.logger.debug("socket error in reader thread: %s" % (e,))
break
if not data: break # lost connection
for byte in data:
if mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
mode = M_IAC_SEEN
else:
# store data in read buffer or sub option buffer
# depending on state
if suboption is not None:
suboption.append(byte)
else:
self._read_buffer.put(byte)
elif mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if suboption is not None:
suboption.append(IAC)
else:
self._read_buffer.put(IAC)
mode = M_NORMAL
elif byte == SB:
# sub option start
suboption = bytearray()
mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnetProcessSubnegotiation(bytes(suboption))
suboption = None
mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
telnet_command = byte
mode = M_NEGOTIATE
else:
# other telnet commands
self._telnetProcessCommand(byte)
mode = M_NORMAL
elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnetNegotiateOption(telnet_command, byte)
mode = M_NORMAL
finally:
self._thread = None
if self.logger:
self.logger.debug("read thread terminated")
# - incoming telnet commands and options
def _telnetProcessCommand(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: %r" % (command,))
def _telnetNegotiateOption(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnetSendOption((command == WILL and DONT or WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: %r" % (option,))
def _telnetProcessSubnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3:
self._linestate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_LINESTATE: %s" % self._linestate)
elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3:
self._modemstate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: %s" % self._modemstate)
# update time when we think that a poll would make sense
self._modemstate_expires = time.time() + 0.3
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
self._remote_suspend_flow = False
else:
for item in self._rfc2217_options.values():
if item.ack_option == suboption[1:2]:
#~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:])
item.checkAnswer(bytes(suboption[2:]))
break
else:
if self.logger:
self.logger.warning("ignoring COM_PORT_OPTION: %r" % (suboption,))
else:
if self.logger:
self.logger.warning("ignoring subnegotiation: %r" % (suboption,))
# - outgoing telnet commands and options
def _internal_raw_write(self, data):
"""internal socket write with no data escaping. used to send telnet stuff."""
self._write_lock.acquire()
try:
self._socket.sendall(data)
finally:
self._write_lock.release()
def telnetSendOption(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self._internal_raw_write(to_bytes([IAC, action, option]))
def rfc2217SendSubnegotiation(self, option, value=''):
"""Subnegotiation of RFC2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self._internal_raw_write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
def rfc2217SendPurge(self, value):
item = self._rfc2217_options['purge']
item.set(value) # transmit desired purge type
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217SetControl(self, value):
item = self._rfc2217_options['control']
item.set(value) # transmit desired control type
if self._ignore_set_control_answer:
# answers are ignored when option is set. compatibility mode for
# servers that answer, but not the expected one... (or no answer
# at all) i.e. sredird
time.sleep(0.1) # this helps getting the unit tests passed
else:
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217FlowServerReady(self):
"""\
check if server is ready to receive data. block for some time when
not.
"""
#~ if self._remote_suspend_flow:
#~ wait---
def getModemState(self):
"""\
get last modem state (cached value). if the value is "old", request a
new one. this cache helps to avoid issuing too many requests when e.g.
all status lines, one after the other, are queried by the user (getCTS,
getDSR etc.)
"""
# active modem state polling enabled? is the value fresh enough?
if self._poll_modem_state and self._modemstate_expires < time.time():
if self.logger:
self.logger.debug('polling modem state')
# when it is older, request an update
self.rfc2217SendSubnegotiation(NOTIFY_MODEMSTATE)
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
# when expiration time is updated, it means that there is a new
# value
if self._modemstate_expires > time.time():
break
else:
if self.logger:
self.logger.warning('poll for modem state failed')
# even when there is a timeout, do not generate an error just
# return the last known value. this way we can support buggy
# servers that do not respond to polls, but send automatic
# updates.
if self._modemstate is not None:
if self.logger:
self.logger.debug('using cached modem state')
return self._modemstate
else:
# never received a notification from the server
raise SerialException("remote sends no NOTIFY_MODEMSTATE")
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(RFC2217Serial, FileLike):
pass
else:
# io library present
class Serial(RFC2217Serial, io.RawIOBase):
pass
#############################################################################
# The following is code that helps implementing an RFC 2217 server.
class PortManager(object):
"""\
This class manages the state of Telnet and RFC 2217. It needs a serial
instance and a connection to work with. Connection is expected to implement
a (thread-safe) write function that writes the string to the network.
"""
def __init__(self, serial_port, connection, logger=None):
self.serial = serial_port
self.connection = connection
self.logger = logger
self._client_is_rfc2217 = False
# filter state machine
self.mode = M_NORMAL
self.suboption = None
self.telnet_command = None
# states for modem/line control events
self.modemstate_mask = 255
self.last_modemstate = None
self.linestate_mask = 0
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok),
]
# negotiate Telnet/RFC2217 -> send initial requests
if self.logger:
self.logger.debug("requesting initial Telnet/RFC 2217 options")
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnetSendOption(option.send_yes, option.option)
# issue 1st modem state notification
def _client_ok(self):
"""\
callback of telnet option. it gets called when option is activated.
this one here is used to detect when the client agrees on RFC 2217. a
flag is set so that other functions like check_modem_lines know if the
client is ok.
"""
# The callback is used for "we" and "they", so if one party agrees, we're
# already happy. it seems not all servers do the negotiation correctly
# and there are probably incorrect clients too, so be happy if the client
# answers one or the other positively.
self._client_is_rfc2217 = True
if self.logger:
self.logger.info("client accepts RFC 2217")
# this is to ensure that the client gets a notification, even if there
# was no change
self.check_modem_lines(force_notification=True)
# - outgoing telnet commands and options
def telnetSendOption(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self.connection.write(to_bytes([IAC, action, option]))
def rfc2217SendSubnegotiation(self, option, value=''):
"""Subnegotiation of RFC 2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self.connection.write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
# - check modem lines, needs to be called periodically from user to
# establish polling
def check_modem_lines(self, force_notification=False):
modemstate = (
(self.serial.getCTS() and MODEMSTATE_MASK_CTS) |
(self.serial.getDSR() and MODEMSTATE_MASK_DSR) |
(self.serial.getRI() and MODEMSTATE_MASK_RI) |
(self.serial.getCD() and MODEMSTATE_MASK_CD)
)
# check what has changed
deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0
if deltas & MODEMSTATE_MASK_CTS:
modemstate |= MODEMSTATE_MASK_CTS_CHANGE
if deltas & MODEMSTATE_MASK_DSR:
modemstate |= MODEMSTATE_MASK_DSR_CHANGE
if deltas & MODEMSTATE_MASK_RI:
modemstate |= MODEMSTATE_MASK_RI_CHANGE
if deltas & MODEMSTATE_MASK_CD:
modemstate |= MODEMSTATE_MASK_CD_CHANGE
# if new state is different and the mask allows this change, send
# notification. suppress notifications when client is not rfc2217
if modemstate != self.last_modemstate or force_notification:
if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification:
self.rfc2217SendSubnegotiation(
SERVER_NOTIFY_MODEMSTATE,
to_bytes([modemstate & self.modemstate_mask])
)
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: %s" % (modemstate,))
# save last state, but forget about deltas.
# otherwise it would also notify about changing deltas which is
# probably not very useful
self.last_modemstate = modemstate & 0xf0
# - outgoing data escaping
def escape(self, data):
"""\
this generator function is for the user. all outgoing data has to be
properly escaped, so that no IAC character in the data stream messes up
the Telnet state machine in the server.
socket.sendall(''.join(escape(data)))
"""
for byte in data:
if byte == IAC:
yield IAC
yield IAC
else:
yield byte
# - incoming data filter
def filter(self, data):
"""\
handle a bunch of incoming bytes. this is a generator. it will yield
all characters not of interest for Telnet/RFC 2217.
The idea is that the reader thread pushes data from the socket through
this filter:
for byte in filter(socket.recv(1024)):
# do things like CR/LF conversion/whatever
# and write data to the serial port
serial.write(byte)
(socket error handling code left as exercise for the reader)
"""
for byte in data:
if self.mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
self.mode = M_IAC_SEEN
else:
# store data in sub option buffer or pass it to our
# consumer depending on state
if self.suboption is not None:
self.suboption.append(byte)
else:
yield byte
elif self.mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if self.suboption is not None:
self.suboption.append(byte)
else:
yield byte
self.mode = M_NORMAL
elif byte == SB:
# sub option start
self.suboption = bytearray()
self.mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnetProcessSubnegotiation(bytes(self.suboption))
self.suboption = None
self.mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
self.telnet_command = byte
self.mode = M_NEGOTIATE
else:
# other telnet commands
self._telnetProcessCommand(byte)
self.mode = M_NORMAL
elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnetNegotiateOption(self.telnet_command, byte)
self.mode = M_NORMAL
# - incoming telnet commands and options
def _telnetProcessCommand(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: %r" % (command,))
def _telnetNegotiateOption(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnetSendOption((command == WILL and DONT or WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: %r" % (option,))
def _telnetProcessSubnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if self.logger:
self.logger.debug('received COM_PORT_OPTION: %r' % (suboption,))
if suboption[1:2] == SET_BAUDRATE:
backup = self.serial.baudrate
try:
(baudrate,) = struct.unpack("!I", suboption[2:6])
if baudrate != 0:
self.serial.baudrate = baudrate
except ValueError, e:
if self.logger:
self.logger.error("failed to set baud rate: %s" % (e,))
self.serial.baudrate = backup
else:
if self.logger:
self.logger.info("%s baud rate: %s" % (baudrate and 'set' or 'get', self.serial.baudrate))
self.rfc2217SendSubnegotiation(SERVER_SET_BAUDRATE, struct.pack("!I", self.serial.baudrate))
elif suboption[1:2] == SET_DATASIZE:
backup = self.serial.bytesize
try:
(datasize,) = struct.unpack("!B", suboption[2:3])
if datasize != 0:
self.serial.bytesize = datasize
except ValueError, e:
if self.logger:
self.logger.error("failed to set data size: %s" % (e,))
self.serial.bytesize = backup
else:
if self.logger:
self.logger.info("%s data size: %s" % (datasize and 'set' or 'get', self.serial.bytesize))
self.rfc2217SendSubnegotiation(SERVER_SET_DATASIZE, struct.pack("!B", self.serial.bytesize))
elif suboption[1:2] == SET_PARITY:
backup = self.serial.parity
try:
parity = struct.unpack("!B", suboption[2:3])[0]
if parity != 0:
self.serial.parity = RFC2217_REVERSE_PARITY_MAP[parity]
except ValueError, e:
if self.logger:
self.logger.error("failed to set parity: %s" % (e,))
self.serial.parity = backup
else:
if self.logger:
self.logger.info("%s parity: %s" % (parity and 'set' or 'get', self.serial.parity))
self.rfc2217SendSubnegotiation(
SERVER_SET_PARITY,
struct.pack("!B", RFC2217_PARITY_MAP[self.serial.parity])
)
elif suboption[1:2] == SET_STOPSIZE:
backup = self.serial.stopbits
try:
stopbits = struct.unpack("!B", suboption[2:3])[0]
if stopbits != 0:
self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[stopbits]
except ValueError, e:
if self.logger:
self.logger.error("failed to set stop bits: %s" % (e,))
self.serial.stopbits = backup
else:
if self.logger:
self.logger.info("%s stop bits: %s" % (stopbits and 'set' or 'get', self.serial.stopbits))
self.rfc2217SendSubnegotiation(
SERVER_SET_STOPSIZE,
struct.pack("!B", RFC2217_STOPBIT_MAP[self.serial.stopbits])
)
elif suboption[1:2] == SET_CONTROL:
if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING:
if self.serial.xonxoff:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif self.serial.rtscts:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
else:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL:
self.serial.xonxoff = False
self.serial.rtscts = False
if self.logger:
self.logger.info("changed flow control to None")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL:
self.serial.xonxoff = True
if self.logger:
self.logger.info("changed flow control to XON/XOFF")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL:
self.serial.rtscts = True
if self.logger:
self.logger.info("changed flow control to RTS/CTS")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE:
if self.logger:
self.logger.warning("requested break state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_BREAK_ON:
self.serial.setBreak(True)
if self.logger:
self.logger.info("changed BREAK to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON)
elif suboption[2:3] == SET_CONTROL_BREAK_OFF:
self.serial.setBreak(False)
if self.logger:
self.logger.info("changed BREAK to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_DTR:
if self.logger:
self.logger.warning("requested DTR state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_DTR_ON:
self.serial.setDTR(True)
if self.logger:
self.logger.info("changed DTR to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON)
elif suboption[2:3] == SET_CONTROL_DTR_OFF:
self.serial.setDTR(False)
if self.logger:
self.logger.info("changed DTR to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_RTS:
if self.logger:
self.logger.warning("requested RTS state - not implemented")
pass # XXX needs cached value
#~ self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_ON:
self.serial.setRTS(True)
if self.logger:
self.logger.info("changed RTS to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_OFF:
self.serial.setRTS(False)
if self.logger:
self.logger.info("changed RTS to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF)
#~ elif suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL:
elif suboption[1:2] == NOTIFY_LINESTATE:
# client polls for current state
self.rfc2217SendSubnegotiation(
SERVER_NOTIFY_LINESTATE,
to_bytes([0]) # sorry, nothing like that implemented
)
elif suboption[1:2] == NOTIFY_MODEMSTATE:
if self.logger:
self.logger.info("request for modem state")
# client polls for current state
self.check_modem_lines(force_notification=True)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
if self.logger:
self.logger.info("suspend")
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
if self.logger:
self.logger.info("resume")
self._remote_suspend_flow = False
elif suboption[1:2] == SET_LINESTATE_MASK:
self.linestate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("line state mask: 0x%02x" % (self.linestate_mask,))
elif suboption[1:2] == SET_MODEMSTATE_MASK:
self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("modem state mask: 0x%02x" % (self.modemstate_mask,))
elif suboption[1:2] == PURGE_DATA:
if suboption[2:3] == PURGE_RECEIVE_BUFFER:
self.serial.flushInput()
if self.logger:
self.logger.info("purge in")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER)
elif suboption[2:3] == PURGE_TRANSMIT_BUFFER:
self.serial.flushOutput()
if self.logger:
self.logger.info("purge out")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER)
elif suboption[2:3] == PURGE_BOTH_BUFFERS:
self.serial.flushInput()
self.serial.flushOutput()
if self.logger:
self.logger.info("purge both")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_BOTH_BUFFERS)
else:
if self.logger:
self.logger.error("undefined PURGE_DATA: %r" % list(suboption[2:]))
else:
if self.logger:
self.logger.error("undefined COM_PORT_OPTION: %r" % list(suboption[1:]))
else:
if self.logger:
self.logger.warning("unknown subnegotiation: %r" % (suboption,))
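# A minimal server-side sketch of how PortManager is meant to be wired up
# (hypothetical names; a real server would run this per client connection,
# in its own thread, with error handling):
#~ class SocketConnection(object):
#~ def __init__(self, sock):
#~ self._sock = sock
#~ self._lock = threading.Lock()
#~ def write(self, data): # thread safe write, as required by PortManager
#~ self._lock.acquire()
#~ try:
#~ self._sock.sendall(data)
#~ finally:
#~ self._lock.release()
#~ mgr = PortManager(ser, SocketConnection(client_socket))
#~ while True:
#~ for byte in mgr.filter(client_socket.recv(1024)):
#~ ser.write(byte) # payload bytes go to the serial port
#~ mgr.check_modem_lines() # poll and notify modem line changes
#~ # data read from the serial port must be escaped before sending it back:
#~ # client_socket.sendall(''.join(mgr.escape(ser.read(ser.inWaiting()))))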
# simple client test
if __name__ == '__main__':
import sys
s = Serial('rfc2217://localhost:7000', 115200)
sys.stdout.write('%s\n' % s)
#~ s.baudrate = 1898
sys.stdout.write("write...\n")
s.write("hello\n")
s.flush()
sys.stdout.write("read: %s\n" % s.read(5))
#~ s.baudrate = 19200
#~ s.databits = 7
s.close()
|
gavin-feng/odoo
|
refs/heads/8.0
|
addons/account_test/account_test.py
|
342
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: product_expiry.py 4304 2006-10-25 09:54:51Z ged $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
CODE_EXEC_DEFAULT = '''\
res = []
cr.execute("select id, code from account_journal")
for record in cr.dictfetchall():
res.append(record['code'])
result = res
'''
class accounting_assert_test(osv.osv):
_name = "accounting.assert.test"
_order = "sequence"
_columns = {
'name': fields.char('Test Name', required=True, select=True, translate=True),
'desc': fields.text('Test Description', select=True, translate=True),
'code_exec': fields.text('Python code', required=True),
'active': fields.boolean('Active'),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'code_exec': CODE_EXEC_DEFAULT,
'active': True,
'sequence': 10,
}
|
sadatay/beets
|
refs/heads/master
|
beetsplug/bpd/__init__.py
|
23
|
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A clone of the Music Player Daemon (MPD) that plays music from a
Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import re
from string import Template
import traceback
import random
import time
import beets
from beets.plugins import BeetsPlugin
import beets.ui
from beets import logging
from beets import vfs
from beets.util import bluelet
from beets.library import Item
from beets import dbcore
from beets.mediafile import MediaFile
PROTOCOL_VERSION = '0.13.0'
BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = 'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin'
CLIST_END = 'command_list_end'
RESP_OK = 'OK'
RESP_CLIST_VERBOSE = 'list_OK'
RESP_ERR = 'ACK'
NEWLINE = u"\n"
ERROR_NOT_LIST = 1
ERROR_ARG = 2
ERROR_PASSWORD = 3
ERROR_PERMISSION = 4
ERROR_UNKNOWN = 5
ERROR_NO_EXIST = 50
ERROR_PLAYLIST_MAX = 51
ERROR_SYSTEM = 52
ERROR_PLAYLIST_LOAD = 53
ERROR_UPDATE_ALREADY = 54
ERROR_PLAYER_SYNC = 55
ERROR_EXIST = 56
VOLUME_MIN = 0
VOLUME_MAX = 100
SAFE_COMMANDS = (
# Commands that are available when unauthenticated.
u'close', u'commands', u'notcommands', u'password', u'ping',
)
ITEM_KEYS_WRITABLE = set(MediaFile.fields()).intersection(Item._fields.keys())
# Loggers.
log = logging.getLogger('beets.bpd')
global_log = logging.getLogger('beets')
# Gstreamer import error.
class NoGstreamerError(Exception):
pass
# Error-handling, exceptions, parameter parsing.
class BPDError(Exception):
"""An error that should be exposed to the client by the BPD
server.
"""
def __init__(self, code, message, cmd_name='', index=0):
self.code = code
self.message = message
self.cmd_name = cmd_name
self.index = index
template = Template(u'$resp [$code@$index] {$cmd_name} $message')
def response(self):
"""Returns a string to be used as the response code for the
erring command.
"""
return self.template.substitute({
'resp': RESP_ERR,
'code': self.code,
'index': self.index,
'cmd_name': self.cmd_name,
'message': self.message,
})
def make_bpd_error(s_code, s_message):
"""Create a BPDError subclass for a static code and message.
"""
class NewBPDError(BPDError):
code = s_code
message = s_message
cmd_name = ''
index = 0
def __init__(self):
pass
return NewBPDError
ArgumentTypeError = make_bpd_error(ERROR_ARG, 'invalid type for argument')
ArgumentIndexError = make_bpd_error(ERROR_ARG, 'argument out of range')
ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, 'argument not found')
def cast_arg(t, val):
"""Attempts to call t on val, raising an ArgumentTypeError
on ValueError.
If 't' is the special string 'intbool', attempts to cast first
to an int and then to a bool (i.e., 1=True, 0=False).
"""
if t == 'intbool':
return cast_arg(bool, cast_arg(int, val))
else:
try:
return t(val)
except ValueError:
raise ArgumentTypeError()
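# Examples for cast_arg (a sketch): cast_arg(int, '42') -> 42,
# cast_arg('intbool', '1') -> True, cast_arg(int, 'x') raises
# ArgumentTypeError.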
class BPDClose(Exception):
"""Raised by a command invocation to indicate that the connection
should be closed.
"""
# Generic server infrastructure, implementing the basic protocol.
class BaseServer(object):
"""An MPD-compatible music player server.
The functions with the `cmd_` prefix are invoked in response to
client commands. For instance, if the client says `status`,
`cmd_status` will be invoked. The arguments to the client's commands
are used as function arguments following the connection issuing the
command. The functions may send data on the connection. They may
also raise BPDError exceptions to report errors.
This is a generic superclass and doesn't support many commands.
"""
def __init__(self, host, port, password):
"""Create a new server bound to address `host` and listening
on port `port`. If `password` is given, it is required to do
anything significant on the server.
"""
self.host, self.port, self.password = host, port, password
# Default server values.
self.random = False
self.repeat = False
self.volume = VOLUME_MAX
self.crossfade = 0
self.playlist = []
self.playlist_version = 0
self.current_index = -1
self.paused = False
self.error = None
# Object for random numbers generation
self.random_obj = random.Random()
def run(self):
"""Block and start listening for connections from clients. An
interrupt (^C) closes the server.
"""
self.startup_time = time.time()
bluelet.run(bluelet.server(self.host, self.port,
Connection.handler(self)))
def _item_info(self, item):
"""An abstract method that should return response lines containing a
single song's metadata.
"""
raise NotImplementedError
def _item_id(self, item):
"""An abstract method returning the integer id for an item.
"""
raise NotImplementedError
def _id_to_index(self, track_id):
"""Searches the playlist for a song with the given id and
returns its index in the playlist.
"""
track_id = cast_arg(int, track_id)
for index, track in enumerate(self.playlist):
if self._item_id(track) == track_id:
return index
# Loop finished with no track found.
raise ArgumentNotFoundError()
def _random_idx(self):
"""Returns a random index different from the current one.
If there are no songs in the playlist it returns -1.
If there is only one song in the playlist it returns 0.
"""
if len(self.playlist) < 2:
return len(self.playlist) - 1
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
while new_index == self.current_index:
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
return new_index
def _succ_idx(self):
"""Returns the index for the next song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index + 1
def _prev_idx(self):
"""Returns the index for the previous song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index - 1
def cmd_ping(self, conn):
"""Succeeds."""
pass
def cmd_kill(self, conn):
"""Exits the server process."""
exit(0)
def cmd_close(self, conn):
"""Closes the connection."""
raise BPDClose()
def cmd_password(self, conn, password):
"""Attempts password authentication."""
if password == self.password:
conn.authenticated = True
else:
conn.authenticated = False
raise BPDError(ERROR_PASSWORD, 'incorrect password')
def cmd_commands(self, conn):
"""Lists the commands available to the user."""
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith('cmd_'):
yield u'command: ' + func[4:]
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
if self.password and not conn.authenticated:
# Not authenticated. Show privileged commands.
for func in dir(self):
if func.startswith('cmd_'):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. No commands are unavailable.
pass
def cmd_status(self, conn):
"""Returns some status information for use with an
implementation of cmd_status.
Gives a list of response-lines for: volume, repeat, random,
playlist, playlistlength, and xfade.
"""
yield (
u'volume: ' + unicode(self.volume),
u'repeat: ' + unicode(int(self.repeat)),
u'random: ' + unicode(int(self.random)),
u'playlist: ' + unicode(self.playlist_version),
u'playlistlength: ' + unicode(len(self.playlist)),
u'xfade: ' + unicode(self.crossfade),
)
if self.current_index == -1:
state = u'stop'
elif self.paused:
state = u'pause'
else:
state = u'play'
yield u'state: ' + state
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield u'song: ' + unicode(self.current_index)
yield u'songid: ' + unicode(current_id)
if self.error:
yield u'error: ' + self.error
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
error is set when a problem arises not in response to a
command (for instance, when playing a file).
"""
self.error = None
def cmd_random(self, conn, state):
"""Set or unset random (shuffle) mode."""
self.random = cast_arg('intbool', state)
def cmd_repeat(self, conn, state):
"""Set or unset repeat mode."""
self.repeat = cast_arg('intbool', state)
def cmd_setvol(self, conn, vol):
"""Set the player's volume level (0-100)."""
vol = cast_arg(int, vol)
if vol < VOLUME_MIN or vol > VOLUME_MAX:
raise BPDError(ERROR_ARG, u'volume out of range')
self.volume = vol
def cmd_crossfade(self, conn, crossfade):
"""Set the number of seconds of crossfading."""
crossfade = cast_arg(int, crossfade)
if crossfade < 0:
raise BPDError(ERROR_ARG, u'crossfade time must be nonnegative')
def cmd_clear(self, conn):
"""Clear the playlist."""
self.playlist = []
self.playlist_version += 1
self.cmd_stop(conn)
def cmd_delete(self, conn, index):
"""Remove the song at index from the playlist."""
index = cast_arg(int, index)
try:
del(self.playlist[index])
except IndexError:
raise ArgumentIndexError()
self.playlist_version += 1
if self.current_index == index: # Deleted playing song.
self.cmd_stop(conn)
elif index < self.current_index: # Deleted before playing.
# Shift playing index down.
self.current_index -= 1
def cmd_deleteid(self, conn, track_id):
self.cmd_delete(conn, self._id_to_index(track_id))
def cmd_move(self, conn, idx_from, idx_to):
"""Move a track in the playlist."""
idx_from = cast_arg(int, idx_from)
idx_to = cast_arg(int, idx_to)
try:
track = self.playlist.pop(idx_from)
self.playlist.insert(idx_to, track)
except IndexError:
raise ArgumentIndexError()
# Update currently-playing song.
if idx_from == self.current_index:
self.current_index = idx_to
elif idx_from < self.current_index <= idx_to:
self.current_index -= 1
elif idx_from > self.current_index >= idx_to:
self.current_index += 1
self.playlist_version += 1
def cmd_moveid(self, conn, idx_from, idx_to):
idx_from = self._id_to_index(idx_from)
return self.cmd_move(conn, idx_from, idx_to)
def cmd_swap(self, conn, i, j):
"""Swaps two tracks in the playlist."""
i = cast_arg(int, i)
j = cast_arg(int, j)
try:
track_i = self.playlist[i]
track_j = self.playlist[j]
except IndexError:
raise ArgumentIndexError()
self.playlist[j] = track_i
self.playlist[i] = track_j
# Update currently-playing song.
if self.current_index == i:
self.current_index = j
elif self.current_index == j:
self.current_index = i
self.playlist_version += 1
def cmd_swapid(self, conn, i_id, j_id):
i = self._id_to_index(i_id)
j = self._id_to_index(j_id)
return self.cmd_swap(conn, i, j)
def cmd_urlhandlers(self, conn):
"""Indicates supported URL schemes. None by default."""
pass
def cmd_playlistinfo(self, conn, index=-1):
"""Gives metadata information about the entire playlist or a
single track, given by its index.
"""
index = cast_arg(int, index)
if index == -1:
for track in self.playlist:
yield self._item_info(track)
else:
try:
track = self.playlist[index]
except IndexError:
raise ArgumentIndexError()
yield self._item_info(track)
def cmd_playlistid(self, conn, track_id=-1):
return self.cmd_playlistinfo(conn, self._id_to_index(track_id))
def cmd_plchanges(self, conn, version):
"""Sends playlist changes since the given version.
This is a "fake" implementation that ignores the version and
just returns the entire playlist (rather like version=0). This
seems to satisfy many clients.
"""
return self.cmd_playlistinfo(conn)
def cmd_plchangesposid(self, conn, version):
"""Like plchanges, but only sends position and id.
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield u'cpos: ' + unicode(idx)
yield u'Id: ' + unicode(track.id)
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song.
"""
if self.current_index != -1: # -1 means stopped.
track = self.playlist[self.current_index]
yield self._item_info(track)
def cmd_next(self, conn):
"""Advance to the next song in the playlist."""
self.current_index = self._succ_idx()
if self.current_index >= len(self.playlist):
# Fallen off the end. Just move to stopped state.
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_previous(self, conn):
"""Step back to the last song."""
self.current_index = self._prev_idx()
if self.current_index < 0:
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_pause(self, conn, state=None):
"""Set the pause state of playback."""
if state is None:
self.paused = not self.paused # Toggle.
else:
self.paused = cast_arg('intbool', state)
def cmd_play(self, conn, index=-1):
"""Begin playback, possibly at a specified playlist index."""
index = cast_arg(int, index)
if index < -1 or index > len(self.playlist):
raise ArgumentIndexError()
if index == -1: # No index specified: start where we are.
if not self.playlist: # Empty playlist: stop immediately.
return self.cmd_stop(conn)
if self.current_index == -1: # No current song.
self.current_index = 0 # Start at the beginning.
# If we have a current song, just stay there.
else: # Start with the specified index.
self.current_index = index
self.paused = False
def cmd_playid(self, conn, track_id=0):
track_id = cast_arg(int, track_id)
if track_id == -1:
index = -1
else:
index = self._id_to_index(track_id)
return self.cmd_play(conn, index)
def cmd_stop(self, conn):
"""Stop playback."""
self.current_index = -1
self.paused = False
def cmd_seek(self, conn, index, pos):
"""Seek to a specified point in a specified song."""
index = cast_arg(int, index)
if index < 0 or index >= len(self.playlist):
raise ArgumentIndexError()
self.current_index = index
def cmd_seekid(self, conn, track_id, pos):
index = self._id_to_index(track_id)
return self.cmd_seek(conn, index, pos)
def cmd_profile(self, conn):
"""Memory profiling for debugging."""
from guppy import hpy
heap = hpy().heap()
print(heap)
class Connection(object):
"""A connection between a client and the server. Handles input and
output from and to the client.
"""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`.
"""
self.server = server
self.sock = sock
self.authenticated = False
def send(self, lines):
"""Send lines, which which is either a single string or an
iterable consisting of strings, to the client. A newline is
added after every string. Returns a Bluelet event that sends
the data.
"""
if isinstance(lines, basestring):
lines = [lines]
out = NEWLINE.join(lines) + NEWLINE
log.debug('{}', out[:-1]) # Don't log trailing newline.
if isinstance(out, unicode):
out = out.encode('utf8')
return self.sock.sendall(out)
def do_command(self, command):
"""A coroutine that runs the given command and sends an
appropriate response."""
try:
yield bluelet.call(command.run(self))
except BPDError as e:
# Send the error.
yield self.send(e.response())
else:
# Send success code.
yield self.send(RESP_OK)
def run(self):
"""Send a greeting to the client and begin processing commands
as they arrive.
"""
yield self.send(HELLO)
clist = None # Initially, no command list is being constructed.
while True:
line = yield self.sock.readline()
if not line:
break
line = line.strip()
if not line:
break
log.debug('{}', line)
if clist is not None:
# Command list already opened.
if line == CLIST_END:
yield bluelet.call(self.do_command(clist))
clist = None # Clear the command list.
else:
clist.append(Command(line))
elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN:
# Begin a command list.
clist = CommandList([], line == CLIST_VERBOSE_BEGIN)
else:
# Ordinary command.
try:
yield bluelet.call(self.do_command(Command(line)))
except BPDClose:
# Command indicates that the conn should close.
self.sock.close()
return
@classmethod
def handler(cls, server):
def _handle(sock):
"""Creates a new `Connection` and runs it.
"""
return cls(server, sock).run()
return _handle
class Command(object):
"""A command issued by the client for processing by the server.
"""
command_re = re.compile(br'^([^ \t]+)[ \t]*')
arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing
the string for command name and arguments.
"""
command_match = self.command_re.match(s)
self.name = command_match.group(1)
self.args = []
arg_matches = self.arg_re.findall(s[command_match.end():])
for match in arg_matches:
if match[0]:
# Quoted argument.
arg = match[0]
arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\')
else:
# Unquoted argument.
arg = match[1]
arg = arg.decode('utf8')
self.args.append(arg)
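    # Illustrative sketch (not part of the original source): the regexes
    # above split a raw client line into the command name and its (possibly
    # quoted) arguments, e.g.
    #
    #   cmd = Command(b'find "Artist" "Miles Davis"')
    #   cmd.name  # -> b'find'
    #   cmd.args  # -> [u'Artist', u'Miles Davis']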
def run(self, conn):
"""A coroutine that executes the command on the given
connection.
"""
# Attempt to get correct command function.
func_name = 'cmd_' + self.name
if not hasattr(conn.server, func_name):
raise BPDError(ERROR_UNKNOWN, u'unknown command', self.name)
func = getattr(conn.server, func_name)
# Ensure we have permission for this command.
if conn.server.password and \
not conn.authenticated and \
self.name not in SAFE_COMMANDS:
raise BPDError(ERROR_PERMISSION, u'insufficient privileges')
try:
args = [conn] + self.args
results = func(*args)
if results:
for data in results:
yield conn.send(data)
except BPDError as e:
# An exposed error. Set the command name and then let
# the Connection handle it.
e.cmd_name = self.name
raise e
except BPDClose:
# An indication that the connection should close. Send
# it on the Connection.
raise
except Exception as e:
# An "unintentional" error. Hide it from the client.
            log.error('{}', traceback.format_exc())
raise BPDError(ERROR_SYSTEM, u'server error', self.name)
class CommandList(list):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.
"""
def __init__(self, sequence=None, verbose=False):
"""Create a new `CommandList` from the given sequence of
`Command`s. If `verbose`, this is a verbose command list.
"""
if sequence:
for item in sequence:
self.append(item)
self.verbose = verbose
def run(self, conn):
"""Coroutine executing all the commands in this list.
"""
for i, command in enumerate(self):
try:
yield bluelet.call(command.run(conn))
except BPDError as e:
# If the command failed, stop executing.
e.index = i # Give the error the correct index.
raise e
            # Otherwise, possibly send the output delimiter if we're in a
# verbose ("OK") command list.
if self.verbose:
yield conn.send(RESP_CLIST_VERBOSE)
# A subclass of the basic, protocol-handling server that actually plays
# music.
class Server(BaseServer):
"""An MPD-compatible server using GStreamer to play audio and beets
to store its library.
"""
def __init__(self, library, host, port, password):
try:
from beetsplug.bpd import gstplayer
except ImportError as e:
# This is a little hacky, but it's the best I know for now.
if e.args[0].endswith(' gst'):
raise NoGstreamerError()
else:
raise
super(Server, self).__init__(host, port, password)
self.lib = library
self.player = gstplayer.GstPlayer(self.play_finished)
self.cmd_update(None)
def run(self):
self.player.run()
super(Server, self).run()
def play_finished(self):
"""A callback invoked every time our player finishes a
track.
"""
self.cmd_next(None)
# Metadata helper functions.
def _item_info(self, item):
info_lines = [
u'file: ' + item.destination(fragment=True),
u'Time: ' + unicode(int(item.length)),
u'Title: ' + item.title,
u'Artist: ' + item.artist,
u'Album: ' + item.album,
u'Genre: ' + item.genre,
]
track = unicode(item.track)
if item.tracktotal:
track += u'/' + unicode(item.tracktotal)
info_lines.append(u'Track: ' + track)
info_lines.append(u'Date: ' + unicode(item.year))
try:
pos = self._id_to_index(item.id)
info_lines.append(u'Pos: ' + unicode(pos))
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
info_lines.append(u'Id: ' + unicode(item.id))
return info_lines
def _item_id(self, item):
return item.id
# Database updating.
def cmd_update(self, conn, path=u'/'):
"""Updates the catalog to reflect the current database state.
"""
# Path is ignored. Also, the real MPD does this asynchronously;
# this is done inline.
print('Building directory tree...')
self.tree = vfs.libtree(self.lib)
print('... done.')
self.updated_time = time.time()
# Path (directory tree) browsing.
def _resolve_path(self, path):
"""Returns a VFS node or an item ID located at the path given.
        If the path does not exist, raises an ArgumentNotFoundError.
"""
components = path.split(u'/')
node = self.tree
for component in components:
if not component:
continue
if isinstance(node, int):
# We're trying to descend into a file node.
raise ArgumentNotFoundError()
if component in node.files:
node = node.files[component]
elif component in node.dirs:
node = node.dirs[component]
else:
raise ArgumentNotFoundError()
return node
def _path_join(self, p1, p2):
"""Smashes together two BPD paths."""
out = p1 + u'/' + p2
return out.replace(u'//', u'/').replace(u'//', u'/')
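    # Sketch (not in the original source): the double replace collapses up
    # to three consecutive slashes produced by the join, e.g.
    # _path_join(u'/a/', u'/b') first yields u'/a///b' and ends up as
    # u'/a/b'.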
def cmd_lsinfo(self, conn, path=u"/"):
"""Sends info on all the items in the path."""
node = self._resolve_path(path)
if isinstance(node, int):
# Trying to list a track.
raise BPDError(ERROR_ARG, 'this is not a directory')
else:
for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid)
yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.iteritems())):
dirpath = self._path_join(path, name)
if dirpath.startswith(u"/"):
# Strip leading slash (libmpc rejects this).
dirpath = dirpath[1:]
yield u'directory: %s' % dirpath
def _listall(self, basepath, node, info=False):
"""Helper function for recursive listing. If info, show
tracks' complete info; otherwise, just show items' paths.
"""
if isinstance(node, int):
# List a single file.
if info:
item = self.lib.get_item(node)
yield self._item_info(item)
else:
yield u'file: ' + basepath
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.iteritems()):
newpath = self._path_join(basepath, name)
# "yield from"
for v in self._listall(newpath, itemid, info):
yield v
for name, subdir in sorted(node.dirs.iteritems()):
newpath = self._path_join(basepath, name)
yield u'directory: ' + newpath
for v in self._listall(newpath, subdir, info):
yield v
def cmd_listall(self, conn, path=u"/"):
"""Send the paths all items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), False)
def cmd_listallinfo(self, conn, path=u"/"):
"""Send info on all the items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), True)
# Playlist manipulation.
def _all_items(self, node):
"""Generator yielding all items under a VFS node.
"""
if isinstance(node, int):
# Could be more efficient if we built up all the IDs and
# then issued a single SELECT.
yield self.lib.get_item(node)
else:
# Recurse into a directory.
for name, itemid in sorted(node.files.iteritems()):
# "yield from"
for v in self._all_items(itemid):
yield v
for name, subdir in sorted(node.dirs.iteritems()):
for v in self._all_items(subdir):
yield v
def _add(self, path, send_id=False):
"""Adds a track or directory to the playlist, specified by the
path. If `send_id`, write each item's id to the client.
"""
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
yield u'Id: ' + unicode(item.id)
self.playlist_version += 1
def cmd_add(self, conn, path):
"""Adds a track or directory to the playlist, specified by a
path.
"""
return self._add(path, False)
def cmd_addid(self, conn, path):
"""Same as `cmd_add` but sends an id back to the client."""
return self._add(path, True)
# Server info.
def cmd_status(self, conn):
for line in super(Server, self).cmd_status(conn):
yield line
if self.current_index > -1:
item = self.playlist[self.current_index]
yield u'bitrate: ' + unicode(item.bitrate / 1000)
# Missing 'audio'.
(pos, total) = self.player.time()
yield u'time: ' + unicode(pos) + u':' + unicode(total)
# Also missing 'updating_db'.
def cmd_stats(self, conn):
"""Sends some statistics about the library."""
with self.lib.transaction() as tx:
statement = 'SELECT COUNT(DISTINCT artist), ' \
'COUNT(DISTINCT album), ' \
'COUNT(id), ' \
'SUM(length) ' \
'FROM items'
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
u'artists: ' + unicode(artists),
u'albums: ' + unicode(albums),
u'songs: ' + unicode(songs),
u'uptime: ' + unicode(int(time.time() - self.startup_time)),
u'playtime: ' + u'0', # Missing.
u'db_playtime: ' + unicode(int(totaltime)),
u'db_update: ' + unicode(int(self.updated_time)),
)
# Searching.
tagtype_map = {
u'Artist': u'artist',
u'Album': u'album',
u'Title': u'title',
u'Track': u'track',
u'AlbumArtist': u'albumartist',
u'AlbumArtistSort': u'albumartist_sort',
# Name?
u'Genre': u'genre',
u'Date': u'year',
u'Composer': u'composer',
# Performer?
u'Disc': u'disc',
u'filename': u'path', # Suspect.
}
def cmd_tagtypes(self, conn):
"""Returns a list of the metadata (tag) fields available for
searching.
"""
for tag in self.tagtype_map:
yield u'tagtype: ' + tag
def _tagtype_lookup(self, tag):
"""Uses `tagtype_map` to look up the beets column name for an
MPD tagtype (or throw an appropriate exception). Returns both
the canonical name of the MPD tagtype and the beets column
name.
"""
for test_tag, key in self.tagtype_map.items():
# Match case-insensitively.
if test_tag.lower() == tag.lower():
return test_tag, key
raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
def _metadata_query(self, query_type, any_query_type, kv):
"""Helper function returns a query object that will find items
according to the library query type provided and the key-value
pairs specified. The any_query_type is used for queries of
type "any"; if None, then an error is thrown.
"""
if kv: # At least one key-value pair.
queries = []
# Iterate pairwise over the arguments.
it = iter(kv)
for tag, value in zip(it, it):
if tag.lower() == u'any':
if any_query_type:
queries.append(any_query_type(value,
ITEM_KEYS_WRITABLE,
query_type))
else:
raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
else:
_, key = self._tagtype_lookup(tag)
queries.append(query_type(key, value))
return dbcore.query.AndQuery(queries)
else: # No key-value pairs.
return dbcore.query.TrueQuery()
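    # Sketch (not in the original source): the pairwise iteration above
    # turns a flat tag/value argument list into (tag, value) pairs, e.g.
    #
    #   kv = (u'Artist', u'Miles Davis', u'Album', u'Kind of Blue')
    #   it = iter(kv)
    #   list(zip(it, it))
    #   # -> [(u'Artist', u'Miles Davis'), (u'Album', u'Kind of Blue')]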
def cmd_search(self, conn, *kv):
"""Perform a substring match for items."""
query = self._metadata_query(dbcore.query.SubstringQuery,
dbcore.query.AnyFieldQuery,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_find(self, conn, *kv):
"""Perform an exact match for items."""
query = self._metadata_query(dbcore.query.MatchQuery,
None,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_list(self, conn, show_tag, *kv):
"""List distinct metadata values for show_tag, possibly
filtered by matching match_tag to match_term.
"""
show_tag_canon, show_key = self._tagtype_lookup(show_tag)
query = self._metadata_query(dbcore.query.MatchQuery, None, kv)
clause, subvals = query.clause()
statement = 'SELECT DISTINCT ' + show_key + \
' FROM items WHERE ' + clause + \
' ORDER BY ' + show_key
with self.lib.transaction() as tx:
rows = tx.query(statement, subvals)
for row in rows:
yield show_tag_canon + u': ' + unicode(row[0])
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
tag/value query.
"""
_, key = self._tagtype_lookup(tag)
songs = 0
playtime = 0.0
for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
songs += 1
playtime += item.length
yield u'songs: ' + unicode(songs)
yield u'playtime: ' + unicode(int(playtime))
# "Outputs." Just a dummy implementation because we don't control
# any outputs.
def cmd_outputs(self, conn):
"""List the available outputs."""
yield (
u'outputid: 0',
u'outputname: gstreamer',
u'outputenabled: 1',
)
def cmd_enableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id != 0:
raise ArgumentIndexError()
def cmd_disableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id == 0:
raise BPDError(ERROR_ARG, u'cannot disable this output')
else:
raise ArgumentIndexError()
# Playback control. The functions below hook into the
# half-implementations provided by the base class. Together, they're
# enough to implement all normal playback functionality.
def cmd_play(self, conn, index=-1):
new_index = index != -1 and index != self.current_index
was_paused = self.paused
super(Server, self).cmd_play(conn, index)
if self.current_index > -1: # Not stopped.
if was_paused and not new_index:
# Just unpause.
self.player.play()
else:
self.player.play_file(self.playlist[self.current_index].path)
def cmd_pause(self, conn, state=None):
super(Server, self).cmd_pause(conn, state)
if self.paused:
self.player.pause()
elif self.player.playing:
self.player.play()
def cmd_stop(self, conn):
super(Server, self).cmd_stop(conn)
self.player.stop()
def cmd_seek(self, conn, index, pos):
"""Seeks to the specified position in the specified song."""
index = cast_arg(int, index)
pos = cast_arg(int, pos)
super(Server, self).cmd_seek(conn, index, pos)
self.player.seek(pos)
# Volume control.
def cmd_setvol(self, conn, vol):
vol = cast_arg(int, vol)
super(Server, self).cmd_setvol(conn, vol)
self.player.volume = float(vol) / 100
# Beets plugin hooks.
class BPDPlugin(BeetsPlugin):
"""Provides the "beet bpd" command for running a music player
server.
"""
def __init__(self):
super(BPDPlugin, self).__init__()
self.config.add({
'host': u'',
'port': 6600,
'password': u'',
'volume': VOLUME_MAX,
})
self.config['password'].redact = True
def start_bpd(self, lib, host, port, password, volume, debug):
"""Starts a BPD server."""
if debug: # FIXME this should be managed by BeetsPlugin
self._log.setLevel(logging.DEBUG)
else:
self._log.setLevel(logging.WARNING)
try:
server = Server(lib, host, port, password)
server.cmd_setvol(None, volume)
server.run()
except NoGstreamerError:
global_log.error(u'Gstreamer Python bindings not found.')
global_log.error(u'Install "python-gst0.10", "py27-gst-python", '
u'or similar package to use BPD.')
def commands(self):
cmd = beets.ui.Subcommand(
'bpd', help='run an MPD-compatible music player server'
)
cmd.parser.add_option(
'-d', '--debug', action='store_true',
help='dump all MPD traffic to stdout'
)
def func(lib, opts, args):
host = args.pop(0) if args else self.config['host'].get(unicode)
port = args.pop(0) if args else self.config['port'].get(int)
if args:
raise beets.ui.UserError('too many arguments')
password = self.config['password'].get(unicode)
volume = self.config['volume'].get(int)
debug = opts.debug or False
self.start_bpd(lib, host, int(port), password, volume, debug)
cmd.func = func
return [cmd]
|
awkspace/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/library/test_override.py
|
263
|
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts import data
results = {"data": data}
AnsibleModule(argument_spec=dict()).exit_json(**results)
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/admin_scripts/custom_templates/project_template/project_name/settings.py
|
738
|
# Django settings for {{ project_name }} test project.
|
vmanoria/bluemix-hue-filebrowser
|
refs/heads/master
|
hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/tests/admin_scripts/custom_templates/project_template/project_name/settings.py
|
738
|
# Django settings for {{ project_name }} test project.
|
patrickcurl/ztruck
|
refs/heads/master
|
dj/lib/python2.7/site-packages/django/core/serializers/__init__.py
|
121
|
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
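# Illustrative sketch (not part of Django): a BadSerializer stores the
# registration-time exception and re-raises it only when the stub is used:
#
#   bad = BadSerializer(ImportError("No module named yaml"))
#   bad()  # raises ImportError: No module named yaml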
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.rel.through._meta.auto_created:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
            # then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationNotPossibleForStructuralTypeInCallable.py
|
19
|
def func(x):
x.foo()
return x
va<caret>r = func
|
kenshay/ImageScript
|
refs/heads/master
|
Script_Runner/PYTHON/Lib/lib2to3/pgen2/tokenize.py
|
5
|
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
def _combinations(*l):
return set(
x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()
)
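# Sketch (not in the upstream source): group('a', 'b') == '(a|b)',
# any('x') == '(x)*' and maybe('y') == '(y)?'; _combinations keeps every
# one- and two-letter prefix whose halves differ case-insensitively, e.g.
# _combinations('r', 'b') == {'r', 'b', 'rb', 'br'}.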
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB]_?[01]+(?:_[01]+)*'
Hexnumber = r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
Octnumber = r'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'
Decnumber = group(r'[1-9]\d*(?:_\d+)*[lL]?', '0[lL]?')
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+(?:_\d+)*'
Pointfloat = group(r'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?', r'\.\d+(?:_\d+)*') + maybe(Exponent)
Expfloat = r'\d+(?:_\d+)*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+(?:_\d+)*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?"
Triple = group(_litprefix + "'''", _litprefix + '"""')
# Single-line ' or " string.
String = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
_litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&@|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
_litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
_strprefixes = (
_combinations('r', 'R', 'f', 'F') |
_combinations('r', 'R', 'b', 'B') |
{'u', 'U', 'ur', 'uR', 'Ur', 'UR'}
)
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
**{f"{prefix}'''": single3prog for prefix in _strprefixes},
**{f'{prefix}"""': double3prog for prefix in _strprefixes},
**{prefix: None for prefix in _strprefixes}}
triple_quoted = (
{"'''", '"""'} |
{f"{prefix}'''" for prefix in _strprefixes} |
{f'{prefix}"""' for prefix in _strprefixes}
)
single_quoted = (
{"'", '"'} |
{f"{prefix}'" for prefix in _strprefixes} |
{f'{prefix}"' for prefix in _strprefixes}
)
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER, ASYNC, AWAIT):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
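# Sketch (not in the upstream source): a UTF-8 BOM is reported as
# 'utf-8-sig' and stripped from the returned line, e.g.
#
#   import io
#   src = io.BytesIO(BOM_UTF8 + b"print('hi')\n")
#   detect_encoding(src.readline)  # -> ('utf-8-sig', [b"print('hi')\n"])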
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
# 'stashed' and 'async_*' are used for async/await parsing
stashed = None
async_def = False
async_def_indent = 0
async_def_nl = False
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if stashed:
yield stashed
stashed = None
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
if async_def and async_def_indent >= indents[-1]:
async_def = False
async_def_nl = False
async_def_indent = 0
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
if async_def and async_def_nl and async_def_indent >= indents[-1]:
async_def = False
async_def_nl = False
async_def_indent = 0
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
elif async_def:
async_def_nl = True
if stashed:
yield stashed
stashed = None
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
if stashed:
yield stashed
stashed = None
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
if stashed:
yield stashed
stashed = None
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
if stashed:
yield stashed
stashed = None
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
if token in ('async', 'await'):
if async_def:
yield (ASYNC if token == 'async' else AWAIT,
token, spos, epos, line)
continue
tok = (NAME, token, spos, epos, line)
if token == 'async' and not stashed:
stashed = tok
continue
if token == 'def':
if (stashed
and stashed[0] == NAME
and stashed[1] == 'async'):
async_def = True
async_def_indent = indents[-1]
yield (ASYNC, stashed[1],
stashed[2], stashed[3],
stashed[4])
stashed = None
if stashed:
yield stashed
stashed = None
yield tok
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
if stashed:
yield stashed
stashed = None
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
if stashed:
yield stashed
stashed = None
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
if stashed:
yield stashed
stashed = None
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
|
ibest/grcScripts2
|
refs/heads/master
|
inst/scripts/python/extract_unmapped_reads.py
|
1
|
#!/usr/bin/env python
'''
Extract reads which aren't mapped from a SAM or SAM.gz file.
Behavior for PE:
-Write out PE only if both do not map (if either of the pair maps, neither is retained)
Behavior for SE:
-Write out SE if they don't map
Iterate over a SAM or SAM.gz file, take everything where the 3rd and
4th flag bits are set to 1 and write the reads out to files.
0x1 template having multiple segments in sequencing
0x2 each segment properly aligned according to the aligner
0x4 segment unmapped
0x8 next segment in the template unmapped
0x10 SEQ being reverse complemented
0x20 SEQ of the next segment in the template being reversed
0x40 the first segment in the template
0x80 the last segment in the template
0x100 secondary alignment
0x200 not passing quality controls
0x400 PCR or optical duplicate
TODO:
1) Add support for retaining both reads if one of a pair doesn't map but the other does
2) Add support for retaining the pair (or SE) if a read maps with low mapq
Note:
It is necessary to double check that both pairs of the PE read really exist in the SAM
file just in case it somehow gets disordered. This is taken care of by keeping the PE
reads in a set of dictionaries and then deleting them once the pair is written.
In the case where a read is somehow labeled as paired, but the pair doesn't exist, the
read is NOT written.
'''
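# Worked example (not part of the original script): a read with flag 77
# (0x4D = 0x1 + 0x4 + 0x8 + 0x40) is the first segment of a pair in which
# both mates are unmapped, so it passes the PE test used below:
#
#   flag = 77
#   (flag & 0x1) and (flag & 0x4) and (flag & 0x8)  # truthy -> keep pair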
import sys
import os
from optparse import OptionParser # http://docs.python.org/library/optparse.html
import gzip
usage = "usage: %prog [options] -o output_base inputfile.SAM"
parser = OptionParser(usage=usage,version="%prog 2.0.0")
parser.add_option('-u', '--uncompressed', help="leave output files uncompressed",
action="store_true", dest="uncompressed")
parser.add_option('-o', '--output_base', help="output file basename",
action="store", type="str", dest="output_base",default="screened")
parser.add_option('-v', '--verbose', help="verbose output",
action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args() # uncomment this line for command line support
if len(args) == 1:
infile = args[0]
#Start opening input/output files:
if not os.path.exists(infile):
print "Error, can't find input file %s" % infile
sys.exit()
if infile.split(".")[-1] == "gz":
insam = gzip.open(infile, 'rb')
else:
insam = open(infile, 'r')
else:
## reading from stdin
insam = sys.stdin
base = options.output_base
PE1 = {}
PE2 = {}
contig_map = {}
def writeread(ID, r1, r2):
#read1
outPE1.write("@" + ID + "#0/1" '\n')
outPE1.write(r1[0] + '\n')
outPE1.write('+\n' + r1[1] + '\n')
#read2
outPE2.write("@" + ID + "#0/2" '\n')
outPE2.write(r2[0] + '\n')
outPE2.write('+\n' + r2[1] + '\n')
i = 0
PE_written = 0
SE_written = 0
SE_open = False
PE_open = False
for line in insam:
if i % 100000 == 0 and i > 0 and options.verbose:
print "Records processed: %s, PE_written: %s, SE_written: %s" % (i, PE_written, SE_written)
#Comment/header lines start with @
if line[0] != "@" and len(line.strip().split()) > 2:
i += 1
line2 = line.strip().split()
flag = int(line2[1])
#Handle SE:
        # unmapped SE reads have 0x1 set to 0, and 0x4 (third bit) set to 1
if (flag & 0x1 == 0) and (flag & 0x4):
ID = line2[0].split("#")[0]
if not SE_open:
if options.uncompressed:
outSE = open(base + "_SE.fastq", 'w')
else:
outSE = gzip.open(base + "_SE.fastq.gz", 'wb')
SE_open = True
outSE.write("@" + ID + '\n')
outSE.write(line2[9] + '\n')
outSE.write('+\n' + line2[10] + '\n')
SE_written += 1
continue
#Handle PE:
#logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped, 0x80 the last segment in the template
if ((flag & 0x1) and (flag & 0x4) and (flag & 0x8)):
if not PE_open:
if options.uncompressed:
outPE1 = open(base + "_PE1.fastq", 'w')
outPE2 = open(base + "_PE2.fastq", 'w')
else:
outPE1 = gzip.open(base + "_PE1.fastq.gz", 'wb')
outPE2 = gzip.open(base + "_PE2.fastq.gz", 'wb')
PE_open = True
if (flag & 0x40): # is this PE1 (first segment in template)
#PE1 read, check that PE2 is in dict and write out
ID = line2[0].split("#")[0]
r1 = [line2[9], line2[10]] # sequence + qual
if ID in PE2:
writeread(ID, r1, PE2[ID])
del PE2[ID]
PE_written += 1
else:
PE1[ID] = r1
continue
elif (flag & 0x80): # is this PE2 (last segment in template)
#PE2 read, check that PE1 is in dict and write out
ID = line2[0].split("#")[0]
r2 = [line2[9], line2[10]]
if ID in PE1:
writeread(ID, PE1[ID], r2)
del PE1[ID]
PE_written += 1
else:
PE2[ID] = r2
continue
# was mapped, count it up
contig = line2[2]
        if contig in contig_map:
if (flag & 0x1 == 0): ## SE
contig_map[contig]["SE"] += 1
elif (flag & 0x40): ## PE, Just count the first in the pair
contig_map[contig]["PE"] += 1
else:
contig_map[contig] = {}
if (flag & 0x1 == 0): ## SE
contig_map[contig]["SE"] = 1
contig_map[contig]["PE"] = 0
elif (flag & 0x40): ## PE, Just count the first in the pair
contig_map[contig]["SE"] = 0
contig_map[contig]["PE"] = 1
print "Records processed: %s, PE_written: %s, SE_written: %s" % (i, PE_written, SE_written)
for k in contig_map.keys():
print "\tFound %s: percent: %.2f, PE mapped: %s, SE mapped: %s" % (k,(2*PE_written+SE_written)/i, contig_map[k]["PE"], contig_map[k]["SE"])
if PE_open:
outPE1.close()
outPE2.close()
if SE_open:
outSE.close()
|
davidcbucher/heroku-buildpack-geo-python
|
refs/heads/master
|
vendor/distribute-0.6.34/setuptools/command/install.py
|
216
|
import setuptools, sys, glob
from distutils.command.install import install as _install
from distutils.errors import DistutilsArgError
class install(_install):
"""Use easy_install to install the package, w/dependencies"""
user_options = _install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = _install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
_install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
self.no_compile = None # make DISTUTILS_DEBUG work right!
def finalize_options(self):
_install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return _install.handle_extra_path(self)
        # Ignore extra_path when installing an egg (or being run by another
        # command without --root or --single-version-externally-managed)
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return _install.run(self)
# Attempt to detect whether we were called from setup() or by another
# command. If we were called by setup(), our caller will be the
# 'run_command' method in 'distutils.dist', and *its* caller will be
# the 'run_commands' method. If we were called any other way, our
# immediate caller *might* be 'run_command', but it won't have been
# called by 'run_commands'. This is slightly kludgy, but seems to
# work.
#
caller = sys._getframe(2)
caller_module = caller.f_globals.get('__name__','')
caller_name = caller.f_code.co_name
if caller_module != 'distutils.dist' or caller_name!='run_commands':
# We weren't called from the command line or setup(), so we
# should run in backward-compatibility mode to support bdist_*
# commands.
_install.run(self)
else:
self.do_egg_install()
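    # Sketch (not part of setuptools): sys._getframe(2) walks two frames up
    # the call stack, so when run() is reached via
    # distutils.dist.Distribution.run_command the grandparent frame is
    # run_commands, which is exactly what the check above detects.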
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = [
cmd for cmd in _install.sub_commands if cmd[0] not in install._nc
] + install.new_commands
#
|
NeuralEnsemble/elephant
|
refs/heads/master
|
elephant/spike_train_dissimilarity.py
|
2
|
# -*- coding: utf-8 -*-
"""
In neuroscience one often wants to evaluate how similar or dissimilar pairs
or even large sets of spike trains are. For this purpose various spike train
dissimilarity measures were introduced in the literature. They differ, e.g.,
in whether or not they have the mathematical properties of a metric and in
whether they are time-scale dependent. Well-known representatives of spike
train dissimilarity measures are the Victor-Purpura distance and the Van
Rossum distance implemented in this module, both of which are metrics in the
mathematical sense and time-scale dependent.
.. autosummary::
:toctree: _toctree/spike_train_dissimilarity
victor_purpura_distance
van_rossum_distance
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
import numpy as np
import quantities as pq
import scipy as sp
from neo.core import SpikeTrain
import elephant.kernels as kernels
from elephant.utils import deprecated_alias
__all__ = [
"victor_purpura_distance",
"van_rossum_distance"
]
def _create_matrix_from_indexed_function(
shape, func, symmetric_2d=False, **func_params):
mat = np.empty(shape)
if symmetric_2d:
for i in range(shape[0]):
for j in range(i, shape[1]):
mat[i, j] = mat[j, i] = func(i, j, **func_params)
else:
for idx in np.ndindex(*shape):
mat[idx] = func(*idx, **func_params)
return mat
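# Sketch (not part of Elephant): with symmetric_2d=True the helper computes
# each pair only once and mirrors it across the diagonal, e.g.
#
#   _create_matrix_from_indexed_function((2, 2), lambda i, j: i + j,
#                                        symmetric_2d=True)
#   # -> array([[0., 1.],
#   #           [1., 2.]])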
@deprecated_alias(trains='spiketrains', q='cost_factor')
def victor_purpura_distance(spiketrains, cost_factor=1.0 * pq.Hz, kernel=None,
sort=True, algorithm='fast'):
"""
Calculates the Victor-Purpura's (VP) distance. It is often denoted as
:math:`D^{\\text{spike}}[q]`.
It is defined as the minimal cost of transforming spike train `a` into
spike train `b` by using the following operations:
* Inserting or deleting a spike (cost 1.0).
* Shifting a spike from :math:`t` to :math:`t'` (cost :math:`q
\\cdot |t - t'|`).
A detailed description can be found in
*Victor, J. D., & Purpura, K. P. (1996). Nature and precision of
temporal coding in visual cortex: a metric-space analysis. Journal of
Neurophysiology.*
Given the average number of spikes :math:`n` in a spike train and
:math:`N` spike trains the run-time complexity of this function is
:math:`O(N^2 n^2)` and :math:`O(N^2 + n^2)` memory will be needed.
Parameters
----------
spiketrains : list of neo.SpikeTrain
Spike trains to calculate pairwise distance.
cost_factor : pq.Quantity, optional
A cost factor :math:`q` for spike shifts as inverse time scalar.
Extreme values :math:`q=0` meaning no cost for any shift of
        spikes, or :math:`q=np.inf` meaning infinite cost for any
spike shift and hence exclusion of spike shifts, are explicitly
allowed. If `kernel` is not `None`, :math:`q` will be ignored.
Default: 1.0 * pq.Hz
kernel : elephant.kernels.Kernel or None, optional
Kernel to use in the calculation of the distance. If `kernel` is
`None`, an unnormalized triangular kernel with standard deviation
        of :math:`2.0/(q * sqrt(6.0))` corresponding to a half width of
:math:`2.0/q` will be used. Usage of the default value calculates
the Victor-Purpura distance correctly with a triangular kernel of
the suitable width. The choice of another kernel is enabled, but
this leaves the framework of Victor-Purpura distances.
Default: None
sort : bool, optional
Spike trains with sorted spike times will be needed for the
calculation. You can set `sort` to `False` if you know that your
spike trains are already sorted to decrease calculation time.
Default: True
algorithm : str, optional
Allowed values are 'fast' or 'intuitive', each selecting an
algorithm with which to calculate the pairwise Victor-Purpura distance.
        Typically 'fast' should be used, because it always gives the same
        result as 'intuitive' while, within the constraints of Python and
        add-on modules such as numpy, being faster.
Default: 'fast'
Returns
-------
np.ndarray
2-D Matrix containing the VP distance of all pairs of spike trains.
Examples
--------
>>> import quantities as pq
    >>> from neo import SpikeTrain
    >>> from elephant.spike_train_dissimilarity import victor_purpura_distance
>>> q = 1.0 / (10.0 * pq.ms)
>>> st_a = SpikeTrain([10, 20, 30], units='ms', t_stop= 1000.0)
>>> st_b = SpikeTrain([12, 24, 30], units='ms', t_stop= 1000.0)
>>> vp_f = victor_purpura_distance([st_a, st_b], q)[0, 1]
>>> vp_i = victor_purpura_distance([st_a, st_b], q,
... algorithm='intuitive')[0, 1]
"""
for train in spiketrains:
if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain)) and
train.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality.simplified):
raise TypeError("Spike trains must have a time unit.")
if not (isinstance(cost_factor, pq.quantity.Quantity) and
cost_factor.dimensionality.simplified ==
pq.Quantity(1, "Hz").dimensionality.simplified):
raise TypeError("cost_factor must be a rate quantity.")
if kernel is None:
if cost_factor == 0.0:
num_spikes = np.atleast_2d([st.size for st in spiketrains])
return np.absolute(num_spikes.T - num_spikes)
if cost_factor == np.inf:
num_spikes = np.atleast_2d([st.size for st in spiketrains])
return num_spikes.T + num_spikes
kernel = kernels.TriangularKernel(
sigma=2.0 / (np.sqrt(6.0) * cost_factor))
if sort:
spiketrains = [np.sort(st.view(type=pq.Quantity))
for st in spiketrains]
def compute(i, j):
if i == j:
return 0.0
if algorithm == 'fast':
return _victor_purpura_dist_for_st_pair_fast(
spiketrains[i], spiketrains[j], kernel)
if algorithm == 'intuitive':
return _victor_purpura_dist_for_st_pair_intuitive(
spiketrains[i], spiketrains[j], cost_factor)
raise NameError("The algorithm must be either 'fast' or 'intuitive'.")
return _create_matrix_from_indexed_function(
(len(spiketrains), len(spiketrains)), compute, kernel.is_symmetric())
def victor_purpura_dist(*args, **kwargs):
warnings.warn("'victor_purpura_dist' funcion is deprecated; "
"use 'victor_purpura_distance'", DeprecationWarning)
return victor_purpura_distance(*args, **kwargs)
def _victor_purpura_dist_for_st_pair_fast(spiketrain_a, spiketrain_b, kernel):
"""
The algorithm used is based on the one given in
J. D. Victor and K. P. Purpura, Nature and precision of temporal
coding in visual cortex: a metric-space analysis, Journal of
Neurophysiology, 1996.
It constructs a matrix G[i, j] containing the minimal cost when only
considering the first i and j spikes of the spike trains. However, one
never needs to store more than one row and one column at the same time
for calculating the VP distance.
cost[0, :cost.shape[1] - i] corresponds to G[i:, i]. In the same way
cost[1, :cost.shape[1] - i] corresponds to G[i, i:].
    Moreover, the minimum operation over the costs of the three kinds of
    actions (deleting, inserting, or moving a spike) can be split into two
    operations. One depends only on the already calculated costs and the
    kernel evaluations (inserting a spike vs. moving a spike). The other
    minimum depends on that result and on the cost of deleting a spike; it
    always depends on the last calculated element in the cost array and
    corresponds to a recursive application of
    f(accumulated_min[i]) = min(f(accumulated_min[i-1]), accumulated_min[i])
    + 1. That '+1' can be pulled out of this function if the summed value
    over all recursive applications is added upfront to accumulated_min.
    Afterwards it has to be removed again, except for one unit for the
    currently processed spike, to obtain the real costs up to the
    evaluation of i.
    All currently calculated costs are stored decreased by one, because
    this saves a number of additions: in most cases the cost would be
    increased by exactly one (the only exception is shifting, whose
    calculation already involves the addition of a constant, leaving the
    number of operations the same). The increase by one is added back
    after calculating all minima, by shifting decreasing_sequence by one
    when removing it from accumulated_min.
Parameters
----------
    spiketrain_a, spiketrain_b : :class:`neo.core.SpikeTrain`
        The spike trains between which the Victor-Purpura distance is
        calculated.
    kernel : :class:`.kernels.Kernel`
        Kernel to use in the calculation of the distance.
Returns
-------
float
        The Victor-Purpura distance of spiketrain_a and spiketrain_b.
"""
if spiketrain_a.size <= 0 or spiketrain_b.size <= 0:
return max(spiketrain_a.size, spiketrain_b.size)
if spiketrain_a.size < spiketrain_b.size:
spiketrain_a, spiketrain_b = spiketrain_b, spiketrain_a
min_dim, max_dim = spiketrain_b.size, spiketrain_a.size + 1
cost = np.asfortranarray(np.tile(np.arange(float(max_dim)), (2, 1)))
decreasing_sequence = np.asfortranarray(cost[:, ::-1])
kern = kernel((np.atleast_2d(spiketrain_a).T.view(type=pq.Quantity) -
spiketrain_b.view(type=pq.Quantity)))
as_fortran = np.asfortranarray(
((np.sqrt(6.0) * kernel.sigma) * kern).simplified)
k = 1 - 2 * as_fortran
for i in range(min_dim):
# determine G[i, i] == accumulated_min[:, 0]
accumulated_min = cost[:, :-i - 1] + k[i:, i]
accumulated_min[1, :spiketrain_b.size - i] = \
cost[1, :spiketrain_b.size - i] + k[i, i:]
accumulated_min = np.minimum(
accumulated_min, # shift
cost[:, 1:max_dim - i]) # insert
acc_dim = accumulated_min.shape[1]
# delete vs min(insert, shift)
accumulated_min[:, 0] = min(cost[1, 1], accumulated_min[0, 0])
# determine G[i, :] and G[:, i] by propagating minima.
accumulated_min += decreasing_sequence[:, -acc_dim - 1:-1]
accumulated_min = np.minimum.accumulate(accumulated_min, axis=1)
cost[:, :acc_dim] = accumulated_min - decreasing_sequence[:, -acc_dim:]
return cost[0, -min_dim - 1]
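# A hedged sketch (an illustrative addition) of calling the pairwise helper
# directly, mirroring what victor_purpura_distance does by default: sort the
# trains, strip them down to plain quantities, and build the matching
# triangular kernel for a given cost factor q.
def _demo_vp_pair_fast():
    from neo import SpikeTrain  # assumes neo is installed
    q = 1.0 / (10.0 * pq.ms)
    kernel = kernels.TriangularKernel(sigma=2.0 / (np.sqrt(6.0) * q))
    st_a = np.sort(SpikeTrain([10, 20, 30], units='ms',
                              t_stop=1000.0).view(type=pq.Quantity))
    st_b = np.sort(SpikeTrain([12, 24, 30], units='ms',
                              t_stop=1000.0).view(type=pq.Quantity))
    return _victor_purpura_dist_for_st_pair_fast(st_a, st_b, kernel)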
def _victor_purpura_dist_for_st_pair_intuitive(spiketrain_a, spiketrain_b,
cost_factor=1.0 * pq.Hz):
"""
Function to calculate the Victor-Purpura distance between two spike trains
described in *J. D. Victor and K. P. Purpura, Nature and precision of
temporal coding in visual cortex: a metric-space analysis,
J Neurophysiol,76(2):1310-1326, 1996*
    This function originates from the spikes module in the signals folder
    of the software package NeuroTools. It represents the 'intuitive'
    implementation of the Victor-Purpura distance. In terms of calculation
    time, this code is currently uncompetitive with
    _victor_purpura_dist_for_st_pair_fast. However, the discrepancy between
    the two algorithms is expected to shrink drastically once the speed
    difference between plain Python and numpy routines disappears, e.g.
    when languages like Cython take over. The choice would then be between
    an intuitive, probably slightly slower algorithm and a correct but
    opaque solution of an optimization problem under boundary conditions.
    Hence this algorithm is kept here as well.
Parameters
----------
    spiketrain_a, spiketrain_b : :class:`neo.core.SpikeTrain`
        The spike trains between which the Victor-Purpura distance is
        calculated.
    cost_factor : Quantity scalar of rate dimension
        The cost parameter.
Default: 1.0 * pq.Hz
Returns
-------
float
        The Victor-Purpura distance of spiketrain_a and spiketrain_b.
"""
nspk_a = len(spiketrain_a)
nspk_b = len(spiketrain_b)
scr = np.zeros((nspk_a+1, nspk_b+1))
scr[:, 0] = range(0, nspk_a+1)
scr[0, :] = range(0, nspk_b+1)
if nspk_a > 0 and nspk_b > 0:
for i in range(1, nspk_a+1):
for j in range(1, nspk_b+1):
scr[i, j] = min(scr[i-1, j]+1, scr[i, j-1]+1)
scr[i, j] = min(scr[i, j], scr[i-1, j-1] +
np.float64((
cost_factor * abs(
spiketrain_a[i - 1] -
spiketrain_b[j - 1])).simplified))
return scr[nspk_a, nspk_b]
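# A small consistency sketch (an illustrative addition): with the default
# triangular kernel built from the same cost factor, 'fast' and 'intuitive'
# should agree up to floating-point rounding, as stated in the docstring of
# victor_purpura_distance.
def _demo_vp_algorithms_agree():
    from neo import SpikeTrain  # assumes neo is installed
    q = 1.0 / (10.0 * pq.ms)
    st_a = SpikeTrain([10, 20, 30], units='ms', t_stop=1000.0)
    st_b = SpikeTrain([12, 24, 30], units='ms', t_stop=1000.0)
    d_fast = victor_purpura_distance([st_a, st_b], q)[0, 1]
    d_intuitive = victor_purpura_distance([st_a, st_b], q,
                                          algorithm='intuitive')[0, 1]
    return np.isclose(d_fast, d_intuitive)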
@deprecated_alias(trains='spiketrains', tau='time_constant')
def van_rossum_distance(spiketrains, time_constant=1.0 * pq.s, sort=True):
"""
Calculates the van Rossum distance :cite:`dissimilarity-Rossum2001_751`,
defined as Euclidean distance of the spike trains convolved with a
causal decaying exponential smoothing filter.
The implementation is normalized to yield a distance of 1.0 for the
distance between an empty spike train and a spike train with a single
spike. Divide the result by sqrt(2.0) to get the normalization used in the
paper.
Given :math:`N` spike trains with :math:`n` spikes on average the run-time
complexity of this function is :math:`O(N^2 n)`.
Parameters
----------
    spiketrains : Sequence of :class:`neo.core.SpikeTrain`
        The spike trains for which the van Rossum distance is calculated
        pairwise.
time_constant : Quantity scalar
        Decay constant of the exponential filter, given as a time scalar.
        Controls the time scale to which the metric is sensitive. Denoted
        as :math:`t_c` in :cite:`dissimilarity-Rossum2001_751`. May also
        be `np.inf`, which leads to only measuring differences in spike
        count.
Default: 1.0 * pq.s
sort : bool
        Sorted spike times might be needed for the calculation. If you
        know that your spike trains are already sorted, set `sort` to
        `False` to save calculation time.
Default: True
Returns
-------
np.ndarray
        2-D matrix containing the van Rossum distances for all pairs of
spike trains.
Examples
--------
    >>> import quantities as pq
    >>> from neo import SpikeTrain
    >>> from elephant.spike_train_dissimilarity import van_rossum_distance
    >>> tau = 10.0 * pq.ms
    >>> st_a = SpikeTrain([10, 20, 30], units='ms', t_stop=1000.0)
    >>> st_b = SpikeTrain([12, 24, 30], units='ms', t_stop=1000.0)
    >>> vr = van_rossum_distance([st_a, st_b], tau)[0, 1]
"""
for train in spiketrains:
if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain)) and
train.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality.simplified):
raise TypeError("Spike trains must have a time unit.")
if not (isinstance(time_constant, pq.quantity.Quantity) and
time_constant.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality.simplified):
        raise TypeError("time_constant must be a time quantity.")
if time_constant == 0:
spike_counts = [st.size for st in spiketrains]
return np.sqrt(spike_counts + np.atleast_2d(spike_counts).T)
if time_constant == np.inf:
spike_counts = [st.size for st in spiketrains]
return np.absolute(spike_counts - np.atleast_2d(spike_counts).T)
k_dist = _summed_dist_matrix(
[st.view(type=pq.Quantity)
for st in spiketrains], time_constant, not sort)
vr_dist = np.empty_like(k_dist)
for i, j in np.ndindex(k_dist.shape):
vr_dist[i, j] = (
k_dist[i, i] + k_dist[j, j] - k_dist[i, j] - k_dist[j, i])
    return np.sqrt(vr_dist)
def van_rossum_dist(*args, **kwargs):
warnings.warn("'van_rossum_dist' function is deprecated; "
"use 'van_rossum_distance'", DeprecationWarning)
return van_rossum_distance(*args, **kwargs)
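# A hedged sketch (an illustrative addition) of the normalization note in
# the docstring above: the distance between an empty train and a
# single-spike train is 1.0, and dividing by sqrt(2) recovers the
# convention used in the paper.
def _demo_vr_normalization():
    from neo import SpikeTrain  # assumes neo is installed
    st_single = SpikeTrain([500.0], units='ms', t_stop=1000.0)
    st_empty = SpikeTrain([], units='ms', t_stop=1000.0)
    d = van_rossum_distance([st_single, st_empty], 10.0 * pq.ms)[0, 1]
    return d, d / np.sqrt(2.0)  # expected: (1.0, ~0.707)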
def _summed_dist_matrix(spiketrains, tau, presorted=False):
# The algorithm underlying this implementation is described in
# Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van
# Rossum distances. Network: Computation in Neural Systems, 23(1-2),
    # 48-58. We would like to remark that, in formula (9) of this paper,
    # the left-hand side of the equation should be divided by two.
#
# Given N spiketrains with n entries on average the run-time complexity is
# O(N^2 * n). O(N^2 + N * n) memory will be needed.
if len(spiketrains) <= 0:
return np.zeros((0, 0))
if not presorted:
spiketrains = [v.copy() for v in spiketrains]
for v in spiketrains:
v.sort()
sizes = np.asarray([v.size for v in spiketrains])
values = np.empty((len(spiketrains), max(1, sizes.max())))
values.fill(np.nan)
for i, v in enumerate(spiketrains):
if v.size > 0:
values[i, :v.size] = \
(v / tau * pq.dimensionless).simplified
exp_diffs = np.exp(values[:, :-1] - values[:, 1:])
markage = np.zeros(values.shape)
for u in range(len(spiketrains)):
markage[u, 0] = 0
for i in range(sizes[u] - 1):
markage[u, i + 1] = (markage[u, i] + 1.0) * exp_diffs[u, i]
# Same spiketrain terms
D = np.empty((len(spiketrains), len(spiketrains)))
D[np.diag_indices_from(D)] = sizes + 2.0 * np.sum(markage, axis=1)
# Cross spiketrain terms
for u in range(D.shape[0]):
all_ks = np.searchsorted(values[u], values, 'left') - 1
for v in range(u):
js = np.searchsorted(values[v], values[u], 'right') - 1
ks = all_ks[v]
slice_j = np.s_[np.searchsorted(js, 0):sizes[u]]
slice_k = np.s_[np.searchsorted(ks, 0):sizes[v]]
D[u, v] = np.sum(
np.exp(values[v][js[slice_j]] - values[u][slice_j]) *
(1.0 + markage[v][js[slice_j]]))
D[u, v] += np.sum(
np.exp(values[u][ks[slice_k]] - values[v][slice_k]) *
(1.0 + markage[u][ks[slice_k]]))
D[v, u] = D[u, v]
return D
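# A tiny numeric sketch (an illustrative addition) of the markage
# recurrence used above: m[0] = 0 and m[i+1] = (m[i] + 1) * exp(v[i] -
# v[i+1]), i.e. the exponentially discounted count of all earlier spikes,
# evaluated at the current spike. Spike times are assumed to be already
# divided by tau.
def _demo_markage():
    v = np.array([0.0, 0.5, 2.0])
    m = np.zeros_like(v)
    for i in range(v.size - 1):
        m[i + 1] = (m[i] + 1.0) * np.exp(v[i] - v[i + 1])
    return m  # array([0., exp(-0.5), (exp(-0.5) + 1) * exp(-1.5)])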
|
zenlambda/pip
|
refs/heads/develop
|
tests/functional/test_wheel.py
|
26
|
"""'pip wheel' tests"""
import os
import pytest
from os.path import exists
from pip.locations import write_delete_marker_file
from pip.status_codes import PREVIOUS_BUILD_DIR_ERROR
from tests.lib import pyversion
def test_pip_wheel_fails_without_wheel(script, data):
"""
Test 'pip wheel' fails without wheel
"""
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'simple==3.0',
expect_error=True,
)
assert "'pip wheel' requires the 'wheel' package" in result.stderr
@pytest.mark.network
def test_pip_wheel_success(script, data):
"""
Test 'pip wheel' success.
"""
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'simple==3.0',
)
wheel_file_name = 'simple-3.0-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
assert "Successfully built simple" in result.stdout, result.stdout
@pytest.mark.network
def test_pip_wheel_downloads_wheels(script, data):
"""
Test 'pip wheel' downloads wheels
"""
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'simple.dist',
)
wheel_file_name = 'simple.dist-0.1-py2.py3-none-any.whl'
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
assert "Saved" in result.stdout, result.stdout
@pytest.mark.network
def test_pip_wheel_builds_when_no_binary_set(script, data):
script.pip('install', 'wheel')
res = script.pip(
'wheel', '--no-index', '--no-binary', ':all:', '-f', data.find_links,
'setuptools==0.9.8')
assert "Running setup.py bdist_wheel for setuptools" in str(res), str(res)
@pytest.mark.network
def test_pip_wheel_builds_editable_deps(script, data):
"""
Test 'pip wheel' finds and builds dependencies of editables
"""
script.pip('install', 'wheel')
editable_path = os.path.join(data.src, 'requires_simple')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, '-e', editable_path
)
wheel_file_name = 'simple-1.0-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
@pytest.mark.network
def test_pip_wheel_fail(script, data):
"""
Test 'pip wheel' failure.
"""
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'wheelbroken==0.1',
expect_error=True,
)
wheel_file_name = 'wheelbroken-0.1-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path not in result.files_created, (
wheel_file_path,
result.files_created,
)
assert "FakeError" in result.stdout, result.stdout
assert "Failed to build wheelbroken" in result.stdout, result.stdout
assert result.returncode != 0
@pytest.mark.network
def test_no_clean_option_blocks_cleaning_after_wheel(script, data):
"""
Test --no-clean option blocks cleaning after wheel build
"""
script.pip('install', 'wheel')
build = script.venv_path / 'build'
result = script.pip(
'wheel', '--no-clean', '--no-index', '--build', build,
'--find-links=%s' % data.find_links, 'simple',
)
build = build / 'simple'
assert exists(build), "build/simple should still exist %s" % str(result)
@pytest.mark.network
def test_pip_wheel_source_deps(script, data):
"""
Test 'pip wheel' finds and builds source archive dependencies
of wheels
"""
# 'requires_source' is a wheel that depends on the 'source' project
script.pip('install', 'wheel')
result = script.pip(
'wheel', '--no-index', '-f', data.find_links, 'requires_source',
)
wheel_file_name = 'source-1.0-py%s-none-any.whl' % pyversion[0]
wheel_file_path = script.scratch / 'wheelhouse' / wheel_file_name
assert wheel_file_path in result.files_created, result.stdout
assert "Successfully built source" in result.stdout, result.stdout
@pytest.mark.network
def test_pip_wheel_fail_cause_of_previous_build_dir(script, data):
"""
Test when 'pip wheel' tries to install a package that has a previous build
directory
"""
script.pip('install', 'wheel')
# Given that I have a previous build dir of the `simple` package
build = script.venv_path / 'build' / 'simple'
os.makedirs(build)
write_delete_marker_file(script.venv_path / 'build')
build.join('setup.py').write('#')
# When I call pip trying to install things again
result = script.pip(
'wheel', '--no-index', '--find-links=%s' % data.find_links,
'--build', script.venv_path / 'build',
'simple==3.0', expect_error=True,
)
# Then I see that the error code is the right one
assert result.returncode == PREVIOUS_BUILD_DIR_ERROR, result
|
allenta/varnish-bans-manager
|
refs/heads/master
|
varnish_bans_manager/filesystem/forms.py
|
1
|
# -*- coding: utf-8 -*-
'''
:copyright: (c) 2012 by Allenta Consulting, see AUTHORS.txt for more details.
:license: GPL, see LICENSE.txt for more details.
'''
from __future__ import absolute_import
from django.forms import ImageField as BaseImageField
from django.forms.widgets import FileInput, CheckboxInput
from django.utils.safestring import mark_safe
class ImageFileInput(FileInput):
def _clear_checkbox_name(self, name):
'''
Given the name of the file input, return the name of the clear hidden
input.
'''
return name + '-clear'
def render(self, name, value, attrs=None):
output = []
# Base content.
output.append(super(ImageFileInput, self).render(name, value, attrs))
if value and hasattr(value, "url"):
# Add clear checkbox input.
output.append(
CheckboxInput().render(
self._clear_checkbox_name(name), False, attrs={
'class': 'image-file-input-clear'
}))
# Add image preview with delete icon.
output.append(
('<ul class="thumbnails image-file-input-preview">'
' <li>'
' <a target="_blank" href="%s" class="thumbnail">'
' <img src="%s" />'
' </a>'
' <a href="#" class="close image-file-input-delete">×</a>'
' </li>'
'</ul>' % (value.url, value.url)))
# Render.
return mark_safe(
'<div class="image-file-input">' + u''.join(output) + '</div>')
def value_from_datadict(self, data, files, name):
upload = super(ImageFileInput, self).value_from_datadict(
data, files, name)
# If no upload has been done and the clear checkbox has been checked,
# clear the value.
if not upload and CheckboxInput().value_from_datadict(
data, files, self._clear_checkbox_name(name)):
# False signals to clear any existing value, as opposed to just
# None.
return False
return upload
class ImageField(BaseImageField):
widget = ImageFileInput
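# A minimal usage sketch (an illustrative addition; ProfileForm and the
# 'avatar' field are assumptions, not part of this module): the ImageField
# above drops into a standard Django form, and its widget renders the clear
# checkbox and preview markup automatically.
from django import forms  # only needed for this sketch
class ProfileForm(forms.Form):
    avatar = ImageField(required=False)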
|
sahiljain/catapult
|
refs/heads/master
|
third_party/py_vulcanize/third_party/rjsmin/_setup/py2/__init__.py
|
43
|
# -*- coding: ascii -*-
#
# Copyright 2007, 2008, 2009, 2010, 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
================
Package _setup
================
This package provides tools for main package setup.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
from _setup.setup import run # pylint: disable = W0611
|
gerddie/nipype
|
refs/heads/master
|
nipype/pipeline/plugins/slurm.py
|
9
|
'''
Created on Aug 2, 2013
@author: chadcumba
Parallel workflow execution with SLURM
'''
import os
import re
import subprocess
from time import sleep
from .base import (SGELikeBatchManagerBase, logger, iflogger, logging)
from nipype.interfaces.base import CommandLine
class SLURMPlugin(SGELikeBatchManagerBase):
'''
Execute using SLURM
The plugin_args input to run can be used to control the SLURM execution.
Currently supported options are:
- template : template to use for batch job submission
    - sbatch_args: arguments to prepend to the sbatch call
'''
def __init__(self, **kwargs):
        template = "#!/bin/bash"
self._retry_timeout = 2
self._max_tries = 2
self._template = template
self._sbatch_args = None
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'template' in kwargs['plugin_args']:
self._template = kwargs['plugin_args']['template']
if os.path.isfile(self._template):
self._template = open(self._template).read()
if 'sbatch_args' in kwargs['plugin_args']:
self._sbatch_args = kwargs['plugin_args']['sbatch_args']
self._pending = {}
super(SLURMPlugin, self).__init__(self._template, **kwargs)
def _is_pending(self, taskid):
# subprocess.Popen requires taskid to be a string
proc = subprocess.Popen(["squeue", '-j', '%s' % taskid],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
o, _ = proc.communicate()
return o.find(str(taskid)) > -1
def _submit_batchtask(self, scriptfile, node):
"""
This is more or less the _submit_batchtask from sge.py with flipped variable
names, different command line switches, and different output formatting/processing
"""
cmd = CommandLine('sbatch', environ=os.environ.data,
terminal_output='allatonce')
path = os.path.dirname(scriptfile)
sbatch_args = ''
if self._sbatch_args:
sbatch_args = self._sbatch_args
if 'sbatch_args' in node.plugin_args:
if 'overwrite' in node.plugin_args and\
node.plugin_args['overwrite']:
sbatch_args = node.plugin_args['sbatch_args']
else:
sbatch_args += (" " + node.plugin_args['sbatch_args'])
if '-o' not in sbatch_args:
sbatch_args = '%s -o %s' % (sbatch_args, os.path.join(path, 'slurm-%j.out'))
if '-e' not in sbatch_args:
sbatch_args = '%s -e %s' % (sbatch_args, os.path.join(path, 'slurm-%j.out'))
if node._hierarchy:
jobname = '.'.join((os.environ.data['LOGNAME'],
node._hierarchy,
node._id))
else:
jobname = '.'.join((os.environ.data['LOGNAME'],
node._id))
jobnameitems = jobname.split('.')
jobnameitems.reverse()
jobname = '.'.join(jobnameitems)
cmd.inputs.args = '%s -J %s %s' % (sbatch_args,
jobname,
scriptfile)
oldlevel = iflogger.level
iflogger.setLevel(logging.getLevelName('CRITICAL'))
tries = 0
while True:
try:
result = cmd.run()
            except Exception as e:
                if tries < self._max_tries:
                    tries += 1
                    sleep(self._retry_timeout)  # wait retry_timeout seconds, then retry
else:
iflogger.setLevel(oldlevel)
raise RuntimeError('\n'.join((('Could not submit sbatch task'
' for node %s') % node._id,
str(e))))
else:
break
logger.debug('Ran command ({0})'.format(cmd.cmdline))
iflogger.setLevel(oldlevel)
# retrieve taskid
lines = [line for line in result.runtime.stdout.split('\n') if line]
taskid = int(re.match("Submitted batch job ([0-9]*)",
lines[-1]).groups()[0])
self._pending[taskid] = node.output_dir()
logger.debug('submitted sbatch task: %d for node %s' % (taskid, node._id))
return taskid
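# A hedged sketch (an illustrative addition) of the plugin_args dict shape
# consumed by SLURMPlugin.__init__ above; all keys are optional and unknown
# keys are ignored. The values shown are assumptions for illustration only.
def _demo_plugin_args():
    return {'template': '#!/bin/bash\n#SBATCH --partition=normal',
            'sbatch_args': '--qos=normal',
            'retry_timeout': 5,
            'max_tries': 3}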
|
csdevsc/mcs_website
|
refs/heads/master
|
tasks/__init__.py
|
12133432
| |
baylee-d/osf.io
|
refs/heads/develop
|
api_tests/test/views/__init__.py
|
12133432
| |
davehunt/kuma
|
refs/heads/master
|
vendor/packages/logilab/common/test/data/find_test/module.py
|
12133432
| |
mcmaxwell/idea_digital_agency
|
refs/heads/master
|
idea/feincms/content/raw/__init__.py
|
12133432
|